author     Erik Johnston <erik@matrix.org>  2019-03-15 14:36:44 +0000
committer  Erik Johnston <erik@matrix.org>  2019-03-15 14:36:44 +0000
commit     44c0661d9712d822e45666507e4a2ebdb0500fb1 (patch)
tree       dee85616a7d09a90c628829d154cc56f44dd500f
parent     Add some debug logging for device list handling (diff)
parent     fix some typos in federate.md (diff)
download   synapse-44c0661d9712d822e45666507e4a2ebdb0500fb1.tar.xz

Merge branch 'master' of github.com:matrix-org/synapse into erikj/dinsic-merged-master
-rw-r--r--  .circleci/config.yml | 14
-rwxr-xr-x  .circleci/merge_base_branch.sh | 4
-rw-r--r--  .codecov.yml | 15
-rw-r--r--  .coveragerc | 7
-rw-r--r--  .dockerignore | 2
-rw-r--r--  .editorconfig | 9
-rw-r--r--  .github/ISSUE_TEMPLATE/BUG_REPORT.md (renamed from .github/ISSUE_TEMPLATE.md) | 40
-rw-r--r--  .github/ISSUE_TEMPLATE/FEATURE_REQUEST.md | 9
-rw-r--r--  .github/ISSUE_TEMPLATE/SUPPORT_REQUEST.md | 9
-rw-r--r--  .github/PULL_REQUEST_TEMPLATE.md | 7
-rw-r--r--  .github/SUPPORT.md | 3
-rw-r--r--  .gitignore | 83
-rw-r--r--  .travis.yml | 73
-rw-r--r--  AUTHORS.rst | 5
-rw-r--r--  CHANGES.md | 401
-rw-r--r--  CONTRIBUTING.rst | 39
-rw-r--r--  INSTALL.md | 430
-rw-r--r--  MANIFEST.in | 5
-rw-r--r--  README.rst | 705
-rw-r--r--  UPGRADE.rst | 105
-rw-r--r--  changelog.d/4832.misc | 1
-rw-r--r--  contrib/docker/docker-compose.yml | 10
-rw-r--r--  contrib/prometheus/README.md (renamed from contrib/prometheus/README) | 7
-rw-r--r--  contrib/purge_api/README.md | 16
-rw-r--r--  contrib/purge_api/purge_history.sh | 141
-rw-r--r--  contrib/purge_api/purge_remote_media.sh | 54
-rw-r--r--  contrib/systemd/matrix-synapse.service | 31
-rw-r--r--  contrib/systemd/synapse.service | 22
-rw-r--r--  debian/.gitignore | 7
-rw-r--r--  debian/NEWS | 32
-rwxr-xr-x  debian/build_virtualenv | 91
-rw-r--r--  debian/changelog | 698
-rw-r--r--  debian/compat | 1
-rw-r--r--  debian/control | 40
-rw-r--r--  debian/copyright | 118
-rw-r--r--  debian/dirs | 3
-rw-r--r--  debian/hash_password.1 | 90
-rw-r--r--  debian/hash_password.ronn | 69
-rw-r--r--  debian/install | 2
-rw-r--r--  debian/log.yaml | 36
-rwxr-xr-x  debian/manage_debconf.pl | 130
-rw-r--r--  debian/manpages | 4
-rwxr-xr-x  debian/matrix-synapse-py3.config | 12
-rw-r--r--  debian/matrix-synapse-py3.links | 4
-rw-r--r--  debian/matrix-synapse-py3.postinst | 56
-rw-r--r--  debian/matrix-synapse-py3.preinst | 31
-rw-r--r--  debian/matrix-synapse-py3.triggers | 9
-rw-r--r--  debian/matrix-synapse.default | 2
-rw-r--r--  debian/matrix-synapse.service | 16
-rw-r--r--  debian/po/POTFILES.in | 1
-rw-r--r--  debian/po/templates.pot | 56
-rw-r--r--  debian/register_new_matrix_user.1 | 72
-rw-r--r--  debian/register_new_matrix_user.ronn | 61
-rwxr-xr-x  debian/rules | 22
-rw-r--r--  debian/source/format | 1
-rw-r--r--  debian/synapse_port_db.1 | 98
-rw-r--r--  debian/synapse_port_db.ronn | 87
-rw-r--r--  debian/synctl.1 | 63
-rw-r--r--  debian/synctl.ronn | 70
-rw-r--r--  debian/templates | 19
-rw-r--r--  demo/.gitignore | 7
-rw-r--r--  demo/demo.tls.dh | 9
-rw-r--r--  docker/Dockerfile | 24
-rw-r--r--  docker/Dockerfile-dhvirtualenv | 68
-rw-r--r--  docker/README.md | 88
-rw-r--r--  docker/build_debian.sh | 27
-rw-r--r--  docker/conf/homeserver.yaml | 21
-rwxr-xr-x  docker/start.py | 18
-rw-r--r--  docs/ACME.md | 129
-rw-r--r--  docs/MSC1711_certificates_FAQ.md | 338
-rw-r--r--  docs/admin_api/purge_history_api.rst | 8
-rw-r--r--  docs/admin_api/register_api.rst | 11
-rw-r--r--  docs/consent_tracking.md | 39
-rw-r--r--  docs/federate.md | 123
-rw-r--r--  docs/log_contexts.rst | 58
-rw-r--r--  docs/privacy_policy_templates/en/1.0.html | 15
-rw-r--r--  docs/reverse_proxy.rst | 112
-rw-r--r--  docs/tcp_replication.rst | 26
-rw-r--r--  docs/turn-howto.rst | 5
-rw-r--r--  docs/workers.rst | 12
-rwxr-xr-x  jenkins/prepare_synapse.sh | 19
-rwxr-xr-x  scripts-dev/build_debian_packages | 154
-rwxr-xr-x  scripts-dev/check-newsfragment | 41
-rwxr-xr-x  scripts-dev/federation_client.py | 9
-rwxr-xr-x  scripts-dev/make_identicons.pl | 39
-rwxr-xr-x  scripts/generate_config | 67
-rwxr-xr-x  scripts/hash_password | 41
-rwxr-xr-x  scripts/synapse_port_db | 1
-rwxr-xr-x  setup.py | 16
-rw-r--r--  synapse/__init__.py | 4
-rw-r--r--  synapse/_scripts/register_new_matrix_user.py | 23
-rw-r--r--  synapse/api/auth.py | 134
-rw-r--r--  synapse/api/constants.py | 44
-rw-r--r--  synapse/api/errors.py | 18
-rw-r--r--  synapse/api/filtering.py | 25
-rw-r--r--  synapse/api/urls.py | 5
-rw-r--r--  synapse/app/__init__.py | 34
-rw-r--r--  synapse/app/_base.py | 170
-rw-r--r--  synapse/app/appservice.py | 7
-rw-r--r--  synapse/app/client_reader.py | 17
-rw-r--r--  synapse/app/event_creator.py | 13
-rw-r--r--  synapse/app/federation_reader.py | 28
-rw-r--r--  synapse/app/federation_sender.py | 16
-rw-r--r--  synapse/app/frontend_proxy.py | 28
-rwxr-xr-x  synapse/app/homeserver.py | 210
-rw-r--r--  synapse/app/media_repository.py | 13
-rw-r--r--  synapse/app/pusher.py | 3
-rw-r--r--  synapse/app/synchrotron.py | 21
-rw-r--r--  synapse/app/user_dir.py | 17
-rw-r--r--  synapse/appservice/scheduler.py | 36
-rw-r--r--  synapse/config/__main__.py | 2
-rw-r--r--  synapse/config/_base.py | 69
-rw-r--r--  synapse/config/api.py | 3
-rw-r--r--  synapse/config/appservice.py | 7
-rw-r--r--  synapse/config/captcha.py | 5
-rw-r--r--  synapse/config/cas.py | 1
-rw-r--r--  synapse/config/consent_config.py | 56
-rw-r--r--  synapse/config/database.py | 5
-rw-r--r--  synapse/config/groups.py | 4
-rw-r--r--  synapse/config/homeserver.py | 11
-rw-r--r--  synapse/config/jwt_config.py | 8
-rw-r--r--  synapse/config/key.py | 41
-rw-r--r--  synapse/config/logger.py | 25
-rw-r--r--  synapse/config/metrics.py | 46
-rw-r--r--  synapse/config/password.py | 1
-rw-r--r--  synapse/config/password_auth_providers.py | 28
-rw-r--r--  synapse/config/push.py | 4
-rw-r--r--  synapse/config/ratelimiting.py | 7
-rw-r--r--  synapse/config/registration.py | 50
-rw-r--r--  synapse/config/repository.py | 91
-rw-r--r--  synapse/config/room_directory.py | 170
-rw-r--r--  synapse/config/saml2.py | 55
-rw-r--r--  synapse/config/saml2_config.py | 109
-rw-r--r--  synapse/config/server.py | 371
-rw-r--r--  synapse/config/server_notices_config.py | 10
-rw-r--r--  synapse/config/spam_checker.py | 8
-rw-r--r--  synapse/config/tls.py | 350
-rw-r--r--  synapse/config/user_directory.py | 1
-rw-r--r--  synapse/config/voip.py | 7
-rw-r--r--  synapse/crypto/context_factory.py | 39
-rw-r--r--  synapse/crypto/event_signing.py | 109
-rw-r--r--  synapse/crypto/keyclient.py | 147
-rw-r--r--  synapse/crypto/keyring.py | 200
-rw-r--r--  synapse/event_auth.py | 43
-rw-r--r--  synapse/events/__init__.py | 178
-rw-r--r--  synapse/events/builder.py | 279
-rw-r--r--  synapse/events/utils.py | 50
-rw-r--r--  synapse/events/validator.py | 65
-rw-r--r--  synapse/federation/federation_base.py | 96
-rw-r--r--  synapse/federation/federation_client.py | 200
-rw-r--r--  synapse/federation/federation_server.py | 115
-rw-r--r--  synapse/federation/transaction_queue.py | 52
-rw-r--r--  synapse/federation/transport/client.py | 169
-rw-r--r--  synapse/federation/transport/server.py | 196
-rw-r--r--  synapse/federation/units.py | 3
-rw-r--r--  synapse/groups/attestations.py | 7
-rw-r--r--  synapse/handlers/__init__.py | 2
-rw-r--r--  synapse/handlers/_base.py | 2
-rw-r--r--  synapse/handlers/acme.py | 151
-rw-r--r--  synapse/handlers/auth.py | 34
-rw-r--r--  synapse/handlers/device.py | 32
-rw-r--r--  synapse/handlers/directory.py | 67
-rw-r--r--  synapse/handlers/e2e_room_keys.py | 94
-rw-r--r--  synapse/handlers/federation.py | 377
-rw-r--r--  synapse/handlers/groups_local.py | 12
-rw-r--r--  synapse/handlers/identity.py | 10
-rw-r--r--  synapse/handlers/message.py | 64
-rw-r--r--  synapse/handlers/pagination.py | 29
-rw-r--r--  synapse/handlers/receipts.py | 68
-rw-r--r--  synapse/handlers/register.py | 402
-rw-r--r--  synapse/handlers/room.py | 443
-rw-r--r--  synapse/handlers/room_list.py | 28
-rw-r--r--  synapse/handlers/room_member.py | 90
-rw-r--r--  synapse/handlers/search.py | 50
-rw-r--r--  synapse/handlers/sync.py | 37
-rw-r--r--  synapse/handlers/typing.py | 14
-rw-r--r--  synapse/handlers/user_directory.py | 137
-rw-r--r--  synapse/http/__init__.py | 15
-rw-r--r--  synapse/http/client.py | 394
-rw-r--r--  synapse/http/endpoint.py | 315
-rw-r--r--  synapse/http/federation/__init__.py (renamed from synapse/rest/key/v1/__init__.py) | 2
-rw-r--r--  synapse/http/federation/matrix_federation_agent.py | 452
-rw-r--r--  synapse/http/federation/srv_resolver.py | 169
-rw-r--r--  synapse/http/matrixfederationclient.py | 333
-rw-r--r--  synapse/http/server.py | 22
-rw-r--r--  synapse/http/servlet.py | 9
-rw-r--r--  synapse/metrics/__init__.py | 2
-rw-r--r--  synapse/module_api/__init__.py | 2
-rw-r--r--  synapse/push/clientformat.py | 2
-rw-r--r--  synapse/push/emailpusher.py | 5
-rw-r--r--  synapse/push/httppusher.py | 58
-rw-r--r--  synapse/push/mailer.py | 13
-rw-r--r--  synapse/push/push_rule_evaluator.py | 4
-rw-r--r--  synapse/push/pusher.py | 11
-rw-r--r--  synapse/push/pusherpool.py | 18
-rw-r--r--  synapse/python_dependencies.py | 264
-rw-r--r--  synapse/replication/http/__init__.py | 4
-rw-r--r--  synapse/replication/http/_base.py | 5
-rw-r--r--  synapse/replication/http/federation.py | 8
-rw-r--r--  synapse/replication/http/login.py | 74
-rw-r--r--  synapse/replication/http/membership.py | 4
-rw-r--r--  synapse/replication/http/register.py | 146
-rw-r--r--  synapse/replication/http/send_event.py | 8
-rw-r--r--  synapse/replication/slave/storage/_base.py | 14
-rw-r--r--  synapse/replication/tcp/client.py | 2
-rw-r--r--  synapse/replication/tcp/protocol.py | 21
-rw-r--r--  synapse/rest/__init__.py | 15
-rw-r--r--  synapse/rest/client/v1/admin.py | 11
-rw-r--r--  synapse/rest/client/v1/login.py | 267
-rw-r--r--  synapse/rest/client/v1/push_rule.py | 35
-rw-r--r--  synapse/rest/client/v1/pusher.py | 2
-rw-r--r--  synapse/rest/client/v1/room.py | 8
-rw-r--r--  synapse/rest/client/v1_only/__init__.py | 3
-rw-r--r--  synapse/rest/client/v1_only/base.py | 39
-rw-r--r--  synapse/rest/client/v1_only/register.py | 392
-rw-r--r--  synapse/rest/client/v2_alpha/account_data.py | 34
-rw-r--r--  synapse/rest/client/v2_alpha/auth.py | 117
-rw-r--r--  synapse/rest/client/v2_alpha/capabilities.py | 66
-rw-r--r--  synapse/rest/client/v2_alpha/register.py | 213
-rw-r--r--  synapse/rest/client/v2_alpha/room_keys.py | 55
-rw-r--r--  synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py | 89
-rw-r--r--  synapse/rest/client/v2_alpha/sync.py | 2
-rw-r--r--  synapse/rest/client/versions.py | 1
-rw-r--r--  synapse/rest/consent/consent_resource.py | 52
-rw-r--r--  synapse/rest/key/v1/server_key_resource.py | 92
-rw-r--r--  synapse/rest/media/v1/_base.py | 133
-rw-r--r--  synapse/rest/media/v1/config_resource.py | 2
-rw-r--r--  synapse/rest/media/v1/download_resource.py | 3
-rw-r--r--  synapse/rest/media/v1/identicon_resource.py | 68
-rw-r--r--  synapse/rest/media/v1/media_repository.py | 57
-rw-r--r--  synapse/rest/media/v1/preview_url_resource.py | 87
-rw-r--r--  synapse/rest/saml2/__init__.py (renamed from synapse/storage/schema/delta/34/sent_txn_purge.py) | 25
-rw-r--r--  synapse/rest/saml2/metadata_resource.py | 36
-rw-r--r--  synapse/rest/saml2/response_resource.py | 74
-rw-r--r--  synapse/rest/well_known.py | 73
-rw-r--r--  synapse/server.py | 29
-rw-r--r--  synapse/server.pyi | 6
-rw-r--r--  synapse/state/__init__.py | 8
-rw-r--r--  synapse/state/v1.py | 18
-rw-r--r--  synapse/state/v2.py | 34
-rw-r--r--  synapse/static/client/login/index.html | 37
-rw-r--r--  synapse/static/client/login/js/login.js | 32
-rw-r--r--  synapse/static/client/login/style.css | 19
-rw-r--r--  synapse/static/client/register/index.html | 2
-rw-r--r--  synapse/static/index.html | 63
-rw-r--r--  synapse/storage/__init__.py | 16
-rw-r--r--  synapse/storage/_base.py | 476
-rw-r--r--  synapse/storage/background_updates.py | 2
-rw-r--r--  synapse/storage/client_ips.py | 237
-rw-r--r--  synapse/storage/devices.py | 100
-rw-r--r--  synapse/storage/e2e_room_keys.py | 42
-rw-r--r--  synapse/storage/end_to_end_keys.py | 5
-rw-r--r--  synapse/storage/engines/__init__.py | 2
-rw-r--r--  synapse/storage/engines/postgres.py | 39
-rw-r--r--  synapse/storage/engines/sqlite.py (renamed from synapse/storage/engines/sqlite3.py) | 17
-rw-r--r--  synapse/storage/event_federation.py | 27
-rw-r--r--  synapse/storage/events.py | 334
-rw-r--r--  synapse/storage/events_worker.py | 86
-rw-r--r--  synapse/storage/monthly_active_users.py | 121
-rw-r--r--  synapse/storage/prepare_database.py | 4
-rw-r--r--  synapse/storage/pusher.py | 9
-rw-r--r--  synapse/storage/registration.py | 368
-rw-r--r--  synapse/storage/room.py | 2
-rw-r--r--  synapse/storage/roommember.py | 8
-rw-r--r--  synapse/storage/schema/delta/40/device_list_streams.sql | 9
-rw-r--r--  synapse/storage/schema/delta/52/add_event_to_state_group_index.sql | 19
-rw-r--r--  synapse/storage/schema/delta/52/device_list_streams_unique_idx.sql | 36
-rw-r--r--  synapse/storage/schema/delta/52/e2e_room_keys.sql | 53
-rw-r--r--  synapse/storage/schema/delta/53/add_user_type_to_users.sql | 19
-rw-r--r--  synapse/storage/schema/delta/53/drop_sent_transactions.sql (renamed from synapse/storage/schema/delta/11/v11.sql) | 4
-rw-r--r--  synapse/storage/schema/delta/53/event_format_version.sql | 16
-rw-r--r--  synapse/storage/schema/delta/53/user_ips_index.sql | 30
-rw-r--r--  synapse/storage/schema/full_schemas/11/transactions.sql | 19
-rw-r--r--  synapse/storage/schema/full_schemas/16/transactions.sql | 19
-rw-r--r--  synapse/storage/search.py | 6
-rw-r--r--  synapse/storage/state.py | 76
-rw-r--r--  synapse/storage/user_directory.py | 212
-rw-r--r--  synapse/types.py | 66
-rw-r--r--  synapse/util/async_helpers.py | 14
-rw-r--r--  synapse/util/caches/ttlcache.py | 161
-rw-r--r--  synapse/util/logcontext.py | 5
-rw-r--r--  synapse/util/stringutils.py | 39
-rwxr-xr-x  synctl | 4
-rw-r--r--  tests/__init__.py | 7
-rw-r--r--  tests/api/test_auth.py | 157
-rw-r--r--  tests/app/test_frontend_proxy.py | 2
-rw-r--r--  tests/app/test_openid_listener.py | 119
-rw-r--r--  tests/config/test_generate.py | 3
-rw-r--r--  tests/config/test_room_directory.py | 79
-rw-r--r--  tests/config/test_tls.py | 79
-rw-r--r--  tests/crypto/test_event_signing.py | 56
-rw-r--r--  tests/crypto/test_keyring.py | 18
-rw-r--r--  tests/handlers/test_directory.py | 1
-rw-r--r--  tests/handlers/test_e2e_room_keys.py | 151
-rw-r--r--  tests/handlers/test_register.py | 59
-rw-r--r--  tests/handlers/test_user_directory.py | 91
-rw-r--r--  tests/http/__init__.py | 42
-rw-r--r--  tests/http/federation/__init__.py | 14
-rw-r--r--  tests/http/federation/test_matrix_federation_agent.py | 991
-rw-r--r--  tests/http/federation/test_srv_resolver.py | 207
-rw-r--r--  tests/http/server.pem | 81
-rw-r--r--  tests/http/test_fedclient.py | 169
-rw-r--r--  tests/patch_inline_callbacks.py | 90
-rw-r--r--  tests/push/__init__.py | 0
-rw-r--r--  tests/push/test_email.py | 148
-rw-r--r--  tests/push/test_http.py | 160
-rw-r--r--  tests/replication/slave/storage/test_events.py | 4
-rw-r--r--  tests/rest/client/test_consent.py | 118
-rw-r--r--  tests/rest/client/v1/test_admin.py | 149
-rw-r--r--  tests/rest/client/v1/test_events.py | 100
-rw-r--r--  tests/rest/client/v1/test_register.py | 89
-rw-r--r--  tests/rest/client/v1/utils.py | 22
-rw-r--r--  tests/rest/client/v2_alpha/test_auth.py | 104
-rw-r--r--  tests/rest/client/v2_alpha/test_capabilities.py | 78
-rw-r--r--  tests/rest/client/v2_alpha/test_filter.py | 95
-rw-r--r--  tests/rest/client/v2_alpha/test_register.py | 138
-rw-r--r--  tests/rest/client/v2_alpha/test_sync.py | 123
-rw-r--r--  tests/rest/media/v1/test_media_storage.py | 146
-rw-r--r--  tests/rest/media/v1/test_url_preview.py | 470
-rw-r--r--  tests/rest/test_well_known.py | 58
-rw-r--r--  tests/server.py | 160
-rw-r--r--  tests/server_notices/test_resource_limits_server_notices.py | 10
-rw-r--r--  tests/state/test_v2.py | 106
-rw-r--r--  tests/storage/test__base.py | 88
-rw-r--r--  tests/storage/test_background_update.py | 2
-rw-r--r--  tests/storage/test_base.py | 6
-rw-r--r--  tests/storage/test_client_ips.py | 71
-rw-r--r--  tests/storage/test_end_to_end_keys.py | 18
-rw-r--r--  tests/storage/test_keys.py | 3
-rw-r--r--  tests/storage/test_monthly_active_users.py | 65
-rw-r--r--  tests/storage/test_redaction.py | 5
-rw-r--r--  tests/storage/test_registration.py | 22
-rw-r--r--  tests/storage/test_roommember.py | 3
-rw-r--r--  tests/storage/test_state.py | 6
-rw-r--r--  tests/test_dns.py | 129
-rw-r--r--  tests/test_event_auth.py | 13
-rw-r--r--  tests/test_federation.py | 6
-rw-r--r--  tests/test_mau.py | 55
-rw-r--r--  tests/test_metrics.py | 24
-rw-r--r--  tests/test_server.py | 27
-rw-r--r--  tests/test_terms_auth.py | 123
-rw-r--r--  tests/test_types.py | 31
-rw-r--r--  tests/test_utils/__init__.py | 18
-rw-r--r--  tests/test_utils/logging_setup.py | 54
-rw-r--r--  tests/test_visibility.py | 6
-rw-r--r--  tests/unittest.py | 101
-rw-r--r--  tests/util/caches/test_ttlcache.py | 83
-rw-r--r--  tests/util/test_async_utils.py | 104
-rw-r--r--  tests/utils.py | 13
-rw-r--r--  tox.ini | 72
350 files changed, 19081 insertions(+), 6413 deletions(-)
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 5395028426..137747dae3 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -4,23 +4,21 @@ jobs:
     machine: true
     steps:
       - checkout
-      - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:${CIRCLE_TAG} .
-      - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:${CIRCLE_TAG}-py3 --build-arg PYTHON_VERSION=3.6 .
+      - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:${CIRCLE_TAG}-py2 .
+      - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:${CIRCLE_TAG} -t matrixdotorg/synapse:${CIRCLE_TAG}-py3 --build-arg PYTHON_VERSION=3.6 .
       - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
       - run: docker push matrixdotorg/synapse:${CIRCLE_TAG}
+      - run: docker push matrixdotorg/synapse:${CIRCLE_TAG}-py2
       - run: docker push matrixdotorg/synapse:${CIRCLE_TAG}-py3
   dockerhubuploadlatest:
     machine: true
     steps:
       - checkout
-      - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:${CIRCLE_SHA1} .
-      - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:${CIRCLE_SHA1}-py3 --build-arg PYTHON_VERSION=3.6 .
+      - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:latest-py2 .
+      - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:latest -t matrixdotorg/synapse:latest-py3 --build-arg PYTHON_VERSION=3.6 .
       - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
-      - run: docker tag matrixdotorg/synapse:${CIRCLE_SHA1} matrixdotorg/synapse:latest
-      - run: docker tag matrixdotorg/synapse:${CIRCLE_SHA1}-py3 matrixdotorg/synapse:latest-py3
-      - run: docker push matrixdotorg/synapse:${CIRCLE_SHA1}
-      - run: docker push matrixdotorg/synapse:${CIRCLE_SHA1}-py3
       - run: docker push matrixdotorg/synapse:latest
+      - run: docker push matrixdotorg/synapse:latest-py2
       - run: docker push matrixdotorg/synapse:latest-py3
   sytestpy2:
     docker:
diff --git a/.circleci/merge_base_branch.sh b/.circleci/merge_base_branch.sh
index b2c8c40f4c..4c19fa70d7 100755
--- a/.circleci/merge_base_branch.sh
+++ b/.circleci/merge_base_branch.sh
@@ -20,7 +20,7 @@ else
 fi
 
 # Show what we are before
-git show -s
+git --no-pager show -s
 
 # Set up username so it can do a merge
 git config --global user.email bot@matrix.org
@@ -31,4 +31,4 @@ git fetch -u origin $GITBASE
 git merge --no-edit origin/$GITBASE
 
 # Show what we are after.
-git show -s
+git --no-pager show -s
diff --git a/.codecov.yml b/.codecov.yml
new file mode 100644
index 0000000000..a05698a39c
--- /dev/null
+++ b/.codecov.yml
@@ -0,0 +1,15 @@
+comment:
+  layout: "diff"
+
+coverage:
+  status:
+    project:
+      default:
+        target: 0  # Target % coverage, can be auto. Turned off for now
+        threshold: null
+        base: auto
+    patch:
+      default:
+        target: 0
+        threshold: null
+        base: auto
diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 0000000000..e9460a340a
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,7 @@
+[run]
+branch = True
+parallel = True
+include = synapse/*
+
+[report]
+precision = 2
diff --git a/.dockerignore b/.dockerignore
index 0180602e56..3c3996eb4c 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -5,3 +5,5 @@ demo/etc
 tox.ini
 .git/*
 .tox/*
+debian/matrix-synapse/
+debian/matrix-synapse-*/
diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 0000000000..3edf9e717c
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,9 @@
+# EditorConfig https://EditorConfig.org
+
+# top-most EditorConfig file
+root = true
+
+# 4 space indentation
+[*.py]
+indent_style = space
+indent_size = 4
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE/BUG_REPORT.md
index 21acb3202a..5cf844bfb1 100644
--- a/.github/ISSUE_TEMPLATE.md
+++ b/.github/ISSUE_TEMPLATE/BUG_REPORT.md
@@ -1,6 +1,12 @@
-<!-- 
+---
+name: Bug report
+about: Create a report to help us improve
 
-**IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**: 
+---
+
+<!--
+
+**IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**:
 You will likely get better support more quickly if you ask in ** #matrix:matrix.org ** ;)
 
 
@@ -17,32 +23,44 @@ Text between <!-- and --​> marks will be invisible in the report.
 
 ### Description
 
-Describe here the problem that you are experiencing, or the feature you are requesting.
+<!-- Describe here the problem that you are experiencing -->
 
 ### Steps to reproduce
 
-- For bugs, list the steps
+- list the steps
 - that reproduce the bug
 - using hyphens as bullet points
 
+<!--
 Describe how what happens differs from what you expected.
 
-<!-- If you can identify any relevant log snippets from _homeserver.log_, please include
+If you can identify any relevant log snippets from _homeserver.log_, please include
 those (please be careful to remove any personal or private data). Please surround them with
-``` (three backticks, on a line on their own), so that they are formatted legibly. -->
+``` (three backticks, on a line on their own), so that they are formatted legibly.
+-->
 
 ### Version information
 
 <!-- IMPORTANT: please answer the following questions, to help us narrow down the problem -->
 
-- **Homeserver**: Was this issue identified on matrix.org or another homeserver?
+<!-- Was this issue identified on matrix.org or another homeserver? -->
+- **Homeserver**: 
 
 If not matrix.org:
-- **Version**:        What version of Synapse is running? <!-- 
+
+<!--
+What version of Synapse is running?
 You can find the Synapse version by inspecting the server headers (replace matrix.org with
 your own homeserver domain):
 $ curl -v https://matrix.org/_matrix/client/versions 2>&1 | grep "Server:"
 -->
-- **Install method**: package manager/git clone/pip      
-- **Platform**:       Tell us about the environment in which your homeserver is operating
-                      - distro, hardware, if it's running in a vm/container, etc.
+- **Version**: 
+
+- **Install method**: 
+<!-- examples: package manager/git clone/pip  -->
+
+- **Platform**: 
+<!--
+Tell us about the environment in which your homeserver is operating
+distro, hardware, if it's running in a vm/container, etc.
+-->
diff --git a/.github/ISSUE_TEMPLATE/FEATURE_REQUEST.md b/.github/ISSUE_TEMPLATE/FEATURE_REQUEST.md
new file mode 100644
index 0000000000..150a46f505
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/FEATURE_REQUEST.md
@@ -0,0 +1,9 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+
+---
+
+**Description:**
+
+<!-- Describe here the feature you are requesting. -->
diff --git a/.github/ISSUE_TEMPLATE/SUPPORT_REQUEST.md b/.github/ISSUE_TEMPLATE/SUPPORT_REQUEST.md
new file mode 100644
index 0000000000..77581596c4
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/SUPPORT_REQUEST.md
@@ -0,0 +1,9 @@
+---
+name: Support request
+about: I need support for Synapse
+
+---
+
+# Please ask for support in [**#matrix:matrix.org**](https://matrix.to/#/#matrix:matrix.org)
+
+## Don't file an issue as a support request.
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 0000000000..1ead0d0030
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,7 @@
+### Pull Request Checklist
+
+<!-- Please read CONTRIBUTING.rst before submitting your pull request -->
+
+* [ ] Pull request is based on the develop branch
+* [ ] Pull request includes a [changelog file](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.rst#changelog)
+* [ ] Pull request includes a [sign off](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.rst#sign-off)
diff --git a/.github/SUPPORT.md b/.github/SUPPORT.md
new file mode 100644
index 0000000000..7a4244f673
--- /dev/null
+++ b/.github/SUPPORT.md
@@ -0,0 +1,3 @@
+[**#matrix:matrix.org**](https://matrix.to/#/#matrix:matrix.org) is the official support room for Matrix, and can be accessed by any client from https://matrix.org/docs/projects/try-matrix-now.html 
+
+It can also be accessed via the IRC bridge at irc://irc.freenode.net/matrix or on the web here: https://webchat.freenode.net/?channels=matrix
diff --git a/.gitignore b/.gitignore
index 3b2252ad8a..a20f3e615d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,59 +1,36 @@
-*.pyc
-.*.swp
+# filename patterns
 *~
-*.lock
-
-.DS_Store
-_trial_temp/
-_trial_temp*/
-logs/
-dbs/
+.*.swp
+.#*
+*.deb
 *.egg
-dist/
-docs/build/
 *.egg-info
-
-cmdclient_config.json
-homeserver*.db
-homeserver*.log
-homeserver*.log.*
-homeserver*.pid
-homeserver*.yaml
-
-*.signing.key
-*.tls.crt
-*.tls.dh
-*.tls.key
-
-.coverage
-htmlcov
-
-demo/*/*.db
-demo/*/*.log
-demo/*/*.log.*
-demo/*/*.pid
-demo/media_store.*
-demo/etc
-
-uploads
-cache
-
-.idea/
-media_store/
-
+*.lock
+*.pyc
 *.tac
+_trial_temp/
+_trial_temp*/
 
-build/
-venv/
-venv*/
-*venv/
-
-localhost-800*/
-static/client/register/register_config.js
-.tox
-
-env/
-*.config
+# stuff that is likely to exist when you run a server locally
+/*.signing.key
+/*.tls.crt
+/*.tls.key
+/uploads
+/media_store/
+
+# IDEs
+/.idea/
+/.ropeproject/
+/.vscode/
+
+# build products
+/.coverage*
+!/.coveragerc
+/.tox
+/build/
+/coverage.*
+/dist/
+/docs/build/
+/htmlcov
+/pip-wheel-metadata/
 
-.vscode/
-.ropeproject/
diff --git a/.travis.yml b/.travis.yml
index fd41841c77..0d0fa7082a 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,4 +1,4 @@
-sudo: false
+dist: xenial
 language: python
 
 cache:
@@ -22,49 +22,76 @@ branches:
     - master
     - develop
     - /^release-v/
+    - rav/pg95
 
+# When running the tox environments that call Twisted Trial, we can pass the -j
+# flag to run the tests concurrently. We set this to 2 for CPU bound tests
+# (SQLite) and 4 for I/O bound tests (PostgreSQL).
 matrix:
   fast_finish: true
   include:
-  - python: 2.7
-    env: TOX_ENV=packaging
-
-  - python: 3.6
-    env: TOX_ENV="pep8,check_isort"
+  - name: "pep8"
+    python: 3.6
+    env: TOX_ENV="pep8,check_isort,packaging"
 
-  - python: 2.7
-    env: TOX_ENV=py27
+  - name: "py2.7 / sqlite"
+    python: 2.7
+    env: TOX_ENV=py27,codecov TRIAL_FLAGS="-j 2"
 
-  - python: 2.7
-    env: TOX_ENV=py27-old
+  - name: "py2.7 / sqlite / olddeps"
+    python: 2.7
+    env: TOX_ENV=py27-old TRIAL_FLAGS="-j 2"
 
-  - python: 2.7
-    env: TOX_ENV=py27-postgres TRIAL_FLAGS="-j 4"
+  - name: "py2.7 / postgres9.5"
+    python: 2.7
+    addons:
+      postgresql: "9.5"
+    env: TOX_ENV=py27-postgres,codecov TRIAL_FLAGS="-j 4"
     services:
       - postgresql
 
-  - python: 3.5
-    env: TOX_ENV=py35
+  - name: "py3.5 / sqlite"
+    python: 3.5
+    env: TOX_ENV=py35,codecov TRIAL_FLAGS="-j 2"
 
-  - python: 3.6
-    env: TOX_ENV=py36
+  - name: "py3.7 / sqlite"
+    python: 3.7
+    env: TOX_ENV=py37,codecov TRIAL_FLAGS="-j 2"
 
-  - python: 3.6
-    env: TOX_ENV=py36-postgres TRIAL_FLAGS="-j 4"
+  - name: "py3.7 / postgres9.4"
+    python: 3.7
+    addons:
+      postgresql: "9.4"
+    env: TOX_ENV=py37-postgres TRIAL_FLAGS="-j 4"
+    services:
+      - postgresql
+
+  - name: "py3.7 / postgres9.5"
+    python: 3.7
+    addons:
+      postgresql: "9.5"
+    env: TOX_ENV=py37-postgres,codecov TRIAL_FLAGS="-j 4"
     services:
       - postgresql
 
   - # we only need to check for the newsfragment if it's a PR build
     if: type = pull_request
+    name: "check-newsfragment"
     python: 3.6
-    env: TOX_ENV=check-newsfragment
-    script:
-      - git remote set-branches --add origin develop
-      - git fetch origin develop
-      - tox -e $TOX_ENV
+    script: scripts-dev/check-newsfragment
 
 install:
+  # this just logs the postgres version we will be testing against (if any)
+  - psql -At -U postgres -c 'select version();' || true
+
   - pip install tox
 
+  # if we don't have python3.6 in this environment, travis unhelpfully gives us
+  # a `python3.6` on our path which does nothing but spit out a warning. Tox
+  # tries to run it (even if we're not running a py36 env), so the build logs
+  # then have warnings which look like errors. To reduce the noise, remove the
+  # non-functional python3.6.
+  - ( ! command -v python3.6 || python3.6 --version ) &>/dev/null || rm -f $(command -v python3.6)
+
 script:
   - tox -e $TOX_ENV
diff --git a/AUTHORS.rst b/AUTHORS.rst
index 9a83d90153..d599aec74c 100644
--- a/AUTHORS.rst
+++ b/AUTHORS.rst
@@ -65,4 +65,7 @@ Pierre Jaury <pierre at jaury.eu>
 * Docker packaging
 
 Serban Constantin <serban.constantin at gmail dot com>
- * Small bug fix
\ No newline at end of file
+ * Small bug fix
+
+Jason Robinson <jasonr at matrix.org>
+ * Minor fixes
diff --git a/CHANGES.md b/CHANGES.md
index 8302610585..b25775d18e 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,402 @@
+Synapse 0.99.2 (2019-03-01)
+===========================
+
+Features
+--------
+
+- Added an HAProxy example in the reverse proxy documentation. Contributed by Benoît S. (“Benpro”). ([\#4541](https://github.com/matrix-org/synapse/issues/4541))
+- Add basic optional sentry integration. ([\#4632](https://github.com/matrix-org/synapse/issues/4632), [\#4694](https://github.com/matrix-org/synapse/issues/4694))
+- Transfer bans on room upgrade. ([\#4642](https://github.com/matrix-org/synapse/issues/4642))
+- Add configurable room list publishing rules. ([\#4647](https://github.com/matrix-org/synapse/issues/4647))
+- Support .well-known delegation when issuing certificates through ACME. ([\#4652](https://github.com/matrix-org/synapse/issues/4652))
+- Allow registration and login to be handled by a worker instance. ([\#4666](https://github.com/matrix-org/synapse/issues/4666), [\#4670](https://github.com/matrix-org/synapse/issues/4670), [\#4682](https://github.com/matrix-org/synapse/issues/4682))
+- Reduce the overhead of creating outbound federation connections over TLS by caching the TLS client options. ([\#4674](https://github.com/matrix-org/synapse/issues/4674))
+- Add prometheus metrics for number of outgoing EDUs, by type. ([\#4695](https://github.com/matrix-org/synapse/issues/4695))
+- Return correct error code when inviting a remote user to a room whose homeserver does not support the room version. ([\#4721](https://github.com/matrix-org/synapse/issues/4721))
+- Prevent showing rooms to other servers that were set to not federate. ([\#4746](https://github.com/matrix-org/synapse/issues/4746))
+
+
+Bugfixes
+--------
+
+- Fix possible exception when paginating. ([\#4263](https://github.com/matrix-org/synapse/issues/4263))
+- The dependency checker now correctly reports a version mismatch for optional
+  dependencies, instead of reporting the dependency missing. ([\#4450](https://github.com/matrix-org/synapse/issues/4450))
+- Set CORS headers on .well-known requests. ([\#4651](https://github.com/matrix-org/synapse/issues/4651))
+- Fix kicking guest users on guest access revocation in worker mode. ([\#4667](https://github.com/matrix-org/synapse/issues/4667))
+- Fix an issue in the database migration script where the
+  `e2e_room_keys.is_verified` column wasn't considered as
+  a boolean. ([\#4680](https://github.com/matrix-org/synapse/issues/4680))
+- Fix TaskStopped exceptions in logs when outbound requests time out. ([\#4690](https://github.com/matrix-org/synapse/issues/4690))
+- Fix ACME config for python 2. ([\#4717](https://github.com/matrix-org/synapse/issues/4717))
+- Fix paginating over federation persisting incorrect state. ([\#4718](https://github.com/matrix-org/synapse/issues/4718))
+
+
+Internal Changes
+----------------
+
+- Run `black` to reformat user directory code. ([\#4635](https://github.com/matrix-org/synapse/issues/4635))
+- Reduce number of exceptions we log. ([\#4643](https://github.com/matrix-org/synapse/issues/4643), [\#4668](https://github.com/matrix-org/synapse/issues/4668))
+- Introduce upsert batching functionality in the database layer. ([\#4644](https://github.com/matrix-org/synapse/issues/4644))
+- Fix various spelling mistakes. ([\#4657](https://github.com/matrix-org/synapse/issues/4657))
+- Cleanup request exception logging. ([\#4669](https://github.com/matrix-org/synapse/issues/4669), [\#4737](https://github.com/matrix-org/synapse/issues/4737), [\#4738](https://github.com/matrix-org/synapse/issues/4738))
+- Improve replication performance by reducing cache invalidation traffic. ([\#4671](https://github.com/matrix-org/synapse/issues/4671), [\#4715](https://github.com/matrix-org/synapse/issues/4715), [\#4748](https://github.com/matrix-org/synapse/issues/4748))
+- Test against Postgres 9.5 as well as 9.4. ([\#4676](https://github.com/matrix-org/synapse/issues/4676))
+- Run unit tests against python 3.7. ([\#4677](https://github.com/matrix-org/synapse/issues/4677))
+- Attempt to clarify installation instructions/config. ([\#4681](https://github.com/matrix-org/synapse/issues/4681))
+- Clean up gitignores. ([\#4688](https://github.com/matrix-org/synapse/issues/4688))
+- Minor tweaks to acme docs. ([\#4689](https://github.com/matrix-org/synapse/issues/4689))
+- Improve the logging in the pusher process. ([\#4691](https://github.com/matrix-org/synapse/issues/4691))
+- Better checks on newsfragments. ([\#4698](https://github.com/matrix-org/synapse/issues/4698), [\#4750](https://github.com/matrix-org/synapse/issues/4750))
+- Avoid some redundant work when processing read receipts. ([\#4706](https://github.com/matrix-org/synapse/issues/4706))
+- Run `push_receipts_to_remotes` as background job. ([\#4707](https://github.com/matrix-org/synapse/issues/4707))
+- Add prometheus metrics for number of badge update pushes. ([\#4709](https://github.com/matrix-org/synapse/issues/4709))
+- Reduce pusher logging on startup ([\#4716](https://github.com/matrix-org/synapse/issues/4716))
+- Don't log exceptions when failing to fetch remote server keys. ([\#4722](https://github.com/matrix-org/synapse/issues/4722))
+- Correctly proxy exception in frontend_proxy worker. ([\#4723](https://github.com/matrix-org/synapse/issues/4723))
+- Add database version to phonehome stats. ([\#4753](https://github.com/matrix-org/synapse/issues/4753))
+
+
+Synapse 0.99.1.1 (2019-02-14)
+=============================
+
+Bugfixes
+--------
+
+- Fix "TypeError: '>' not supported" when starting without an existing certificate.
+  Fix a bug where an existing certificate would be reprovisioned every day. ([\#4648](https://github.com/matrix-org/synapse/issues/4648))
+
+
+Synapse 0.99.1 (2019-02-14)
+===========================
+
+Features
+--------
+
+- Include m.room.encryption on invites by default ([\#3902](https://github.com/matrix-org/synapse/issues/3902))
+- Federation OpenID listener resource can now be activated even if federation is disabled ([\#4420](https://github.com/matrix-org/synapse/issues/4420))
+- Synapse's ACME support will now correctly reprovision a certificate that approaches its expiry while Synapse is running. ([\#4522](https://github.com/matrix-org/synapse/issues/4522))
+- Add ability to update backup versions ([\#4580](https://github.com/matrix-org/synapse/issues/4580))
+- Allow the "unavailable" presence status for /sync.
+  This change makes Synapse compliant with r0.4.0 of the Client-Server specification. ([\#4592](https://github.com/matrix-org/synapse/issues/4592))
+- There is no longer any need to specify `no_tls`: it is inferred from the absence of TLS listeners ([\#4613](https://github.com/matrix-org/synapse/issues/4613), [\#4615](https://github.com/matrix-org/synapse/issues/4615), [\#4617](https://github.com/matrix-org/synapse/issues/4617), [\#4636](https://github.com/matrix-org/synapse/issues/4636))
+- The default configuration no longer requires TLS certificates. ([\#4614](https://github.com/matrix-org/synapse/issues/4614))
+
+
+Bugfixes
+--------
+
+- Copy over room federation ability on room upgrade. ([\#4530](https://github.com/matrix-org/synapse/issues/4530))
+- Fix noisy "twisted.internet.task.TaskStopped" errors in logs ([\#4546](https://github.com/matrix-org/synapse/issues/4546))
+- Synapse is now tolerant of the `tls_fingerprints` option being None or not specified. ([\#4589](https://github.com/matrix-org/synapse/issues/4589))
+- Fix 'no unique or exclusion constraint' error ([\#4591](https://github.com/matrix-org/synapse/issues/4591))
+- Transfer Server ACLs on room upgrade. ([\#4608](https://github.com/matrix-org/synapse/issues/4608))
+- Fix failure to start when no TLS certificate was given even if TLS was disabled. ([\#4618](https://github.com/matrix-org/synapse/issues/4618))
+- Fix self-signed cert notice from generate-config. ([\#4625](https://github.com/matrix-org/synapse/issues/4625))
+- Fix performance of `user_ips` table deduplication background update ([\#4626](https://github.com/matrix-org/synapse/issues/4626), [\#4627](https://github.com/matrix-org/synapse/issues/4627))
+
+
+Internal Changes
+----------------
+
+- Change the user directory state query to use a filtered call to the db instead of a generic one. ([\#4462](https://github.com/matrix-org/synapse/issues/4462))
+- Reject federation transactions if they include more than 50 PDUs or 100 EDUs. ([\#4513](https://github.com/matrix-org/synapse/issues/4513))
+- Reduce duplication of ``synapse.app`` code. ([\#4567](https://github.com/matrix-org/synapse/issues/4567))
+- Fix docker upload job to push -py2 images. ([\#4576](https://github.com/matrix-org/synapse/issues/4576))
+- Add port configuration information to ACME instructions. ([\#4578](https://github.com/matrix-org/synapse/issues/4578))
+- Update MSC1711 FAQ to clarify .well-known usage ([\#4584](https://github.com/matrix-org/synapse/issues/4584))
+- Clean up default listener configuration ([\#4586](https://github.com/matrix-org/synapse/issues/4586))
+- Clarifications for reverse proxy docs ([\#4607](https://github.com/matrix-org/synapse/issues/4607))
+- Move ClientTLSOptionsFactory init out of `refresh_certificates` ([\#4611](https://github.com/matrix-org/synapse/issues/4611))
+- Fail cleanly if listener config lacks a 'port' ([\#4616](https://github.com/matrix-org/synapse/issues/4616))
+- Remove redundant entries from docker config ([\#4619](https://github.com/matrix-org/synapse/issues/4619))
+- README updates ([\#4621](https://github.com/matrix-org/synapse/issues/4621))
+
+
+Synapse 0.99.0 (2019-02-05)
+===========================
+
+Synapse v0.99.x is a precursor to the upcoming Synapse v1.0 release. It contains foundational changes to room architecture and the federation security model necessary to support the upcoming r0 release of the Server to Server API.
+
+Features
+--------
+
+- Synapse's cipher string has been updated to require ECDH key exchange. Configuring and generating dh_params is no longer required, and they will be ignored. ([\#4229](https://github.com/matrix-org/synapse/issues/4229))
+- Synapse can now automatically provision TLS certificates via ACME (the protocol used by CAs like Let's Encrypt). ([\#4384](https://github.com/matrix-org/synapse/issues/4384), [\#4492](https://github.com/matrix-org/synapse/issues/4492), [\#4525](https://github.com/matrix-org/synapse/issues/4525), [\#4572](https://github.com/matrix-org/synapse/issues/4572), [\#4564](https://github.com/matrix-org/synapse/issues/4564), [\#4566](https://github.com/matrix-org/synapse/issues/4566), [\#4547](https://github.com/matrix-org/synapse/issues/4547), [\#4557](https://github.com/matrix-org/synapse/issues/4557))
+- Implement MSC1708 (.well-known routing for server-server federation) ([\#4408](https://github.com/matrix-org/synapse/issues/4408), [\#4409](https://github.com/matrix-org/synapse/issues/4409), [\#4426](https://github.com/matrix-org/synapse/issues/4426), [\#4427](https://github.com/matrix-org/synapse/issues/4427), [\#4428](https://github.com/matrix-org/synapse/issues/4428), [\#4464](https://github.com/matrix-org/synapse/issues/4464), [\#4468](https://github.com/matrix-org/synapse/issues/4468), [\#4487](https://github.com/matrix-org/synapse/issues/4487), [\#4488](https://github.com/matrix-org/synapse/issues/4488), [\#4489](https://github.com/matrix-org/synapse/issues/4489), [\#4497](https://github.com/matrix-org/synapse/issues/4497), [\#4511](https://github.com/matrix-org/synapse/issues/4511), [\#4516](https://github.com/matrix-org/synapse/issues/4516), [\#4520](https://github.com/matrix-org/synapse/issues/4520), [\#4521](https://github.com/matrix-org/synapse/issues/4521), [\#4539](https://github.com/matrix-org/synapse/issues/4539), [\#4542](https://github.com/matrix-org/synapse/issues/4542), [\#4544](https://github.com/matrix-org/synapse/issues/4544))
+- Search now includes results from predecessor rooms after a room upgrade. ([\#4415](https://github.com/matrix-org/synapse/issues/4415))
+- Config option to disable requesting MSISDN on registration. ([\#4423](https://github.com/matrix-org/synapse/issues/4423))
+- Add a metric for tracking event stream position of the user directory. ([\#4445](https://github.com/matrix-org/synapse/issues/4445))
+- Support exposing server capabilities in CS API (MSC1753, MSC1804) ([\#4472](https://github.com/matrix-org/synapse/issues/4472), [81b7e7eed](https://github.com/matrix-org/synapse/commit/81b7e7eed323f55d6550e7a270a9dc2c4c7b0fe0))
+- Add support for room version 3 ([\#4483](https://github.com/matrix-org/synapse/issues/4483), [\#4499](https://github.com/matrix-org/synapse/issues/4499), [\#4515](https://github.com/matrix-org/synapse/issues/4515), [\#4523](https://github.com/matrix-org/synapse/issues/4523), [\#4535](https://github.com/matrix-org/synapse/issues/4535))
+- Synapse will now reload TLS certificates from disk upon SIGHUP. ([\#4495](https://github.com/matrix-org/synapse/issues/4495), [\#4524](https://github.com/matrix-org/synapse/issues/4524))
+- The matrixdotorg/synapse Docker images now use Python 3 by default. ([\#4558](https://github.com/matrix-org/synapse/issues/4558))
+
+Bugfixes
+--------
+
+- Prevent users with access tokens predating the introduction of device IDs from creating spurious entries in the user_ips table. ([\#4369](https://github.com/matrix-org/synapse/issues/4369))
+- Fix typo in ALL_USER_TYPES definition to ensure type is a tuple ([\#4392](https://github.com/matrix-org/synapse/issues/4392))
+- Fix high CPU usage due to remote devicelist updates ([\#4397](https://github.com/matrix-org/synapse/issues/4397))
+- Fix potential bug where creating or joining a room could fail ([\#4404](https://github.com/matrix-org/synapse/issues/4404))
+- Fix bug when rejecting remote invites ([\#4405](https://github.com/matrix-org/synapse/issues/4405), [\#4527](https://github.com/matrix-org/synapse/issues/4527))
+- Fix incorrect logcontexts after a Deferred was cancelled ([\#4407](https://github.com/matrix-org/synapse/issues/4407))
+- Ensure encrypted room state is persisted across room upgrades. ([\#4411](https://github.com/matrix-org/synapse/issues/4411))
+- Copy over whether a room is a direct message and any associated room tags on room upgrade. ([\#4412](https://github.com/matrix-org/synapse/issues/4412))
+- Fix None guard in calling config.server.is_threepid_reserved ([\#4435](https://github.com/matrix-org/synapse/issues/4435))
+- Don't send IP addresses as SNI ([\#4452](https://github.com/matrix-org/synapse/issues/4452))
+- Fix UnboundLocalError in post_urlencoded_get_json ([\#4460](https://github.com/matrix-org/synapse/issues/4460))
+- Add a timeout to filtered room directory queries. ([\#4461](https://github.com/matrix-org/synapse/issues/4461))
+- Workaround for login error when using both LDAP and internal authentication. ([\#4486](https://github.com/matrix-org/synapse/issues/4486))
+- Fix a bug where setting a relative consent directory path would cause a crash. ([\#4512](https://github.com/matrix-org/synapse/issues/4512))
+
+
+Deprecations and Removals
+-------------------------
+
+- Synapse no longer generates self-signed TLS certificates when generating a configuration file. ([\#4509](https://github.com/matrix-org/synapse/issues/4509))
+
+
+Improved Documentation
+----------------------
+
+- Update debian installation instructions ([\#4526](https://github.com/matrix-org/synapse/issues/4526))
+
+
+Internal Changes
+----------------
+
+- Synapse will now take advantage of native UPSERT functionality in PostgreSQL 9.5+ and SQLite 3.24+. ([\#4306](https://github.com/matrix-org/synapse/issues/4306), [\#4459](https://github.com/matrix-org/synapse/issues/4459), [\#4466](https://github.com/matrix-org/synapse/issues/4466), [\#4471](https://github.com/matrix-org/synapse/issues/4471), [\#4477](https://github.com/matrix-org/synapse/issues/4477), [\#4505](https://github.com/matrix-org/synapse/issues/4505))
+- Update README to use the new virtualenv everywhere ([\#4342](https://github.com/matrix-org/synapse/issues/4342))
+- Add better logging for unexpected errors while sending transactions ([\#4368](https://github.com/matrix-org/synapse/issues/4368))
+- Apply a unique index to the user_ips table, preventing duplicates. ([\#4370](https://github.com/matrix-org/synapse/issues/4370), [\#4432](https://github.com/matrix-org/synapse/issues/4432), [\#4434](https://github.com/matrix-org/synapse/issues/4434))
+- Silence travis-ci build warnings by removing non-functional python3.6 ([\#4377](https://github.com/matrix-org/synapse/issues/4377))
+- Fix a comment in the generated config file ([\#4387](https://github.com/matrix-org/synapse/issues/4387))
+- Add ground work for implementing future federation API versions ([\#4390](https://github.com/matrix-org/synapse/issues/4390))
+- Update dependencies on msgpack and pymacaroons to use the up-to-date packages. ([\#4399](https://github.com/matrix-org/synapse/issues/4399))
+- Tweak codecov settings to make them less loud. ([\#4400](https://github.com/matrix-org/synapse/issues/4400))
+- Implement server support for MSC1794 - Federation v2 Invite API ([\#4402](https://github.com/matrix-org/synapse/issues/4402))
+- debian package: symlink to explicit python version ([\#4433](https://github.com/matrix-org/synapse/issues/4433))
+- Add infrastructure to support different event formats ([\#4437](https://github.com/matrix-org/synapse/issues/4437), [\#4447](https://github.com/matrix-org/synapse/issues/4447), [\#4448](https://github.com/matrix-org/synapse/issues/4448), [\#4470](https://github.com/matrix-org/synapse/issues/4470), [\#4481](https://github.com/matrix-org/synapse/issues/4481), [\#4482](https://github.com/matrix-org/synapse/issues/4482), [\#4493](https://github.com/matrix-org/synapse/issues/4493), [\#4494](https://github.com/matrix-org/synapse/issues/4494), [\#4496](https://github.com/matrix-org/synapse/issues/4496), [\#4510](https://github.com/matrix-org/synapse/issues/4510), [\#4514](https://github.com/matrix-org/synapse/issues/4514))
+- Generate the debian config during build ([\#4444](https://github.com/matrix-org/synapse/issues/4444))
+- Clarify documentation for the `public_baseurl` config param ([\#4458](https://github.com/matrix-org/synapse/issues/4458), [\#4498](https://github.com/matrix-org/synapse/issues/4498))
+- Fix quoting for allowed_local_3pids example config ([\#4476](https://github.com/matrix-org/synapse/issues/4476))
+- Remove deprecated --process-dependency-links option from UPGRADE.rst ([\#4485](https://github.com/matrix-org/synapse/issues/4485))
+- Make it possible to set the log level for tests via an environment variable ([\#4506](https://github.com/matrix-org/synapse/issues/4506))
+- Reduce the log level of linearizer lock acquirement to DEBUG. ([\#4507](https://github.com/matrix-org/synapse/issues/4507))
+- Fix code to comply with linting in PyFlakes 3.7.1. ([\#4519](https://github.com/matrix-org/synapse/issues/4519))
+- Add some debug for membership syncing issues ([\#4538](https://github.com/matrix-org/synapse/issues/4538))
+- Docker: only copy what we need to the build image ([\#4562](https://github.com/matrix-org/synapse/issues/4562))
+
+
+Synapse 0.34.1.1 (2019-01-11)
+=============================
+
+This release fixes CVE-2019-5885 and is recommended for all users of Synapse 0.34.1.
+
+This release is compatible with Python 2.7 and 3.5+. Python 3.7 is fully supported.
+
+Bugfixes
+--------
+
+- Fix spontaneous logout on upgrade
+  ([\#4374](https://github.com/matrix-org/synapse/issues/4374))
+
+
+Synapse 0.34.1 (2019-01-09)
+===========================
+
+Internal Changes
+----------------
+
+- Add better logging for unexpected errors while sending transactions ([\#4361](https://github.com/matrix-org/synapse/issues/4361), [\#4362](https://github.com/matrix-org/synapse/issues/4362))
+
+
+Synapse 0.34.1rc1 (2019-01-08)
+==============================
+
+Features
+--------
+
+- Special-case a support user for use in verifying behaviour of a given server. The support user does not appear in user directory or monthly active user counts. ([\#4141](https://github.com/matrix-org/synapse/issues/4141), [\#4344](https://github.com/matrix-org/synapse/issues/4344))
+- Support for serving .well-known files ([\#4262](https://github.com/matrix-org/synapse/issues/4262))
+- Rework SAML2 authentication ([\#4265](https://github.com/matrix-org/synapse/issues/4265), [\#4267](https://github.com/matrix-org/synapse/issues/4267))
+- SAML2 authentication: Initialise user display name from SAML2 data ([\#4272](https://github.com/matrix-org/synapse/issues/4272))
+- Synapse can now have its conditional/extra dependencies installed by pip. This functionality can be used by using `pip install matrix-synapse[feature]`, where feature is a comma separated list with the possible values `email.enable_notifs`, `matrix-synapse-ldap3`, `postgres`, `resources.consent`, `saml2`, `url_preview`, and `test`. If you want to install all optional dependencies, you can use "all" instead. ([\#4298](https://github.com/matrix-org/synapse/issues/4298), [\#4325](https://github.com/matrix-org/synapse/issues/4325), [\#4327](https://github.com/matrix-org/synapse/issues/4327))
+- Add routes for reading account data. ([\#4303](https://github.com/matrix-org/synapse/issues/4303))
+- Add opt-in support for v2 rooms ([\#4307](https://github.com/matrix-org/synapse/issues/4307))
+- Add a script to generate a clean config file ([\#4315](https://github.com/matrix-org/synapse/issues/4315))
+- Return server data in /login response ([\#4319](https://github.com/matrix-org/synapse/issues/4319))
+
+
+Bugfixes
+--------
+
+- Fix contains_url check to be consistent with other instances in code-base and check that value is an instance of string. ([\#3405](https://github.com/matrix-org/synapse/issues/3405))
+- Fix CAS login when username is not valid in an MXID ([\#4264](https://github.com/matrix-org/synapse/issues/4264))
+- Send CORS headers for /media/config ([\#4279](https://github.com/matrix-org/synapse/issues/4279))
+- Add 'sandbox' to CSP for media repository ([\#4284](https://github.com/matrix-org/synapse/issues/4284))
+- Make the new landing page prettier. ([\#4294](https://github.com/matrix-org/synapse/issues/4294))
+- Fix deleting E2E room keys when using old SQLite versions. ([\#4295](https://github.com/matrix-org/synapse/issues/4295))
+- The metric synapse_admin_mau:current previously did not update when config.mau_stats_only was set to True ([\#4305](https://github.com/matrix-org/synapse/issues/4305))
+- Fixed per-room account data filters ([\#4309](https://github.com/matrix-org/synapse/issues/4309))
+- Fix indentation in default config ([\#4313](https://github.com/matrix-org/synapse/issues/4313))
+- Fix synapse:latest docker upload ([\#4316](https://github.com/matrix-org/synapse/issues/4316))
+- Fix test_metric.py compatibility with prometheus_client 0.5. Contributed by Maarten de Vries <maarten@de-vri.es>. ([\#4317](https://github.com/matrix-org/synapse/issues/4317))
+- Avoid packaging _trial_temp directory in -py3 debian packages ([\#4326](https://github.com/matrix-org/synapse/issues/4326))
+- Check jinja version for consent resource ([\#4327](https://github.com/matrix-org/synapse/issues/4327))
+- fix NPE in /messages by checking if all events were filtered out ([\#4330](https://github.com/matrix-org/synapse/issues/4330))
+- Fix `python -m synapse.config` on Python 3. ([\#4356](https://github.com/matrix-org/synapse/issues/4356))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove the deprecated v1/register API on Python 2. It was never ported to Python 3. ([\#4334](https://github.com/matrix-org/synapse/issues/4334))
+
+
+Internal Changes
+----------------
+
+- Getting URL previews of IP addresses no longer fails on Python 3. ([\#4215](https://github.com/matrix-org/synapse/issues/4215))
+- drop undocumented dependency on dateutil ([\#4266](https://github.com/matrix-org/synapse/issues/4266))
+- Update the example systemd config to use a virtualenv ([\#4273](https://github.com/matrix-org/synapse/issues/4273))
+- Update link to kernel DCO guide ([\#4274](https://github.com/matrix-org/synapse/issues/4274))
+- Make isort tox check print diff when it fails ([\#4283](https://github.com/matrix-org/synapse/issues/4283))
+- Log room_id in Unknown room errors ([\#4297](https://github.com/matrix-org/synapse/issues/4297))
+- Documentation improvements for coturn setup. Contributed by Krithin Sitaram. ([\#4333](https://github.com/matrix-org/synapse/issues/4333))
+- Update pull request template to use absolute links ([\#4341](https://github.com/matrix-org/synapse/issues/4341))
+- Update README to not lie about required restart when updating TLS certificates ([\#4343](https://github.com/matrix-org/synapse/issues/4343))
+- Update debian packaging for compatibility with transitional package ([\#4349](https://github.com/matrix-org/synapse/issues/4349))
+- Fix command hint to generate a config file when trying to start without a config file ([\#4353](https://github.com/matrix-org/synapse/issues/4353))
+- Add better logging for unexpected errors while sending transactions ([\#4358](https://github.com/matrix-org/synapse/issues/4358))
+
+
+Synapse 0.34.0 (2018-12-20)
+===========================
+
+Synapse 0.34.0 is the first release to fully support Python 3. Synapse will now
+run on Python versions 3.5 or 3.6 (as well as 2.7). Support for Python 3.7
+remains experimental.
+
+We recommend upgrading to Python 3, but make sure to read the [upgrade
+notes](UPGRADE.rst#upgrading-to-v0340) when doing so.
+
+Features
+--------
+
+- Add 'sandbox' to CSP for media repository ([\#4284](https://github.com/matrix-org/synapse/issues/4284))
+- Make the new landing page prettier. ([\#4294](https://github.com/matrix-org/synapse/issues/4294))
+- Fix deleting E2E room keys when using old SQLite versions. ([\#4295](https://github.com/matrix-org/synapse/issues/4295))
+- Add a welcome page for the client API port. Credit to @krombel! ([\#4289](https://github.com/matrix-org/synapse/issues/4289))
+- Remove Matrix console from the default distribution ([\#4290](https://github.com/matrix-org/synapse/issues/4290))
+- Add option to track MAU stats (but not limit people) ([\#3830](https://github.com/matrix-org/synapse/issues/3830))
+- Add an option to enable recording IPs for appservice users ([\#3831](https://github.com/matrix-org/synapse/issues/3831))
+- Rename login type `m.login.cas` to `m.login.sso` ([\#4220](https://github.com/matrix-org/synapse/issues/4220))
+- Add an option to disable search for homeservers that may not be interested in it. ([\#4230](https://github.com/matrix-org/synapse/issues/4230))
+
+
+Bugfixes
+--------
+
+- Pushrules can now again be made with non-ASCII rule IDs. ([\#4165](https://github.com/matrix-org/synapse/issues/4165))
+- The media repository now no longer fails to decode UTF-8 filenames when downloading remote media. ([\#4176](https://github.com/matrix-org/synapse/issues/4176))
+- URL previews now correctly decode non-UTF-8 text if the header contains a `<meta http-equiv="Content-Type"` header. ([\#4183](https://github.com/matrix-org/synapse/issues/4183))
+- Fix an issue where public consent URLs had two slashes. ([\#4192](https://github.com/matrix-org/synapse/issues/4192))
+- Fallback auth now accepts the session parameter on Python 3. ([\#4197](https://github.com/matrix-org/synapse/issues/4197))
+- Remove riot.im from the list of trusted Identity Servers in the default configuration ([\#4207](https://github.com/matrix-org/synapse/issues/4207))
+- fix start up failure when mau_limit_reserved_threepids set and db is postgres ([\#4211](https://github.com/matrix-org/synapse/issues/4211))
+- Fix auto join failures for servers that require user consent ([\#4223](https://github.com/matrix-org/synapse/issues/4223))
+- Fix exception caused by non-ascii event IDs ([\#4241](https://github.com/matrix-org/synapse/issues/4241))
+- Pushers can now be unsubscribed from on Python 3. ([\#4250](https://github.com/matrix-org/synapse/issues/4250))
+- Fix UnicodeDecodeError when postgres is configured to give non-English errors ([\#4253](https://github.com/matrix-org/synapse/issues/4253))
+
+
+Internal Changes
+----------------
+
+- Debian packages utilising a virtualenv with bundled dependencies can now be built. ([\#4212](https://github.com/matrix-org/synapse/issues/4212))
+- Disable pager when running git-show in CI ([\#4291](https://github.com/matrix-org/synapse/issues/4291))
+- A coveragerc file has been added. ([\#4180](https://github.com/matrix-org/synapse/issues/4180))
+- Add a GitHub pull request template and add multiple issue templates ([\#4182](https://github.com/matrix-org/synapse/issues/4182))
+- Update README to reflect the fact that [\#1491](https://github.com/matrix-org/synapse/issues/1491) is fixed ([\#4188](https://github.com/matrix-org/synapse/issues/4188))
+- Run the AS senders as background processes to fix warnings ([\#4189](https://github.com/matrix-org/synapse/issues/4189))
+- Add some diagnostics to the tests to detect logcontext problems ([\#4190](https://github.com/matrix-org/synapse/issues/4190))
+- Add missing `jpeg` package prerequisite for OpenBSD in README. ([\#4193](https://github.com/matrix-org/synapse/issues/4193))
+- Add a note saying you need to manually reclaim disk space after using the Purge History API ([\#4200](https://github.com/matrix-org/synapse/issues/4200))
+- More logcontext checking in unittests ([\#4205](https://github.com/matrix-org/synapse/issues/4205))
+- Ignore `__pycache__` directories in the database schema folder ([\#4214](https://github.com/matrix-org/synapse/issues/4214))
+- Add note to UPGRADE.rst about removing riot.im from list of trusted identity servers ([\#4224](https://github.com/matrix-org/synapse/issues/4224))
+- Added automated coverage reporting to CI. ([\#4225](https://github.com/matrix-org/synapse/issues/4225))
+- Garbage-collect after each unit test to fix logcontext leaks ([\#4227](https://github.com/matrix-org/synapse/issues/4227))
+- Add more detail to logging for the "More than one row matched" error ([\#4234](https://github.com/matrix-org/synapse/issues/4234))
+- Drop sent_transactions table ([\#4244](https://github.com/matrix-org/synapse/issues/4244))
+- Add a basic .editorconfig ([\#4257](https://github.com/matrix-org/synapse/issues/4257))
+- Update README.rst and UPGRADE.rst for Python 3. ([\#4260](https://github.com/matrix-org/synapse/issues/4260))
+- Remove obsolete `verbose` and `log_file` settings from `homeserver.yaml` for Docker image. ([\#4261](https://github.com/matrix-org/synapse/issues/4261))
+
+
+Synapse 0.33.9 (2018-11-19)
+===========================
+
+No significant changes.
+
+
+Synapse 0.33.9rc1 (2018-11-14)
+==============================
+
+Features
+--------
+
+- Include flags to optionally add `m.login.terms` to the registration flow when consent tracking is enabled. ([\#4004](https://github.com/matrix-org/synapse/issues/4004), [\#4133](https://github.com/matrix-org/synapse/issues/4133), [\#4142](https://github.com/matrix-org/synapse/issues/4142), [\#4184](https://github.com/matrix-org/synapse/issues/4184))
+- Support for replacing rooms with new ones ([\#4091](https://github.com/matrix-org/synapse/issues/4091), [\#4099](https://github.com/matrix-org/synapse/issues/4099), [\#4100](https://github.com/matrix-org/synapse/issues/4100), [\#4101](https://github.com/matrix-org/synapse/issues/4101))
+
+
+Bugfixes
+--------
+
+- Fix exceptions when using the email mailer on Python 3. ([\#4095](https://github.com/matrix-org/synapse/issues/4095))
+- Fix e2e key backup with more than 9 backup versions ([\#4113](https://github.com/matrix-org/synapse/issues/4113))
+- Searches that request profile info now no longer fail with a 500. ([\#4122](https://github.com/matrix-org/synapse/issues/4122))
+- Fix return code of empty key backups ([\#4123](https://github.com/matrix-org/synapse/issues/4123))
+- If the typing stream ID goes backwards (as on a worker when the master restarts), the worker's typing handler will no longer erroneously report rooms containing new typing events. ([\#4127](https://github.com/matrix-org/synapse/issues/4127))
+- Fix table lock of device_lists_remote_cache which could freeze the application ([\#4132](https://github.com/matrix-org/synapse/issues/4132))
+- Fix exception when using state res v2 algorithm ([\#4135](https://github.com/matrix-org/synapse/issues/4135))
+- Generating the user consent URI no longer fails on Python 3. ([\#4140](https://github.com/matrix-org/synapse/issues/4140), [\#4163](https://github.com/matrix-org/synapse/issues/4163))
+- Loading URL previews from the DB cache on Postgres will no longer cause Unicode type errors when responding to the request, and URL previews will no longer fail if the remote server returns a Content-Type header with the charset in quotes. ([\#4157](https://github.com/matrix-org/synapse/issues/4157))
+- The hash_password script now works on Python 3. ([\#4161](https://github.com/matrix-org/synapse/issues/4161))
+- Fix noop checks when updating device keys, reducing spurious device list update notifications. ([\#4164](https://github.com/matrix-org/synapse/issues/4164))
+
+
+Deprecations and Removals
+-------------------------
+
+- The disused and un-specced identicon generator has been removed. ([\#4106](https://github.com/matrix-org/synapse/issues/4106))
+- The obsolete and non-functional /pull federation endpoint has been removed. ([\#4118](https://github.com/matrix-org/synapse/issues/4118))
+- The deprecated v1 key exchange endpoints have been removed. ([\#4119](https://github.com/matrix-org/synapse/issues/4119))
+- Synapse will no longer fetch keys using the fallback deprecated v1 key exchange method and will now always use v2. ([\#4120](https://github.com/matrix-org/synapse/issues/4120))
+
+
+Internal Changes
+----------------
+
+- Fix build of Docker image with docker-compose ([\#3778](https://github.com/matrix-org/synapse/issues/3778))
+- Delete unreferenced state groups during history purge ([\#4006](https://github.com/matrix-org/synapse/issues/4006))
+- The "Received rdata" log messages on workers is now logged at DEBUG, not INFO. ([\#4108](https://github.com/matrix-org/synapse/issues/4108))
+- Reduce replication traffic for device lists ([\#4109](https://github.com/matrix-org/synapse/issues/4109))
+- Fix `synapse_replication_tcp_protocol_*_commands` metric label to be full command name, rather than just the first character ([\#4110](https://github.com/matrix-org/synapse/issues/4110))
+- Log some bits about room creation ([\#4121](https://github.com/matrix-org/synapse/issues/4121))
+- Fix `tox` failure on old systems ([\#4124](https://github.com/matrix-org/synapse/issues/4124))
+- Add STATE_V2_TEST room version ([\#4128](https://github.com/matrix-org/synapse/issues/4128))
+- Clean up event accesses and tests ([\#4137](https://github.com/matrix-org/synapse/issues/4137))
+- The default logging config will now set an explicit log file encoding of UTF-8. ([\#4138](https://github.com/matrix-org/synapse/issues/4138))
+- Add helper functions for getting prev and auth events of an event ([\#4139](https://github.com/matrix-org/synapse/issues/4139))
+- Add some tests for the HTTP pusher. ([\#4149](https://github.com/matrix-org/synapse/issues/4149))
+- Add purge_history.sh and purge_remote_media.sh scripts to contrib/ ([\#4155](https://github.com/matrix-org/synapse/issues/4155))
+- HTTP tests have been refactored to contain less boilerplate. ([\#4156](https://github.com/matrix-org/synapse/issues/4156))
+- Drop incoming events from federation for unknown rooms ([\#4165](https://github.com/matrix-org/synapse/issues/4165))
+
+
 Synapse 0.33.8 (2018-11-01)
 ===========================
 
@@ -10,7 +409,7 @@ Synapse 0.33.8rc2 (2018-10-31)
 Bugfixes
 --------
 
-- Searches that request profile info now no longer fail with a 500. Fixes 
+- Searches that request profile info now no longer fail with a 500. Fixes
   a regression in 0.33.8rc1. ([\#4122](https://github.com/matrix-org/synapse/issues/4122))
 
 
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 6ef7d48dc7..9a283ced6e 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -30,7 +30,7 @@ use github's pull request workflow to review the contribution, and either ask
 you to make any refinements needed or merge it and make them ourselves. The
 changes will then land on master when we next do a release.
 
-We use `CircleCI <https://circleci.com/gh/matrix-org>`_ and `Travis CI 
+We use `CircleCI <https://circleci.com/gh/matrix-org>`_ and `Travis CI
 <https://travis-ci.org/matrix-org/synapse>`_ for continuous integration. All
 pull requests to synapse get automatically tested by Travis and CircleCI.
 If your change breaks the build, this will be shown in GitHub, so please
@@ -74,16 +74,39 @@ entry. These are managed by Towncrier
 To create a changelog entry, make a new file in the ``changelog.d``
 file named in the format of ``PRnumber.type``. The type can be
 one of ``feature``, ``bugfix``, ``removal`` (also used for
-deprecations), or ``misc`` (for internal-only changes). The content of
-the file is your changelog entry, which can contain Markdown
-formatting. Adding credits to the changelog is encouraged, we value
-your contributions and would like to have you shouted out in the
-release notes!
+deprecations), or ``misc`` (for internal-only changes).
+
+The content of the file is your changelog entry, which can contain Markdown
+formatting. The entry should end with a full stop ('.') for consistency.
+
+Adding credits to the changelog is encouraged; we value your
+contributions and would like to have you shouted out in the release notes!
 
 For example, a fix in PR #1234 would have its changelog entry in
 ``changelog.d/1234.bugfix``, and contain content like "The security levels of
 Florbs are now validated when received over federation. Contributed by Jane
-Matrix".
+Matrix.".
+
+Debian changelog
+----------------
+
+Changes which affect the debian packaging files (in ``debian``) are an
+exception.
+
+In this case, you will need to add an entry to the debian changelog for the
+next release. For this, run the following command::
+
+  dch
+
+This will make up a new version number (if there isn't already an unreleased
+version in flight), and open an editor where you can add a new changelog entry.
+(Our release process will ensure that the version number and maintainer name are
+corrected for the release.)
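+
+To sanity-check the result, the top of ``debian/changelog`` should now show
+the new unreleased entry (a quick, hedged check)::
+
+  head -n 8 debian/changelog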
+
+If your change affects both the debian packaging *and* files outside the debian
+directory, you will need both a regular newsfragment *and* an entry in the
+debian changelog. (Though typically such changes should be submitted as two
+separate pull requests.)
 
 Attribution
 ~~~~~~~~~~~
@@ -102,7 +125,7 @@ Sign off
 In order to have a concrete record that your contribution is intentional
 and you agree to license it under the same terms as the project's license, we've adopted the
 same lightweight approach that the Linux Kernel
-(https://www.kernel.org/doc/Documentation/SubmittingPatches), Docker
+`submitting patches process <https://www.kernel.org/doc/html/latest/process/submitting-patches.html#sign-your-work-the-developer-s-certificate-of-origin>`_, Docker
 (https://github.com/docker/docker/blob/master/CONTRIBUTING.md), and many other
 projects use: the DCO (Developer Certificate of Origin:
 http://developercertificate.org/). This is a simple declaration that you wrote
diff --git a/INSTALL.md b/INSTALL.md
new file mode 100644
index 0000000000..6105cd6db8
--- /dev/null
+++ b/INSTALL.md
@@ -0,0 +1,430 @@
+* [Installing Synapse](#installing-synapse)
+  * [Installing from source](#installing-from-source)
+    * [Platform-Specific Instructions](#platform-specific-instructions)
+    * [Troubleshooting Installation](#troubleshooting-installation)
+  * [Prebuilt packages](#prebuilt-packages)
+* [Setting up Synapse](#setting-up-synapse)
+  * [TLS certificates](#tls-certificates)
+  * [Registering a user](#registering-a-user)
+  * [Setting up a TURN server](#setting-up-a-turn-server)
+  * [URL previews](#url-previews)
+
+# Installing Synapse
+
+## Installing from source
+
+(Prebuilt packages are available for some platforms - see [Prebuilt packages](#prebuilt-packages).)
+
+System requirements:
+
+- POSIX-compliant system (tested on Linux & OS X)
+- Python 3.5, 3.6, 3.7, or 2.7
+- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
+
+Synapse is written in Python but some of the libraries it uses are written in
+C. So before we can install Synapse itself we need a working C compiler and the
+header files for Python C extensions. See [Platform-Specific
+Instructions](#platform-specific-instructions) for information on installing
+these on various platforms.
+
+To install the Synapse homeserver run:
+
+```
+mkdir -p ~/synapse
+virtualenv -p python3 ~/synapse/env
+source ~/synapse/env/bin/activate
+pip install --upgrade pip
+pip install --upgrade setuptools
+pip install matrix-synapse[all]
+```
+
+This will download Synapse from [PyPI](https://pypi.org/project/matrix-synapse)
+and install it, along with the python libraries it uses, into a virtual environment
+under `~/synapse/env`.  Feel free to pick a different directory if you
+prefer.
+
+This Synapse installation can then be later upgraded by using pip again with the
+update flag:
+
+```
+source ~/synapse/env/bin/activate
+pip install -U matrix-synapse[all]
+```
+
+Before you can start Synapse, you will need to generate a configuration
+file. To do this, run (in your virtualenv, as before):
+
+```
+cd ~/synapse
+python -m synapse.app.homeserver \
+    --server-name my.domain.name \
+    --config-path homeserver.yaml \
+    --generate-config \
+    --report-stats=[yes|no]
+```
+
+... substituting an appropriate value for `--server-name`. The server name
+determines the "domain" part of user-ids for users on your server: these will
+all be of the format `@user:my.domain.name`. It also determines how other
+matrix servers will reach yours for Federation. For a test configuration,
+set this to the hostname of your server. For a more production-ready setup, you
+will probably want to specify your domain (`example.com`) rather than a
+matrix-specific hostname here (in the same way that your email address is
+probably `user@example.com` rather than `user@email.example.com`) - but
+doing so may require more advanced setup: see [Setting up Federation](docs/federate.md).
+Beware that the server name cannot be changed later.
+
+This command will generate you a config file that you can then customise, but it will
+also generate a set of keys for you. These keys will allow your Home Server to
+identify itself to other Home Servers, so don't lose or delete them. It would be
+wise to back them up somewhere safe. (If, for whatever reason, you do need to
+change your Home Server's keys, you may find that other Home Servers have the
+old key cached. If you update the signing key, you should change the name of the
+key in the `<server name>.signing.key` file (the second word) to something
+different. See the
+[spec](https://matrix.org/docs/spec/server_server/latest.html#retrieving-server-keys)
+for more information on key management.)
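+
+As a hedged sketch (the `a_old`/`a_new` key ids are invented for
+illustration), the rename amounts to editing the second word of the single
+line in the signing key file:
+
+```
+# before: ed25519 a_old <base64 key>
+# after:  ed25519 a_new <base64 key>
+sed -i 's/^ed25519 a_old /ed25519 a_new /' my.domain.name.signing.key
+```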
+
+You will need to give Synapse a TLS certificate before it will start - see [TLS
+certificates](#tls-certificates).
+
+To actually run your new homeserver, pick a working directory for Synapse to
+run (e.g. `~/synapse`), and:
+
+    cd ~/synapse
+    source env/bin/activate
+    synctl start
+
+### Platform-Specific Instructions
+
+#### Debian/Ubuntu/Raspbian
+
+Installing prerequisites on Ubuntu or Debian:
+
+```
+sudo apt-get install build-essential python3-dev libffi-dev \
+                     python-pip python-setuptools sqlite3 \
+                     libssl-dev python-virtualenv libjpeg-dev libxslt1-dev
+```
+
+#### ArchLinux
+
+Installing prerequisites on ArchLinux:
+
+```
+sudo pacman -S base-devel python python-pip \
+               python-setuptools python-virtualenv sqlite3
+```
+
+#### CentOS/Fedora
+
+Installing prerequisites on CentOS 7 or Fedora 25:
+
+```
+sudo yum install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
+                 lcms2-devel libwebp-devel tcl-devel tk-devel redhat-rpm-config \
+                 python-virtualenv libffi-devel openssl-devel
+sudo yum groupinstall "Development Tools"
+```
+
+#### Mac OS X
+
+Installing prerequisites on Mac OS X:
+
+```
+xcode-select --install
+sudo easy_install pip
+sudo pip install virtualenv
+brew install pkg-config libffi
+```
+
+#### OpenSUSE
+
+Installing prerequisites on openSUSE:
+
+```
+sudo zypper in -t pattern devel_basis
+sudo zypper in python-pip python-setuptools sqlite3 python-virtualenv \
+               python-devel libffi-devel libopenssl-devel libjpeg62-devel
+```
+
+#### OpenBSD
+
+Installing prerequisites on OpenBSD:
+
+```
+doas pkg_add python libffi py-pip py-setuptools sqlite3 py-virtualenv \
+              libxslt jpeg
+```
+
+There is currently no port for OpenBSD. Additionally, OpenBSD's security
+settings require a slightly more difficult installation process.
+
+XXX: I suspect this is out of date.
+
+1. Create a new directory in `/usr/local` called `_synapse`. Also, create a
+   new user called `_synapse` and set that directory as the new user's home.
+   This is required because, by default, OpenBSD only allows binaries which need
+   write and execute permissions on the same memory space to be run from
+   `/usr/local`.
+2. `su` to the new `_synapse` user and change to their home directory.
+3. Create a new virtualenv: `virtualenv -p python2.7 ~/.synapse`
+4. Source the virtualenv configuration located at
+   `/usr/local/_synapse/.synapse/bin/activate`. This is done in `ksh` by
+   using the `.` command, rather than `bash`'s `source`.
+5. Optionally, use `pip` to install `lxml`, which Synapse needs to parse
+   webpages for their titles.
+6. Use `pip` to install this repository: `pip install matrix-synapse`
+7. Optionally, change `_synapse`'s shell to `/bin/false` to reduce the
+   chance of a compromised Synapse server being used to take over your box.
+
+After this, you may proceed with the rest of the install directions.
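+
+For reference, an untested, condensed sketch of the steps above (typed
+interactively; usernames and paths follow the list, adjust to taste):
+
+```
+doas mkdir /usr/local/_synapse
+doas useradd -d /usr/local/_synapse -s /bin/ksh _synapse
+doas chown _synapse /usr/local/_synapse
+doas su - _synapse
+virtualenv -p python2.7 ~/.synapse
+. ~/.synapse/bin/activate
+pip install lxml          # optional, for URL preview titles
+pip install matrix-synapse
+```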
+
+#### Windows
+
+If you wish to run or develop Synapse on Windows, the Windows Subsystem For
+Linux provides a Linux environment on Windows 10 which is capable of using the
+Debian, Fedora, or source installation methods. More information about WSL can
+be found at https://docs.microsoft.com/en-us/windows/wsl/install-win10 for
+Windows 10 and https://docs.microsoft.com/en-us/windows/wsl/install-on-server
+for Windows Server.
+
+### Troubleshooting Installation
+
+XXX a bunch of this is no longer relevant.
+
+Synapse requires pip 8 or later, so if your OS provides too old a version you
+may need to manually upgrade it:
+
+    sudo pip install --upgrade pip
+
+Installing may fail with `Could not find any downloads that satisfy the requirement pymacaroons-pynacl (from matrix-synapse==0.12.0)`.
+You can fix this by manually upgrading pip and virtualenv:
+
+    sudo pip install --upgrade virtualenv
+
+You can next rerun `virtualenv -p python3 synapse` to update the virtual env.
+
+Installing may fail during installing virtualenv with `InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. For more information, see https://urllib3.readthedocs.org/en/latest/security.html#insecureplatformwarning.`
+You can fix this by manually installing ndg-httpsclient:
+
+    pip install --upgrade ndg-httpsclient
+
+Installing may fail with `mock requires setuptools>=17.1. Aborting installation`.
+You can fix this by upgrading setuptools:
+
+    pip install --upgrade setuptools
+
+If pip crashes mid-installation for any reason (e.g. a lost terminal), pip may
+refuse to run until you remove the temporary installation directory it
+created. To reset the installation:
+
+    rm -rf /tmp/pip_install_matrix
+
+pip seems to leak *lots* of memory during installation.  For instance, a Linux
+host with 512MB of RAM may run out of memory whilst installing Twisted.  If this
+happens, you will have to individually install the dependencies which are
+failing, e.g.:
+
+    pip install twisted
+
+## Prebuilt packages
+
+As an alternative to installing from source, prebuilt packages are available
+for a number of platforms.
+
+### Docker images and Ansible playbooks
+
+There is an official Synapse image available at
+https://hub.docker.com/r/matrixdotorg/synapse which can be used with
+the docker-compose file available at [contrib/docker](contrib/docker). Further information on
+this, including configuration options, is available in the README on
+hub.docker.com.
+
+Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a
+Dockerfile to automate a synapse server in a single Docker image, at
+https://hub.docker.com/r/avhost/docker-matrix/tags/
+
+Slavi Pantaleev has created an Ansible playbook,
+which installs the official Docker image of Matrix Synapse
+along with many other Matrix-related services (Postgres database, riot-web, coturn, mxisd, SSL support, etc.).
+For more details, see
+https://github.com/spantaleev/matrix-docker-ansible-deploy
+
+
+### Debian/Ubuntu
+
+#### Matrix.org packages
+
+Matrix.org provides Debian/Ubuntu packages of the latest stable version of
+Synapse via https://matrix.org/packages/debian/. To use them:
+
+```
+sudo apt install -y lsb-release curl apt-transport-https
+echo "deb https://matrix.org/packages/debian `lsb_release -cs` main" |
+    sudo tee /etc/apt/sources.list.d/matrix-org.list
+curl "https://matrix.org/packages/debian/repo-key.asc" |
+    sudo apt-key add -
+sudo apt update
+sudo apt install matrix-synapse-py3
+```
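+
+To double-check that the package is coming from the matrix.org repository
+rather than a downstream one, something like the following should work (a
+hedged example; the output format varies between apt versions):
+
+```
+apt-cache policy matrix-synapse-py3
+```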
+
+#### Downstream Debian/Ubuntu packages
+
+For `buster` and `sid`, Synapse is available in the Debian repositories and
+it should be possible to install it simply with:
+
+```
+sudo apt install matrix-synapse
+```
+
+There is also a version of `matrix-synapse` in `stretch-backports`. Please see
+the [Debian documentation on
+backports](https://backports.debian.org/Instructions/) for information on how
+to use them.
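+
+As a rough, hedged sketch of what using the backport involves (see the Debian
+backports documentation for the authoritative steps):
+
+```
+echo "deb http://deb.debian.org/debian stretch-backports main" |
+    sudo tee /etc/apt/sources.list.d/backports.list
+sudo apt update
+sudo apt -t stretch-backports install matrix-synapse
+```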
+
+We do not recommend using the packages in downstream Ubuntu at this time, as
+they are old and suffer from known security vulnerabilities.
+
+### Fedora
+
+Synapse is in the Fedora repositories as `matrix-synapse`:
+
+```
+sudo dnf install matrix-synapse
+```
+
+Oleg Girko provides Fedora RPMs at
+https://obs.infoserver.lv/project/monitor/matrix-synapse
+
+### OpenSUSE
+
+Synapse is in the OpenSUSE repositories as `matrix-synapse`:
+
+```
+sudo zypper install matrix-synapse
+```
+
+### SUSE Linux Enterprise Server
+
+Unofficial packages are built for SLES 15 in the openSUSE:Backports:SLE-15 repository at
+https://download.opensuse.org/repositories/openSUSE:/Backports:/SLE-15/standard/
+
+### ArchLinux
+
+The quickest way to get up and running with ArchLinux is probably with the community package
+https://www.archlinux.org/packages/community/any/matrix-synapse/, which should pull in most of
+the necessary dependencies.
+
+pip may be outdated (6.0.7-1 needs to be upgraded to 6.0.8-1):
+
+```
+sudo pip install --upgrade pip
+```
+
+If you encounter an error with the bcrypt library causing a "Wrong ELF
+Class: ELFCLASS32" error (on x64 systems), you may need to reinstall
+py-bcrypt so it is compiled for the right architecture. (This should not be
+needed if installing under virtualenv):
+
+```
+sudo pip uninstall py-bcrypt
+sudo pip install py-bcrypt
+```
+
+### FreeBSD
+
+Synapse can be installed via FreeBSD Ports or Packages contributed by Brendan Molloy from:
+
+ - Ports: `cd /usr/ports/net-im/py-matrix-synapse && make install clean`
+ - Packages: `pkg install py27-matrix-synapse`
+
+
+### NixOS
+
+Robin Lambertz has packaged Synapse for NixOS at:
+https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/misc/matrix-synapse.nix
+
+# Setting up Synapse
+
+Once you have installed Synapse as above, you will need to configure it.
+
+## TLS certificates
+
+The default configuration exposes a single HTTP port: http://localhost:8008. It
+is suitable for local testing, but for any practical use, you will either need
+to enable a reverse proxy, or configure Synapse to expose an HTTPS port.
+
+For information on using a reverse proxy, see
+[docs/reverse_proxy.rst](docs/reverse_proxy.rst).
+
+To configure Synapse to expose an HTTPS port, you will need to edit
+`homeserver.yaml`, as follows:
+
+* First, under the `listeners` section, uncomment the configuration for the
+  TLS-enabled listener. (Remove the hash sign (`#`) at the start of
+  each line). The relevant lines are like this:
+
+  ```
+    - port: 8448
+      type: http
+      tls: true
+      resources:
+        - names: [client, federation]
+  ```
+* You will also need to uncomment the `tls_certificate_path` and
+  `tls_private_key_path` lines under the `TLS` section. You can either
+  point these settings at an existing certificate and key, or you can
+  enable Synapse's built-in ACME (Let's Encrypt) support.  Instructions
+  for having Synapse automatically provision and renew federation
+  certificates through ACME can be found at [ACME.md](docs/ACME.md).
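+
+As a hedged sketch (GNU sed; the certificate and key paths are invented, and
+it assumes the commented-out lines start with `#tls_certificate_path:` and
+`#tls_private_key_path:`), the edit can be done in one go:
+
+```
+sed -i \
+    -e 's|^#\s*tls_certificate_path:.*|tls_certificate_path: "/etc/matrix/fullchain.pem"|' \
+    -e 's|^#\s*tls_private_key_path:.*|tls_private_key_path: "/etc/matrix/privkey.pem"|' \
+    homeserver.yaml
+```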
+
+For those of you upgrading your TLS certificate in readiness for Synapse 1.0,
+please take a look at [our guide](docs/MSC1711_certificates_FAQ.md#configuring-certificates-for-compatibility-with-synapse-100).
+
+## Registering a user
+
+You will need at least one user on your server in order to use a Matrix
+client. Users can be registered either via a Matrix client, or via a
+commandline script.
+
+To get started, it is easiest to use the command line to register new
+users. This can be done as follows:
+
+```
+$ source ~/synapse/env/bin/activate
+$ synctl start # if not already running
+$ register_new_matrix_user -c homeserver.yaml http://localhost:8008
+New user localpart: erikj
+Password:
+Confirm password:
+Make admin [no]:
+Success!
+```
+
+This process uses a setting `registration_shared_secret` in
+`homeserver.yaml`, which is shared between Synapse itself and the
+`register_new_matrix_user` script. It doesn't matter what it is (a random
+value is generated by `--generate-config`), but it should be kept secret, as
+anyone with knowledge of it can register users on your server even if
+`enable_registration` is `false`.
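+
+If you want to see the value that `--generate-config` picked for you (a
+hedged one-liner; the setting is a plain string in the YAML file):
+
+```
+grep registration_shared_secret homeserver.yaml
+```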
+
+## Setting up a TURN server
+
+For reliable VoIP calls to be routed via this homeserver, you MUST configure
+a TURN server.  See [docs/turn-howto.rst](docs/turn-howto.rst) for details.
+
+## URL previews
+
+Synapse includes support for previewing URLs, which is disabled by default.  To
+turn it on you must enable the `url_preview_enabled: True` config parameter
+and explicitly specify the IP ranges that Synapse is not allowed to spider for
+previewing in the `url_preview_ip_range_blacklist` configuration parameter.
+This is critical from a security perspective to stop arbitrary Matrix users
+spidering 'internal' URLs on your network.  At the very least we recommend that
+your loopback and RFC1918 IP addresses are blacklisted.
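+
+As a hedged sketch of the relevant settings (the RFC1918 ranges below are
+illustrative rather than exhaustive - check the comments in the generated
+config for the full recommended list):
+
+```
+cat >> homeserver.yaml <<'EOF'
+url_preview_enabled: True
+url_preview_ip_range_blacklist:
+  - '127.0.0.0/8'
+  - '10.0.0.0/8'
+  - '172.16.0.0/12'
+  - '192.168.0.0/16'
+EOF
+```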
+
+This also requires the optional lxml and netaddr python dependencies to be
+installed.  This in turn requires the libxml2 library to be available - on
+Debian/Ubuntu this means `apt-get install libxml2-dev`, or equivalent for
+your OS.
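+
+If you are installing those python dependencies by hand, a hedged one-liner
+(run inside the virtualenv):
+
+```
+pip install lxml netaddr
+```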
diff --git a/MANIFEST.in b/MANIFEST.in
index 25cdf0a61b..eb2de60f72 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -15,6 +15,7 @@ recursive-include docs *
 recursive-include scripts *
 recursive-include scripts-dev *
 recursive-include synapse *.pyi
+recursive-include tests *.pem
 recursive-include tests *.py
 
 recursive-include synapse/res *
@@ -26,6 +27,7 @@ recursive-include synapse/static *.js
 exclude Dockerfile
 exclude .dockerignore
 exclude test_postgresql.sh
+exclude .editorconfig
 
 include pyproject.toml
 recursive-include changelog.d *
@@ -34,6 +36,9 @@ prune .github
 prune demo/etc
 prune docker
 prune .circleci
+prune .coveragerc
+prune debian
+prune .codecov.yml
 
 exclude jenkins*
 recursive-exclude jenkins *.sh
diff --git a/README.rst b/README.rst
index 9165db8319..7cb2c82b79 100644
--- a/README.rst
+++ b/README.rst
@@ -26,7 +26,6 @@ via IRC bridge at irc://irc.freenode.net/matrix.
 Synapse is currently in rapid development, but as of version 0.5 we believe it
 is sufficiently stable to be run as an internet-facing service for real usage!
 
-
 About Matrix
 ============
 
@@ -81,221 +80,34 @@ Thanks for using Matrix!
 Synapse Installation
 ====================
 
-Synapse is the reference Python/Twisted Matrix homeserver implementation.
-
-System requirements:
-
-- POSIX-compliant system (tested on Linux & OS X)
-- Python 2.7
-- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
-
-Installing from source
-----------------------
-
-(Prebuilt packages are available for some platforms - see `Platform-Specific
-Instructions`_.)
-
-Synapse is written in Python but some of the libraries it uses are written in
-C. So before we can install Synapse itself we need a working C compiler and the
-header files for Python C extensions.
-
-Installing prerequisites on Ubuntu or Debian::
-
-    sudo apt-get install build-essential python2.7-dev libffi-dev \
-                         python-pip python-setuptools sqlite3 \
-                         libssl-dev python-virtualenv libjpeg-dev libxslt1-dev
-
-Installing prerequisites on ArchLinux::
-
-    sudo pacman -S base-devel python2 python-pip \
-                   python-setuptools python-virtualenv sqlite3
-
-Installing prerequisites on CentOS 7 or Fedora 25::
-
-    sudo yum install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
-                     lcms2-devel libwebp-devel tcl-devel tk-devel redhat-rpm-config \
-                     python-virtualenv libffi-devel openssl-devel
-    sudo yum groupinstall "Development Tools"
-
-Installing prerequisites on Mac OS X::
-
-    xcode-select --install
-    sudo easy_install pip
-    sudo pip install virtualenv
-    brew install pkg-config libffi
-
-Installing prerequisites on Raspbian::
-
-    sudo apt-get install build-essential python2.7-dev libffi-dev \
-                         python-pip python-setuptools sqlite3 \
-                         libssl-dev python-virtualenv libjpeg-dev
-    sudo pip install --upgrade pip
-    sudo pip install --upgrade ndg-httpsclient
-    sudo pip install --upgrade virtualenv
-
-Installing prerequisites on openSUSE::
-
-    sudo zypper in -t pattern devel_basis
-    sudo zypper in python-pip python-setuptools sqlite3 python-virtualenv \
-                   python-devel libffi-devel libopenssl-devel libjpeg62-devel
-
-Installing prerequisites on OpenBSD::
-
-    doas pkg_add python libffi py-pip py-setuptools sqlite3 py-virtualenv \
-                 libxslt
-
-To install the Synapse homeserver run::
-
-    virtualenv -p python2.7 ~/.synapse
-    source ~/.synapse/bin/activate
-    pip install --upgrade pip
-    pip install --upgrade setuptools
-    pip install matrix-synapse
-
-This installs Synapse, along with the libraries it uses, into a virtual
-environment under ``~/.synapse``.  Feel free to pick a different directory
-if you prefer.
-
-This Synapse installation can then be later upgraded by using pip again with the
-update flag::
-
-    source ~/.synapse/bin/activate
-    pip install -U matrix-synapse
-
-In case of problems, please see the _`Troubleshooting` section below.
-
-There is an offical synapse image available at
-https://hub.docker.com/r/matrixdotorg/synapse/tags/ which can be used with
-the docker-compose file available at `contrib/docker <contrib/docker>`_. Further information on
-this including configuration options is available in the README on
-hub.docker.com.
-
-Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a
-Dockerfile to automate a synapse server in a single Docker image, at
-https://hub.docker.com/r/avhost/docker-matrix/tags/
-
-Slavi Pantaleev has created an Ansible playbook,
-which installs the offical Docker image of Matrix Synapse
-along with many other Matrix-related services (Postgres database, riot-web, coturn, mxisd, SSL support, etc.).
-For more details, see
-https://github.com/spantaleev/matrix-docker-ansible-deploy
-
-Configuring Synapse
--------------------
-
-Before you can start Synapse, you will need to generate a configuration
-file. To do this, run (in your virtualenv, as before)::
-
-    cd ~/.synapse
-    python -m synapse.app.homeserver \
-        --server-name my.domain.name \
-        --config-path homeserver.yaml \
-        --generate-config \
-        --report-stats=[yes|no]
-
-... substituting an appropriate value for ``--server-name``. The server name
-determines the "domain" part of user-ids for users on your server: these will
-all be of the format ``@user:my.domain.name``. It also determines how other
-matrix servers will reach yours for `Federation`_. For a test configuration,
-set this to the hostname of your server. For a more production-ready setup, you
-will probably want to specify your domain (``example.com``) rather than a
-matrix-specific hostname here (in the same way that your email address is
-probably ``user@example.com`` rather than ``user@email.example.com``) - but
-doing so may require more advanced setup - see `Setting up
-Federation`_. Beware that the server name cannot be changed later.
-
-This command will generate you a config file that you can then customise, but it will
-also generate a set of keys for you. These keys will allow your Home Server to
-identify itself to other Home Servers, so don't lose or delete them. It would be
-wise to back them up somewhere safe. (If, for whatever reason, you do need to
-change your Home Server's keys, you may find that other Home Servers have the
-old key cached. If you update the signing key, you should change the name of the
-key in the ``<server name>.signing.key`` file (the second word) to something
-different. See `the spec`__ for more information on key management.)
-
-.. __: `key_management`_
-
-The default configuration exposes two HTTP ports: 8008 and 8448. Port 8008 is
-configured without TLS; it should be behind a reverse proxy for TLS/SSL
-termination on port 443 which in turn should be used for clients. Port 8448
-is configured to use TLS with a self-signed certificate. If you would like
-to do initial test with a client without having to setup a reverse proxy,
-you can temporarly use another certificate. (Note that a self-signed
-certificate is fine for `Federation`_). You can do so by changing
-``tls_certificate_path``, ``tls_private_key_path`` and ``tls_dh_params_path``
-in ``homeserver.yaml``; alternatively, you can use a reverse-proxy, but be sure
-to read `Using a reverse proxy with Synapse`_ when doing so.
-
-Apart from port 8448 using TLS, both ports are the same in the default
-configuration.
-
-Registering a user
-------------------
-
-You will need at least one user on your server in order to use a Matrix
-client. Users can be registered either `via a Matrix client`__, or via a
-commandline script.
-
-.. __: `client-user-reg`_
-
-To get started, it is easiest to use the command line to register new users::
-
-    $ source ~/.synapse/bin/activate
-    $ synctl start # if not already running
-    $ register_new_matrix_user -c homeserver.yaml https://localhost:8448
-    New user localpart: erikj
-    Password:
-    Confirm password:
-    Make admin [no]:
-    Success!
-
-This process uses a setting ``registration_shared_secret`` in
-``homeserver.yaml``, which is shared between Synapse itself and the
-``register_new_matrix_user`` script. It doesn't matter what it is (a random
-value is generated by ``--generate-config``), but it should be kept secret, as
-anyone with knowledge of it can register users on your server even if
-``enable_registration`` is ``false``.
-
-Setting up a TURN server
-------------------------
-
-For reliable VoIP calls to be routed via this homeserver, you MUST configure
-a TURN server.  See `<docs/turn-howto.rst>`_ for details.
-
-Running Synapse
-===============
-
-To actually run your new homeserver, pick a working directory for Synapse to
-run (e.g. ``~/.synapse``), and::
+.. _federation:
 
-    cd ~/.synapse
-    source ./bin/activate
-    synctl start
+* For details on how to install synapse, see `<INSTALL.md>`_.
+* For specific details on how to configure Synapse for federation see `docs/federate.md <docs/federate.md>`_
 
 
 Connecting to Synapse from a client
 ===================================
 
 The easiest way to try out your new Synapse installation is by connecting to it
-from a web client. The easiest option is probably the one at
-https://riot.im/app. You will need to specify a "Custom server" when you log on
-or register: set this to ``https://domain.tld`` if you setup a reverse proxy
-following the recommended setup, or ``https://localhost:8448`` - remember to specify the
-port (``:8448``) if not ``:443`` unless you changed the configuration. (Leave the identity
-server as the default - see `Identity servers`_.)
-
-If using port 8448 you will run into errors until you accept the self-signed
-certificate. You can easily do this by going to ``https://localhost:8448``
-directly with your browser and accept the presented certificate. You can then
-go back in your web client and proceed further.
+from a web client.
+
+Unless you are running a test instance of Synapse on your local machine, in
+general, you will need to enable TLS support before you can successfully
+connect from a client: see `<INSTALL.md#tls-certificates>`_.
+
+An easy way to get started is to log in or register via Riot at
+https://riot.im/app/#/login or https://riot.im/app/#/register respectively.
+You will need to change the server you are logging into from ``matrix.org``
+and instead specify a Homeserver URL of ``https://<server_name>:8448``
+(or just ``https://<server_name>`` if you are using a reverse proxy).
+(Leave the identity server as the default - see `Identity servers`_.)
+If you prefer to use another client, refer to our
+`client breakdown <https://matrix.org/docs/projects/clients-matrix>`_.
 
 If all goes well you should at least be able to log in, create a room, and
 start sending messages.
 
-(The homeserver runs a web client by default at https://localhost:8448/, though
-as of the time of writing it is somewhat outdated and not really recommended -
-https://github.com/matrix-org/synapse/issues/1527).
-
 .. _`client-user-reg`:
 
 Registering a new user from a client
@@ -319,6 +131,12 @@ create the account. Your name will take the form of::
 As when logging in, you will need to specify a "Custom server".  Specify your
 desired ``localpart`` in the 'User name' box.
 
+ACME setup
+==========
+
+For details on having Synapse manage your federation TLS certificates
+automatically, please see `<docs/ACME.md>`_.
+
 
 Security Note
 =============
@@ -333,208 +151,10 @@ content served to web browsers a matrix API from being able to attack webapps ho
 on the same domain.  This is particularly true of sharing a matrix webclient and
 server on the same domain.
 
-See https://github.com/vector-im/vector-web/issues/1977 and
+See https://github.com/vector-im/riot-web/issues/1977 and
 https://developer.github.com/changes/2014-04-25-user-content-security for more details.
 
 
-Platform-Specific Instructions
-==============================
-
-Debian
-------
-
-Matrix provides official Debian packages via apt from https://matrix.org/packages/debian/.
-Note that these packages do not include a client - choose one from
-https://matrix.org/docs/projects/try-matrix-now.html (or build your own with one of our SDKs :)
-
-Fedora
-------
-
-Synapse is in the Fedora repositories as ``matrix-synapse``::
-
-    sudo dnf install matrix-synapse
-
-Oleg Girko provides Fedora RPMs at
-https://obs.infoserver.lv/project/monitor/matrix-synapse
-
-OpenSUSE
---------
-
-Synapse is in the OpenSUSE repositories as ``matrix-synapse``::
-
-    sudo zypper install matrix-synapse
-
-SUSE Linux Enterprise Server
-----------------------------
-
-Unofficial package are built for SLES 15 in the openSUSE:Backports:SLE-15 repository at
-https://download.opensuse.org/repositories/openSUSE:/Backports:/SLE-15/standard/
-
-ArchLinux
----------
-
-The quickest way to get up and running with ArchLinux is probably with the community package
-https://www.archlinux.org/packages/community/any/matrix-synapse/, which should pull in most of
-the necessary dependencies. If the default web client is to be served (enabled by default in
-the generated config),
-https://www.archlinux.org/packages/community/any/python2-matrix-angular-sdk/ will also need to
-be installed.
-
-Alternatively, to install using pip a few changes may be needed as ArchLinux
-defaults to python 3, but synapse currently assumes python 2.7 by default:
-
-pip may be outdated (6.0.7-1 and needs to be upgraded to 6.0.8-1 )::
-
-    sudo pip2.7 install --upgrade pip
-
-You also may need to explicitly specify python 2.7 again during the install
-request::
-
-    pip2.7 install https://github.com/matrix-org/synapse/tarball/master
-
-If you encounter an error with lib bcrypt causing an Wrong ELF Class:
-ELFCLASS32 (x64 Systems), you may need to reinstall py-bcrypt to correctly
-compile it under the right architecture. (This should not be needed if
-installing under virtualenv)::
-
-    sudo pip2.7 uninstall py-bcrypt
-    sudo pip2.7 install py-bcrypt
-
-During setup of Synapse you need to call python2.7 directly again::
-
-    cd ~/.synapse
-    python2.7 -m synapse.app.homeserver \
-      --server-name machine.my.domain.name \
-      --config-path homeserver.yaml \
-      --generate-config
-
-...substituting your host and domain name as appropriate.
-
-FreeBSD
--------
-
-Synapse can be installed via FreeBSD Ports or Packages contributed by Brendan Molloy from:
-
- - Ports: ``cd /usr/ports/net-im/py-matrix-synapse && make install clean``
- - Packages: ``pkg install py27-matrix-synapse``
-
-
-OpenBSD
--------
-
-There is currently no port for OpenBSD. Additionally, OpenBSD's security
-settings require a slightly more difficult installation process.
-
-1) Create a new directory in ``/usr/local`` called ``_synapse``. Also, create a
-   new user called ``_synapse`` and set that directory as the new user's home.
-   This is required because, by default, OpenBSD only allows binaries which need
-   write and execute permissions on the same memory space to be run from
-   ``/usr/local``.
-2) ``su`` to the new ``_synapse`` user and change to their home directory.
-3) Create a new virtualenv: ``virtualenv -p python2.7 ~/.synapse``
-4) Source the virtualenv configuration located at
-   ``/usr/local/_synapse/.synapse/bin/activate``. This is done in ``ksh`` by
-   using the ``.`` command, rather than ``bash``'s ``source``.
-5) Optionally, use ``pip`` to install ``lxml``, which Synapse needs to parse
-   webpages for their titles.
-6) Use ``pip`` to install this repository: ``pip install matrix-synapse``
-7) Optionally, change ``_synapse``'s shell to ``/bin/false`` to reduce the
-   chance of a compromised Synapse server being used to take over your box.
-
-After this, you may proceed with the rest of the install directions.
-
-NixOS
------
-
-Robin Lambertz has packaged Synapse for NixOS at:
-https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/misc/matrix-synapse.nix
-
-Windows Install
----------------
-
-If you wish to run or develop Synapse on Windows, the Windows Subsystem For
-Linux provides a Linux environment on Windows 10 which is capable of using the
-Debian, Fedora, or source installation methods. More information about WSL can
-be found at https://docs.microsoft.com/en-us/windows/wsl/install-win10 for
-Windows 10 and https://docs.microsoft.com/en-us/windows/wsl/install-on-server
-for Windows Server.
-
-Troubleshooting
-===============
-
-Troubleshooting Installation
-----------------------------
-
-Synapse requires pip 8 or later, so if your OS provides too old a version you
-may need to manually upgrade it::
-
-    sudo pip install --upgrade pip
-
-Installing may fail with ``Could not find any downloads that satisfy the requirement pymacaroons-pynacl (from matrix-synapse==0.12.0)``.
-You can fix this by manually upgrading pip and virtualenv::
-
-    sudo pip install --upgrade virtualenv
-
-You can next rerun ``virtualenv -p python2.7 synapse`` to update the virtual env.
-
-Installing may fail during installing virtualenv with ``InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. For more information, see https://urllib3.readthedocs.org/en/latest/security.html#insecureplatformwarning.``
-You can fix this  by manually installing ndg-httpsclient::
-
-    pip install --upgrade ndg-httpsclient
-
-Installing may fail with ``mock requires setuptools>=17.1. Aborting installation``.
-You can fix this by upgrading setuptools::
-
-    pip install --upgrade setuptools
-
-If pip crashes mid-installation for reason (e.g. lost terminal), pip may
-refuse to run until you remove the temporary installation directory it
-created. To reset the installation::
-
-    rm -rf /tmp/pip_install_matrix
-
-pip seems to leak *lots* of memory during installation.  For instance, a Linux
-host with 512MB of RAM may run out of memory whilst installing Twisted.  If this
-happens, you will have to individually install the dependencies which are
-failing, e.g.::
-
-    pip install twisted
-
-Running out of File Handles
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-If synapse runs out of filehandles, it typically fails badly - live-locking
-at 100% CPU, and/or failing to accept new TCP connections (blocking the
-connecting client).  Matrix currently can legitimately use a lot of file handles,
-thanks to busy rooms like #matrix:matrix.org containing hundreds of participating
-servers.  The first time a server talks in a room it will try to connect
-simultaneously to all participating servers, which could exhaust the available
-file descriptors between DNS queries & HTTPS sockets, especially if DNS is slow
-to respond.  (We need to improve the routing algorithm used to be better than
-full mesh, but as of June 2017 this hasn't happened yet).
-
-If you hit this failure mode, we recommend increasing the maximum number of
-open file handles to be at least 4096 (assuming a default of 1024 or 256).
-This is typically done by editing ``/etc/security/limits.conf``
-
-Separately, Synapse may leak file handles if inbound HTTP requests get stuck
-during processing - e.g. blocked behind a lock or talking to a remote server etc.
-This is best diagnosed by matching up the 'Received request' and 'Processed request'
-log lines and looking for any 'Processed request' lines which take more than
-a few seconds to execute.  Please let us know at #matrix-dev:matrix.org if
-you see this failure mode so we can help debug it, however.
-
-ArchLinux
-~~~~~~~~~
-
-If running `$ synctl start` fails with 'returned non-zero exit status 1',
-you will need to explicitly call Python2.7 - either running as::
-
-    python2.7 -m synapse.app.homeserver --daemonize -c homeserver.yaml
-
-...or by editing synctl with the correct python executable.
-
-
 Upgrading an existing Synapse
 =============================
 
@@ -544,100 +164,19 @@ versions of synapse.
 
 .. _UPGRADE.rst: UPGRADE.rst
 
-.. _federation:
-
-Setting up Federation
-=====================
-
-Federation is the process by which users on different servers can participate
-in the same room. For this to work, those other servers must be able to contact
-yours to send messages.
-
-As explained in `Configuring synapse`_, the ``server_name`` in your
-``homeserver.yaml`` file determines the way that other servers will reach
-yours. By default, they will treat it as a hostname and try to connect to
-port 8448. This is easy to set up and will work with the default configuration,
-provided you set the ``server_name`` to match your machine's public DNS
-hostname.
-
-For a more flexible configuration, you can set up a DNS SRV record. This allows
-you to run your server on a machine that might not have the same name as your
-domain name. For example, you might want to run your server at
-``synapse.example.com``, but have your Matrix user-ids look like
-``@user:example.com``. (A SRV record also allows you to change the port from
-the default 8448. However, if you are thinking of using a reverse-proxy on the
-federation port, which is not recommended, be sure to read
-`Reverse-proxying the federation port`_ first.)
-
-To use a SRV record, first create your SRV record and publish it in DNS. This
-should have the format ``_matrix._tcp.<yourdomain.com> <ttl> IN SRV 10 0 <port>
-<synapse.server.name>``. The DNS record should then look something like::
-
-    $ dig -t srv _matrix._tcp.example.com
-    _matrix._tcp.example.com. 3600    IN      SRV     10 0 8448 synapse.example.com.
-
-Note that the server hostname cannot be an alias (CNAME record): it has to point
-directly to the server hosting the synapse instance.
-
-You can then configure your homeserver to use ``<yourdomain.com>`` as the domain in
-its user-ids, by setting ``server_name``::
-
-    python -m synapse.app.homeserver \
-        --server-name <yourdomain.com> \
-        --config-path homeserver.yaml \
-        --generate-config
-    python -m synapse.app.homeserver --config-path homeserver.yaml
-
-If you've already generated the config file, you need to edit the ``server_name``
-in your ``homeserver.yaml`` file. If you've already started Synapse and a
-database has been created, you will have to recreate the database.
-
-If all goes well, you should be able to `connect to your server with a client`__,
-and then join a room via federation. (Try ``#matrix-dev:matrix.org`` as a first
-step. "Matrix HQ"'s sheer size and activity level tends to make even the
-largest boxes pause for thought.)
-
-.. __: `Connecting to Synapse from a client`_
-
-Troubleshooting
----------------
-
-You can use the federation tester to check if your homeserver is all set:
-``https://matrix.org/federationtester/api/report?server_name=<your_server_name>``
-If any of the attributes under "checks" is false, federation won't work.
-
-The typical failure mode with federation is that when you try to join a room,
-it is rejected with "401: Unauthorized". Generally this means that other
-servers in the room couldn't access yours. (Joining a room over federation is a
-complicated dance which requires connections in both directions).
-
-So, things to check are:
-
-* If you are trying to use a reverse-proxy, read `Reverse-proxying the
-  federation port`_.
-* If you are not using a SRV record, check that your ``server_name`` (the part
-  of your user-id after the ``:``) matches your hostname, and that port 8448 on
-  that hostname is reachable from outside your network.
-* If you *are* using a SRV record, check that it matches your ``server_name``
-  (it should be ``_matrix._tcp.<server_name>``), and that the port and hostname
-  it specifies are reachable from outside your network.
-
-Running a Demo Federation of Synapses
--------------------------------------
-
-If you want to get up and running quickly with a trio of homeservers in a
-private federation, there is a script in the ``demo`` directory. This is mainly
-useful just for development purposes. See `<demo/README>`_.
-
 
 Using PostgreSQL
 ================
 
-As of Synapse 0.9, `PostgreSQL <https://www.postgresql.org>`_ is supported as an
-alternative to the `SQLite <https://sqlite.org/>`_ database that Synapse has
-traditionally used for convenience and simplicity.
+Synapse offers two database engines:
+
+* `SQLite <https://sqlite.org/>`_
+* `PostgreSQL <https://www.postgresql.org>`_
+
+By default Synapse uses SQLite, and in doing so trades performance for
+convenience. SQLite is only recommended in Synapse for testing purposes or for
+servers with light workloads.
 
-The advantages of Postgres include:
+Almost all installations should opt to use PostgreSQL. Advantages include:
 
 * significant performance improvements due to the superior threading and
   caching model, smarter query optimiser
@@ -649,7 +188,6 @@ The advantages of Postgres include:
 For information on how to install and use PostgreSQL, please see
 `docs/postgres.rst <docs/postgres.rst>`_.
 
-
 .. _reverse-proxy:
 
 Using a reverse proxy with Synapse
@@ -663,117 +201,7 @@ It is recommended to put a reverse proxy such as
 doing so is that it means that you can expose the default https port (443) to
 Matrix clients without needing to run Synapse with root privileges.
 
-The most important thing to know here is that Matrix clients and other Matrix
-servers do not necessarily need to connect to your server via the same
-port. Indeed, clients will use port 443 by default, whereas servers default to
-port 8448. Where these are different, we refer to the 'client port' and the
-'federation port'.
-
-The next most important thing to know is that using a reverse-proxy on the
-federation port has a number of pitfalls. It is possible, but be sure to read
-`Reverse-proxying the federation port`_.
-
-The recommended setup is therefore to configure your reverse-proxy on port 443
-to port 8008 of synapse for client connections, but to also directly expose port
-8448 for server-server connections. All the Matrix endpoints begin ``/_matrix``,
-so an example nginx configuration might look like::
-
-  server {
-      listen 443 ssl;
-      listen [::]:443 ssl;
-      server_name matrix.example.com;
-
-      location /_matrix {
-          proxy_pass http://localhost:8008;
-          proxy_set_header X-Forwarded-For $remote_addr;
-      }
-  }
-
-an example Caddy configuration might look like::
-
-    matrix.example.com {
-      proxy /_matrix http://localhost:8008 {
-        transparent
-      }
-    }
-
-and an example Apache configuration might look like::
-
-    <VirtualHost *:443>
-        SSLEngine on
-        ServerName matrix.example.com;
-
-        <Location /_matrix>
-            ProxyPass http://127.0.0.1:8008/_matrix nocanon
-            ProxyPassReverse http://127.0.0.1:8008/_matrix
-        </Location>
-    </VirtualHost>
-
-You will also want to set ``bind_addresses: ['127.0.0.1']`` and ``x_forwarded: true``
-for port 8008 in ``homeserver.yaml`` to ensure that client IP addresses are
-recorded correctly.
-
-Having done so, you can then use ``https://matrix.example.com`` (instead of
-``https://matrix.example.com:8448``) as the "Custom server" when `Connecting to
-Synapse from a client`_.
-
-Reverse-proxying the federation port
-------------------------------------
-
-There are two issues to consider before using a reverse-proxy on the federation
-port:
-
-* Due to the way SSL certificates are managed in the Matrix federation protocol
-  (see `spec`__), Synapse needs to be configured with the path to the SSL
-  certificate, *even if you do not terminate SSL at Synapse*.
-
-  .. __: `key_management`_
-
-* Synapse does not currently support SNI on the federation protocol
-  (`bug #1491 <https://github.com/matrix-org/synapse/issues/1491>`_), which
-  means that using name-based virtual hosting is unreliable.
-
-Furthermore, a number of the normal reasons for using a reverse-proxy do not
-apply:
-
-* Other servers will connect on port 8448 by default, so there is no need to
-  listen on port 443 (for federation, at least), which avoids the need for root
-  privileges and virtual hosting.
-
-* A self-signed SSL certificate is fine for federation, so there is no need to
-  automate renewals. (The certificate generated by ``--generate-config`` is
-  valid for 10 years.)
-
-If you want to set up a reverse-proxy on the federation port despite these
-caveats, you will need to do the following:
-
-* In ``homeserver.yaml``, set ``tls_certificate_path`` to the path to the SSL
-  certificate file used by your reverse-proxy, and set ``no_tls`` to ``True``.
-  (``tls_private_key_path`` will be ignored if ``no_tls`` is ``True``.)
-
-* In your reverse-proxy configuration:
-
-  * If there are other virtual hosts on the same port, make sure that the
-    *default* one uses the certificate configured above.
-
-  * Forward ``/_matrix`` to Synapse.
-
-* If your reverse-proxy is not listening on port 8448, publish a SRV record to
-  tell other servers how to find you. See `Setting up Federation`_.
-
-When updating the SSL certificate, just update the file pointed to by
-``tls_certificate_path``: there is no need to restart synapse. (You may like to
-use a symbolic link to help make this process atomic.)
-
-The most common mistake when setting up federation is not to tell Synapse about
-your SSL certificate. To check it, you can visit
-``https://matrix.org/federationtester/api/report?server_name=<your_server_name>``.
-Unfortunately, there is no UI for this yet, but, you should see
-``"MatchingTLSFingerprint": true``. If not, check that
-``Certificates[0].SHA256Fingerprint`` (the fingerprint of the certificate
-presented by your reverse-proxy) matches ``Keys.tls_fingerprints[0].sha256``
-(the fingerprint of the certificate Synapse is using).
-
+For information on configuring one, see `<docs/reverse_proxy.rst>`_.
 
 Identity Servers
 ================
@@ -805,36 +233,17 @@ an email address with your account, or send an invite to another user via their
 email address.
 
 
-URL Previews
-============
-
-Synapse 0.15.0 introduces a new API for previewing URLs at
-``/_matrix/media/r0/preview_url``.  This is disabled by default.  To turn it on
-you must enable the ``url_preview_enabled: True`` config parameter and
-explicitly specify the IP ranges that Synapse is not allowed to spider for
-previewing in the ``url_preview_ip_range_blacklist`` configuration parameter.
-This is critical from a security perspective to stop arbitrary Matrix users
-spidering 'internal' URLs on your network.  At the very least we recommend that
-your loopback and RFC1918 IP addresses are blacklisted.
-
-This also requires the optional lxml and netaddr python dependencies to be
-installed.  This in turn requires the libxml2 library to be available - on
-Debian/Ubuntu this means ``apt-get install libxml2-dev``, or equivalent for
-your OS.
-
-
 Password reset
 ==============
 
 If a user has registered an email address to their account using an identity
-server, they can request a password-reset token via clients such as Vector.
+server, they can request a password-reset token via clients such as Riot.
 
 A manual password reset can be done via direct database access as follows.
 
 First calculate the hash of the new password::
 
-    $ source ~/.synapse/bin/activate
-    $ ./scripts/hash_password
+    $ ~/synapse/env/bin/hash_password
     Password:
     Confirm password:
     $2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
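 
 The hash can then be written back to the ``users`` table; a minimal sketch
 for the PostgreSQL case (the Matrix ID ``@bob:example.com`` and the hash
 shown are illustrative)::
 
     UPDATE users SET password_hash='$2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
         WHERE name='@bob:example.com';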
@@ -850,7 +259,7 @@ Synapse Development
 
 Before setting up a development environment for synapse, make sure you have the
 system dependencies (such as the python header files) installed - see
-`Installing from source`_.
+`Installing from source <INSTALL.md#installing-from-source>`_.
 
 To check out a synapse for development, clone the git repo into a working
 directory of your choice::
@@ -861,10 +270,9 @@ directory of your choice::
 Synapse has a number of external dependencies that are easiest
 to install using pip and a virtualenv::
 
-    virtualenv -p python2.7 env
+    virtualenv -p python3 env
     source env/bin/activate
-    python -m synapse.python_dependencies | xargs pip install
-    pip install lxml mock
+    python -m pip install -e .[all]
 
 This will download and install all the needed dependencies into a
 virtual env.
@@ -872,7 +280,7 @@ dependencies into a virtual env.
 Once this is done, you may wish to run Synapse's unit tests, to
 check that everything is installed as it should be::
 
-    PYTHONPATH="." trial tests
+    python -m twisted.trial tests
 
 This should end with a 'PASSED' result::
 
@@ -905,16 +313,42 @@ Building internal API documentation::
 
     python setup.py build_sphinx
 
+Troubleshooting
+===============
+
+Running out of File Handles
+---------------------------
+
+If synapse runs out of file handles, it typically fails badly - live-locking
+at 100% CPU, and/or failing to accept new TCP connections (blocking the
+connecting client).  Matrix currently can legitimately use a lot of file handles,
+thanks to busy rooms like #matrix:matrix.org containing hundreds of participating
+servers.  The first time a server talks in a room it will try to connect
+simultaneously to all participating servers, which could exhaust the available
+file descriptors between DNS queries & HTTPS sockets, especially if DNS is slow
+to respond. (We need to improve the routing algorithm to use something better
+than a full mesh, but as of March 2019 this hasn't happened yet.)
+
+If you hit this failure mode, we recommend increasing the maximum number of
+open file handles to be at least 4096 (assuming a default of 1024 or 256).
+This is typically done by editing ``/etc/security/limits.conf``.
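+
+For example, a minimal sketch (assuming Synapse runs as a dedicated
+``synapse`` user; the user name and limits are illustrative)::
+
+    # /etc/security/limits.conf
+    synapse    soft    nofile    4096
+    synapse    hard    nofile    4096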
+
+Separately, Synapse may leak file handles if inbound HTTP requests get stuck
+during processing - e.g. blocked behind a lock or talking to a remote server etc.
+This is best diagnosed by matching up the 'Received request' and 'Processed request'
+log lines and looking for any 'Processed request' lines which take more than
+a few seconds to execute. If you see this failure mode, please let us know
+at #synapse:matrix.org so we can help debug it.
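+
+For example, a rough way to pull out these log lines for comparison (assuming
+the default log file name of ``homeserver.log``)::
+
+    grep -E "(Received|Processed) request" homeserver.log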
 
 Help!! Synapse eats all my RAM!
-===============================
+-------------------------------
 
 Synapse's architecture is quite RAM hungry currently - we deliberately
 cache a lot of recent room data and metadata in RAM in order to speed up
-common requests.  We'll improve this in future, but for now the easiest
+common requests. We'll improve this in the future, but for now the easiest
 way to reduce the RAM usage (at the risk of slowing things down)
 is to set the almost-undocumented ``SYNAPSE_CACHE_FACTOR`` environment
-variable.  The default is 0.5, which can be decreased to reduce RAM usage
+variable. The default is 0.5, which can be decreased to reduce RAM usage
 in memory-constrained environments, or increased if performance starts to
 degrade.
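 
 For example, to halve the default cache size for a single run (a sketch; the
 factor just needs to be present in synapse's environment, however you start
 it)::
 
     SYNAPSE_CACHE_FACTOR=0.25 synctl start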
 
@@ -927,4 +361,5 @@ by installing the ``libjemalloc1`` package and adding this line to
 
     LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1
 
-.. _`key_management`: https://matrix.org/docs/spec/server_server/unstable.html#retrieving-server-keys
+This can make a significant difference on Python 2.7 - it's unclear how
+much of an improvement it provides on Python 3.x.
diff --git a/UPGRADE.rst b/UPGRADE.rst
index 55c77eedde..228222d534 100644
--- a/UPGRADE.rst
+++ b/UPGRADE.rst
@@ -5,20 +5,20 @@ Before upgrading check if any special steps are required to upgrade from
 what you currently have installed to the current version of synapse. The extra
 instructions that may be required are listed later in this document.
 
-1. If synapse was installed in a virtualenv then active that virtualenv before
-   upgrading. If synapse is installed in a virtualenv in ``~/.synapse/`` then
+1. If synapse was installed in a virtualenv then activate that virtualenv before
+   upgrading. If synapse is installed in a virtualenv in ``~/synapse/env`` then
    run:
 
    .. code:: bash
 
-       source ~/.synapse/bin/activate
+       source ~/synapse/env/bin/activate
 
 2. If synapse was installed using pip then upgrade to the latest version by
    running:
 
    .. code:: bash
 
-       pip install --upgrade --process-dependency-links matrix-synapse
+       pip install --upgrade matrix-synapse[all]
 
        # restart synapse
        synctl restart
@@ -31,14 +31,15 @@ instructions that may be required are listed later in this document.
 
        # Pull the latest version of the master branch.
        git pull
-       # Update the versions of synapse's python dependencies.
-       python synapse/python_dependencies.py | xargs pip install --upgrade
+
+       # Update synapse and its python dependencies.
+       pip install --upgrade .[all]
 
        # restart synapse
        ./synctl restart
 
 
-To check whether your update was sucessful, you can check the Server header
+To check whether your update was successful, you can check the Server header
 returned by the Client-Server API:
 
 .. code:: bash
@@ -48,6 +49,96 @@ returned by the Client-Server API:
     # configured on port 443.
     curl -kv https://<host.name>/_matrix/client/versions 2>&1 | grep "Server:"
 
+Upgrading to v0.99.0
+====================
+
+Please be aware that, before Synapse v1.0 is released around March 2019, you
+will need to replace any self-signed certificates with those verified by a
+root CA. Information on how to do so can be found at `the ACME docs
+<docs/ACME.md>`_.
+
+For more information on configuring TLS certificates see the `FAQ <docs/MSC1711_certificates_FAQ.md>`_.
+
+Upgrading to v0.34.0
+====================
+
+1. This release is the first to fully support Python 3. Synapse will now run on
+   Python versions 3.5 or 3.6 (as well as 2.7). We recommend switching to
+   Python 3, as it has been shown to give performance improvements.
+
+   For users who have installed Synapse into a virtualenv, we recommend doing
+   this by creating a new virtualenv. For example::
+
+       virtualenv -p python3 ~/synapse/env3
+       source ~/synapse/env3/bin/activate
+       pip install matrix-synapse
+
+   You can then start synapse as normal, having activated the new virtualenv::
+
+       cd ~/synapse
+       source env3/bin/activate
+       synctl start
+
+   Users who have installed from distribution packages should see the relevant
+   package documentation. See below for notes on Debian packages.
+
+   * When upgrading to Python 3, you **must** make sure that your log files are
+     configured as UTF-8, by adding ``encoding: utf8`` to the
+     ``RotatingFileHandler`` configuration (if you have one) in your
+     ``<server>.log.config`` file. For example, if your ``log.config`` file
+     contains::
+
+       handlers:
+         file:
+           class: logging.handlers.RotatingFileHandler
+           formatter: precise
+           filename: homeserver.log
+           maxBytes: 104857600
+           backupCount: 10
+           filters: [context]
+         console:
+           class: logging.StreamHandler
+           formatter: precise
+           filters: [context]
+
+     Then you should update this to be::
+
+       handlers:
+         file:
+           class: logging.handlers.RotatingFileHandler
+           formatter: precise
+           filename: homeserver.log
+           maxBytes: 104857600
+           backupCount: 10
+           filters: [context]
+           encoding: utf8
+         console:
+           class: logging.StreamHandler
+           formatter: precise
+           filters: [context]
+
+     There is no need to revert this change if downgrading to Python 2.
+
+   We are also making available Debian packages which will run Synapse on
+   Python 3. You can switch to these packages with ``apt-get install
+   matrix-synapse-py3``, however, please read `debian/NEWS
+   <https://github.com/matrix-org/synapse/blob/release-v0.34.0/debian/NEWS>`_
+   before doing so. The existing ``matrix-synapse`` packages will continue to
+   use Python 2 for the time being.
+
+2. This release removes ``riot.im`` from the default list of trusted
+   identity servers.
+
+   If ``riot.im`` is in your homeserver's list of
+   ``trusted_third_party_id_servers``, you should remove it. It was added in
+   case a hypothetical future identity server was put there. If you don't
+   remove it, users may be unable to deactivate their accounts.
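+
+   For example, if your ``homeserver.yaml`` contains something like the
+   following (the other entries are illustrative; your list may differ),
+   delete the ``riot.im`` line::
+
+       trusted_third_party_id_servers:
+           - matrix.org
+           - vector.im
+           - riot.im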
+
+3. This release no longer installs the (unmaintained) Matrix Console web client
+   as part of the default installation. It is possible to re-enable it by
+   installing it separately and setting the ``web_client_location`` config
+   option, but please consider switching to another client.
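+
+   If you do re-install it, ``web_client_location`` takes the path to the
+   web client files; a sketch (the path is illustrative)::
+
+       web_client_location: /usr/local/share/matrix-console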
+
 Upgrading to v0.33.7
 ====================
 
diff --git a/changelog.d/4832.misc b/changelog.d/4832.misc
new file mode 100644
index 0000000000..92022266c6
--- /dev/null
+++ b/changelog.d/4832.misc
@@ -0,0 +1 @@
+Improve federation documentation, specifically .well-known support. Many thanks to @vaab.
diff --git a/contrib/docker/docker-compose.yml b/contrib/docker/docker-compose.yml
index 3a8dfbae34..1e4ee43758 100644
--- a/contrib/docker/docker-compose.yml
+++ b/contrib/docker/docker-compose.yml
@@ -6,9 +6,11 @@ version: '3'
 services:
 
   synapse:
-    build: ../..
+    build:
+        context: ../..
+        dockerfile: docker/Dockerfile
     image: docker.io/matrixdotorg/synapse:latest
-    # Since snyapse does not retry to connect to the database, restart upon
+    # Since synapse does not retry to connect to the database, restart upon
     # failure
     restart: unless-stopped
     # See the readme for a full documentation of the environment settings
@@ -35,7 +37,7 @@ services:
     labels:
       - traefik.enable=true
       - traefik.frontend.rule=Host:my.matrix.Host
-      - traefik.port=8448
+      - traefik.port=8008
 
   db:
     image: docker.io/postgres:10-alpine
@@ -47,4 +49,4 @@ services:
       # You may store the database tables in a local folder..
       - ./schemas:/var/lib/postgresql/data
       # .. or store them on some high performance storage for better results
-      # - /path/to/ssd/storage:/var/lib/postfesql/data
+      # - /path/to/ssd/storage:/var/lib/postgresql/data
diff --git a/contrib/prometheus/README b/contrib/prometheus/README.md
index 7b733172e6..e646cb7ea7 100644
--- a/contrib/prometheus/README
+++ b/contrib/prometheus/README.md
@@ -6,8 +6,10 @@ To use it, first install prometheus by following the instructions at
   http://prometheus.io/
 
 ### for Prometheus v1
+
 Add a new job to the main prometheus.conf file:
 
+```yaml
   job: {
     name: "synapse"
 
@@ -15,10 +17,12 @@ Add a new job to the main prometheus.conf file:
       target: "http://SERVER.LOCATION.HERE:PORT/_synapse/metrics"
     }
   }
+```
 
 ### for Prometheus v2
 Add a new job to the main prometheus.yml file:
 
+```yaml
   - job_name: "synapse"
     metrics_path: "/_synapse/metrics"
     # when endpoint uses https:
@@ -26,11 +30,14 @@ Add a new job to the main prometheus.yml file:
 
     static_configs:
     - targets: ['SERVER.LOCATION:PORT']
+```
 
 To use `synapse.rules` add
 
+```yaml
     rule_files:
       - "/PATH/TO/synapse-v2.rules"
+```
 
 Metrics are disabled by default when running synapse; they must be enabled
 with the 'enable-metrics' option, either in the synapse config file or as a
diff --git a/contrib/purge_api/README.md b/contrib/purge_api/README.md
new file mode 100644
index 0000000000..000bf35ca7
--- /dev/null
+++ b/contrib/purge_api/README.md
@@ -0,0 +1,16 @@
+Purge history API examples
+==========================
+
+# `purge_history.sh`
+
+A bash script that uses the [purge history API](/docs/admin_api/README.rst) to
+purge all messages in a list of rooms up to a certain event. You can select a
+timeframe or a number of messages that you want to keep in the room.
+
+Just configure the variables DOMAIN, ADMIN, ROOMS_ARRAY and TIME at the top of
+the script.
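+
+For example (all values are illustrative):
+
+```bash
+DOMAIN=example.com
+ADMIN="@admin:example.com"
+ROOMS_ARRAY=('!someroomid:example.com')
+TIME='6 months ago'
+```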
+
+# `purge_remote_media.sh`
+
+A bash script that uses the [purge remote media API](/docs/admin_api/README.rst) to
+purge all old cached remote media.
diff --git a/contrib/purge_api/purge_history.sh b/contrib/purge_api/purge_history.sh
new file mode 100644
index 0000000000..e7dd5d6468
--- /dev/null
+++ b/contrib/purge_api/purge_history.sh
@@ -0,0 +1,141 @@
+#!/bin/bash
+
+# this script will use the api:
+#    https://github.com/matrix-org/synapse/blob/master/docs/admin_api/purge_history_api.rst
+# 
+# It will purge all messages in a list of rooms up to a certain event
+
+###################################################################################################
+# define your domain and admin user
+###################################################################################################
+# your homeserver's domain:
+DOMAIN=yourserver.tld
+# add this user as admin in your home server:
+ADMIN="@your_admin_username:$DOMAIN"
+
+API_URL="$DOMAIN:8008/_matrix/client/r0"
+
+###################################################################################################
+# choose the rooms to prune old messages from (add a free comment at the end)
+###################################################################################################
+# you can get the room_id's e.g. from your Riot client's "View Source" button on each message
+ROOMS_ARRAY=(
+'!DgvjtOljKujDBrxyHk:matrix.org#riot:matrix.org'
+'!QtykxKocfZaZOUrTwp:matrix.org#Matrix HQ'
+)
+
+# ALTERNATIVELY:
+# you can select all the rooms that are not encrypted and loop over the result:
+# SELECT room_id FROM rooms WHERE room_id NOT IN (SELECT DISTINCT room_id FROM events WHERE type ='m.room.encrypted')
+# or
+# select all rooms with at least 100 members:
+# SELECT q.room_id FROM (select count(*) as numberofusers, room_id FROM current_state_events WHERE type ='m.room.member'
+#   GROUP BY room_id) AS q LEFT JOIN room_aliases a ON q.room_id=a.room_id WHERE q.numberofusers > 100 ORDER BY numberofusers desc
+
+###################################################################################################
+# determine the EVENT_ID before which messages should be pruned
+###################################################################################################
+# choose a time before which the messages should be pruned:
+TIME='12 months ago'
+# ALTERNATIVELY:
+# a certain time:
+# TIME='2016-08-31 23:59:59'
+
+# creates a timestamp from the given time string:
+UNIX_TIMESTAMP=$(date +%s%3N --date='TZ="UTC+2" '"$TIME")
+
+# ALTERNATIVELY:
+# prune all messages that are older than 1000 messages ago:
+# LAST_MESSAGES=1000
+# SQL_GET_EVENT="SELECT event_id from events WHERE type='m.room.message' AND room_id ='$ROOM' ORDER BY received_ts DESC LIMIT 1 offset $(($LAST_MESSAGES - 1))"
+
+# ALTERNATIVELY:
+# select the EVENT_ID manually:
+#EVENT_ID='$1471814088343495zpPNI:matrix.org' # an example event from 21st of Aug 2016 by Matthew
+
+###################################################################################################
+# make the admin user a server admin in the database with
+###################################################################################################
+# psql -A -t --dbname=synapse -c "UPDATE users SET admin=1 WHERE name LIKE '$ADMIN'"
+
+###################################################################################################
+# database function
+###################################################################################################
+sql (){
+  # for sqlite3:
+  #sqlite3 homeserver.db "pragma busy_timeout=20000;$1" | awk '{print $2}'
+  # for postgres:
+  psql -A -t --dbname=synapse -c "$1" | grep -v 'Pager'
+}
+
+###################################################################################################
+# get an access token
+###################################################################################################
+# for example externally by watching Riot in your browser's network inspector
+# or internally on the server locally, use this:
+TOKEN=$(sql "SELECT token FROM access_tokens WHERE user_id='$ADMIN' ORDER BY id DESC LIMIT 1")
+AUTH="Authorization: Bearer $TOKEN"
+
+###################################################################################################
+# check if your TOKEN works. For example, this should work:
+###################################################################################################
+# $ curl --header "$AUTH" "$API_URL/rooms/$ROOM/state/m.room.power_levels" 
+
+###################################################################################################
+# finally start pruning the room:
+###################################################################################################
+POSTDATA='{"delete_local_events":"true"}' # this will really delete local events, so the messages in the room really disappear unless they are restored by remote federation
+
+for ROOM in "${ROOMS_ARRAY[@]}"; do
+    echo "########################################### $(date) ################# "
+    echo "pruning room: $ROOM ..."
+    ROOM=${ROOM%#*}
+    #set -x
+    echo "check for alias in db..."
+    # for postgres:
+    sql "SELECT * FROM room_aliases WHERE room_id='$ROOM'"
+    echo "get event..."
+    # for postgres:
+    EVENT_ID=$(sql "SELECT event_id FROM events WHERE type='m.room.message' AND received_ts<'$UNIX_TIMESTAMP' AND room_id='$ROOM' ORDER BY received_ts DESC LIMIT 1;")
+    if [ "$EVENT_ID" == "" ]; then
+      echo "no event $TIME"
+    else
+      echo "event: $EVENT_ID"
+      SLEEP=2
+      set -x
+      # call purge
+      OUT=$(curl --header "$AUTH" -s -d "$POSTDATA" -X POST "$API_URL/admin/purge_history/$ROOM/$EVENT_ID")
+      PURGE_ID=$(echo "$OUT" |grep purge_id|cut -d'"' -f4 )
+      if [ "$PURGE_ID" == "" ]; then
+        # probably the history purge is already in progress for $ROOM
+        : "continuing with next room"
+      else
+        while : ; do
+          # get status of purge and sleep longer each time if still active
+          sleep $SLEEP
+          STATUS=$(curl --header "$AUTH" -s "$API_URL/admin/purge_history_status/$PURGE_ID" | grep status | cut -d'"' -f4)
+          : "$ROOM --> Status: $STATUS"
+          [[ "$STATUS" == "active" ]] || break 
+          SLEEP=$((SLEEP + 1))
+        done 
+      fi
+      set +x
+      sleep 1
+    fi  
+done
+
+
+###################################################################################################
+# additionally
+###################################################################################################
+# to benefit from pruning large amounts of data, you need to call VACUUM to free the unused space.
+# This can take a very long time (hours) and the server has to be stopped while you do so:
+# $ synctl stop
+# $ sqlite3 -line homeserver.db "vacuum;"
+# $ synctl start
+
+# This PRAGMA can be set instead, so you don't need to vacuum manually after deleting rows:
+# $ sqlite3 homeserver.db "PRAGMA auto_vacuum = FULL;"
+# be cautious, it could make the database somewhat slow if there are a lot of deletions
+
+exit
diff --git a/contrib/purge_api/purge_remote_media.sh b/contrib/purge_api/purge_remote_media.sh
new file mode 100644
index 0000000000..99c07c663d
--- /dev/null
+++ b/contrib/purge_api/purge_remote_media.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+DOMAIN=yourserver.tld
+# add this user as admin in your home server:
+ADMIN="@you_admin_username:$DOMAIN"
+
+API_URL="$DOMAIN:8008/_matrix/client/r0"
+
+# choose a time before which cached remote media should be pruned:
+# TIME='2016-08-31 23:59:59'
+TIME='12 months ago'
+
+# creates a timestamp from the given time string:
+UNIX_TIMESTAMP=$(date +%s%3N --date='TZ="UTC+2" '"$TIME")
+
+
+###################################################################################################
+# database function
+###################################################################################################
+sql (){
+  # for sqlite3:
+  #sqlite3 homeserver.db "pragma busy_timeout=20000;$1" | awk '{print $2}'
+  # for postgres:
+  psql -A -t --dbname=synapse -c "$1" | grep -v 'Pager'
+}
+
+###############################################################################
+# make the admin user a server admin in the database with
+###############################################################################
+# sql "UPDATE users SET admin=1 WHERE name LIKE '$ADMIN'"
+
+###############################################################################
+# get an access token
+###############################################################################
+# for example externally by watching Riot in your browser's network inspector
+# or internally on the server locally, use this:
+TOKEN=$(sql "SELECT token FROM access_tokens WHERE user_id='$ADMIN' ORDER BY id DESC LIMIT 1")
+
+###############################################################################
+# check if your TOKEN works. For example, this should work:
+###############################################################################
+# curl --header "Authorization: Bearer $TOKEN" "$API_URL/rooms/$ROOM/state/m.room.power_levels"
+
+###############################################################################
+# optional check size before
+###############################################################################
+# echo calculate used storage before ...
+# du -shc ../.synapse/media_store/*
+
+###############################################################################
+# finally start pruning media:
+###############################################################################
+set -x # for debugging the generated string
+curl --header "Authorization: Bearer $TOKEN" -v POST "$API_URL/admin/purge_media_cache/?before_ts=$UNIX_TIMESTAMP"
diff --git a/contrib/systemd/matrix-synapse.service b/contrib/systemd/matrix-synapse.service
new file mode 100644
index 0000000000..efb157e941
--- /dev/null
+++ b/contrib/systemd/matrix-synapse.service
@@ -0,0 +1,31 @@
+# Example systemd configuration file for synapse. Copy into
+#    /etc/systemd/system/, update the paths if necessary, then:
+#
+#    systemctl enable matrix-synapse
+#    systemctl start matrix-synapse
+#
+# This assumes that Synapse has been installed in a virtualenv in
+# /opt/synapse/env.
+#
+# **NOTE:** This is an example service file that may change in the future. If you
+# wish to use this please copy rather than symlink it.
+
+[Unit]
+Description=Synapse Matrix homeserver
+
+[Service]
+Type=simple
+Restart=on-abort
+
+User=synapse
+Group=nogroup
+
+WorkingDirectory=/opt/synapse
+ExecStart=/opt/synapse/env/bin/python -m synapse.app.homeserver --config-path=/opt/synapse/homeserver.yaml
+
+# adjust the cache factor if necessary
+# Environment=SYNAPSE_CACHE_FACTOR=2.0
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/contrib/systemd/synapse.service b/contrib/systemd/synapse.service
deleted file mode 100644
index b81ce3915d..0000000000
--- a/contrib/systemd/synapse.service
+++ /dev/null
@@ -1,22 +0,0 @@
-# This assumes that Synapse has been installed as a system package
-# (e.g. https://www.archlinux.org/packages/community/any/matrix-synapse/ for ArchLinux)
-# rather than in a user home directory or similar under virtualenv.
-
-# **NOTE:** This is an example service file that may change in the future. If you
-# wish to use this please copy rather than symlink it.
-
-[Unit]
-Description=Synapse Matrix homeserver
-
-[Service]
-Type=simple
-User=synapse
-Group=synapse
-WorkingDirectory=/var/lib/synapse
-ExecStart=/usr/bin/python2.7 -m synapse.app.homeserver --config-path=/etc/synapse/homeserver.yaml
-ExecStop=/usr/bin/synctl stop /etc/synapse/homeserver.yaml
-# EnvironmentFile=-/etc/sysconfig/synapse  # Can be used to e.g. set SYNAPSE_CACHE_FACTOR 
-
-[Install]
-WantedBy=multi-user.target
-
diff --git a/debian/.gitignore b/debian/.gitignore
new file mode 100644
index 0000000000..f027374ae2
--- /dev/null
+++ b/debian/.gitignore
@@ -0,0 +1,7 @@
+/matrix-synapse-py3.*.debhelper
+/matrix-synapse-py3.debhelper.log
+/matrix-synapse-py3.substvars
+/matrix-synapse-*/
+/files
+/debhelper-build-stamp
+/.debhelper
diff --git a/debian/NEWS b/debian/NEWS
new file mode 100644
index 0000000000..7b032d86bd
--- /dev/null
+++ b/debian/NEWS
@@ -0,0 +1,32 @@
+matrix-synapse-py3 (0.34.0) stable; urgency=medium
+
+  matrix-synapse-py3 is intended as a drop-in replacement for the existing
+  matrix-synapse package. When the package is installed, matrix-synapse will be
+  automatically uninstalled. The replacement should be relatively seamless,
+  however, please note the following important differences to matrix-synapse:
+
+  * Most importantly, the matrix-synapse service now runs under Python 3 rather
+    than Python 2.7.
+
+  * Synapse is installed into its own virtualenv (in /opt/venvs/matrix-synapse)
+    instead of using the system python libraries. (This may mean that you can
+    remove a number of old dependencies with `apt autoremove`).
+
+  * If you have previously manually installed any custom python extensions
+    (such as matrix-synapse-rest-auth) into the system python directories, you
+    will need to reinstall them in the new virtualenv. Please consult the
+    documentation of the relevant extensions for further details.
+
+  matrix-synapse-py3 will take over responsibility for the existing
+  configuration files, including the matrix-synapse systemd service.
+
+  Beware, however, that `apt purge matrix-synapse` will *disable* the
+  matrix-synapse service (so that it will not be started on reboot), even
+  though that service is no longer being provided by the matrix-synapse
+  package. It can be re-enabled with `systemctl enable matrix-synapse`.
+
+  The matrix.org team will continue to provide Python 2 `matrix-synapse`
+  packages for the next couple of releases, to allow time for system
+  administrators to test the new packages.
+
+ -- Richard van der Hoff <richard@matrix.org>  Wed, 19 Dec 2018 14:00:00 +0000
diff --git a/debian/build_virtualenv b/debian/build_virtualenv
new file mode 100755
index 0000000000..bead3ebc6e
--- /dev/null
+++ b/debian/build_virtualenv
@@ -0,0 +1,91 @@
+#!/bin/bash
+#
+# runs dh_virtualenv to build the virtualenv in the build directory,
+# and then runs the trial tests against the installed synapse.
+
+set -e
+
+export DH_VIRTUALENV_INSTALL_ROOT=/opt/venvs
+
+# make sure that the virtualenv links to the specific version of python, by
+# dereferencing the python3 symlink.
+#
+# Otherwise, if somebody tries to install (say) the stretch package on buster,
+# they will get a confusing error about "No module named 'synapse'", because
+# python won't look in the right directory. At least this way, the error will
+# be a *bit* more obvious.
+#
+SNAKE=`readlink -e /usr/bin/python3`
+
+# try to set the CFLAGS so any compiled C extensions are compiled with the
+# most generic x64 instructions possible, so that compiling on a new Intel
+# chip doesn't enable features that are unavailable on older CPUs or on AMD.
+#
+# TODO: add similar things for non-amd64, or figure out a more generic way to
+# do this.
+
+case `dpkg-architecture -q DEB_HOST_ARCH` in
+    amd64)
+        export CFLAGS=-march=x86-64
+        ;;
+esac
+
+# Use --builtin-venv to use the better `venv` module from CPython 3.4+ rather
+# than the 2/3 compatible `virtualenv`.
+
+dh_virtualenv \
+    --install-suffix "matrix-synapse" \
+    --builtin-venv \
+    --setuptools \
+    --python "$SNAKE" \
+    --upgrade-pip \
+    --preinstall="lxml" \
+    --preinstall="mock" \
+    --extra-pip-arg="--no-cache-dir" \
+    --extra-pip-arg="--compile" \
+    --extras="all"
+
+PACKAGE_BUILD_DIR="debian/matrix-synapse-py3"
+VIRTUALENV_DIR="${PACKAGE_BUILD_DIR}${DH_VIRTUALENV_INSTALL_ROOT}/matrix-synapse"
+TARGET_PYTHON="${VIRTUALENV_DIR}/bin/python"
+
+# we copy the tests to a temporary directory so that we can put them on the
+# PYTHONPATH without putting the uninstalled synapse on the pythonpath.
+tmpdir=`mktemp -d`
+trap "rm -r $tmpdir" EXIT
+
+cp -r tests "$tmpdir"
+
+PYTHONPATH="$tmpdir" \
+    "${TARGET_PYTHON}" -B -m twisted.trial --reporter=text -j2 tests
+
+# build the config file
+"${TARGET_PYTHON}" -B "${VIRTUALENV_DIR}/bin/generate_config" \
+        --config-dir="/etc/matrix-synapse" \
+        --data-dir="/var/lib/matrix-synapse" |
+    perl -pe '
+# tweak the paths to the tls certs and signing keys
+/^tls_.*_path:/ and s/SERVERNAME/homeserver/;
+/^signing_key_path:/ and s/SERVERNAME/homeserver/;
+
+# tweak the pid file location
+/^pid_file:/ and s#:.*#: "/var/run/matrix-synapse.pid"#;
+
+# tweak the path to the log config
+/^log_config:/ and s/SERVERNAME\.log\.config/log.yaml/;
+
+# tweak the path to the media store
+/^media_store_path:/ and s#/media_store#/media#;
+
+# remove the server_name setting, which is set in a separate file
+/^server_name:/ and $_ = "#\n# This is set in /etc/matrix-synapse/conf.d/server_name.yaml for Debian installations.\n# $_";
+
+# remove the report_stats setting, which is set in a separate file
+/^# report_stats:/ and $_ = "";
+
+' > "${PACKAGE_BUILD_DIR}/etc/matrix-synapse/homeserver.yaml"
+
+
+# add a dependency on the right version of python to substvars.
+PYPKG=`basename $SNAKE`
+echo "synapse:pydepends=$PYPKG" >> debian/matrix-synapse-py3.substvars
diff --git a/debian/changelog b/debian/changelog
new file mode 100644
index 0000000000..fd77ce13a2
--- /dev/null
+++ b/debian/changelog
@@ -0,0 +1,698 @@
+matrix-synapse-py3 (0.99.2) stable; urgency=medium
+
+  * Fix overwriting of config settings on upgrade.
+  * New synapse release 0.99.2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Fri, 01 Mar 2019 10:55:08 +0000
+
+matrix-synapse-py3 (0.99.1.1) stable; urgency=medium
+
+  * New synapse release 0.99.1.1
+
+ -- Synapse Packaging team <packages@matrix.org>  Thu, 14 Feb 2019 17:19:44 +0000
+
+matrix-synapse-py3 (0.99.1) stable; urgency=medium
+
+  [ Damjan Georgievski ]
+  * Added ExecReload= in service unit file to send a HUP signal
+
+  [ Synapse Packaging team ]
+  * New synapse release 0.99.1
+
+ -- Synapse Packaging team <packages@matrix.org>  Thu, 14 Feb 2019 14:12:26 +0000
+
+matrix-synapse-py3 (0.99.0) stable; urgency=medium
+
+  * New synapse release 0.99.0
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 5 Feb 2019 18:25:00 +0000
+
+matrix-synapse-py3 (0.34.1.1++1) stable; urgency=medium
+
+  * Update conflicts specifications to allow smoother transition from matrix-synapse.
+
+ -- Synapse Packaging team <packages@matrix.org>  Sat, 12 Jan 2019 12:58:35 +0000
+
+matrix-synapse-py3 (0.34.1.1) stable; urgency=high
+
+  * New synapse release 0.34.1.1
+
+ -- Synapse Packaging team <packages@matrix.org>  Thu, 10 Jan 2019 15:04:52 +0000
+
+matrix-synapse-py3 (0.34.1+1) stable; urgency=medium
+
+  * Remove 'Breaks: matrix-synapse-ldap3'. (matrix-synapse-py3 includes
+    the matrix-synapse-ldap3 python files, which makes the
+    matrix-synapse-ldap3 debian package redundant but not broken.)
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 09 Jan 2019 15:30:00 +0000
+
+matrix-synapse-py3 (0.34.1) stable; urgency=medium
+
+  * New synapse release 0.34.1.
+  * Update Conflicts specifications to allow installation alongside our
+    matrix-synapse transitional package.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 09 Jan 2019 14:52:24 +0000
+
+matrix-synapse-py3 (0.34.0) stable; urgency=medium
+
+  * New synapse release 0.34.0.
+  * Synapse is now installed into a Python 3 virtual environment with
+    up-to-date dependencies.
+  * The matrix-synapse service will now be restarted when the package is
+    upgraded.
+    (Fixes https://github.com/matrix-org/package-synapse-debian/issues/18)
+
+ -- Synapse packaging team <packages@matrix.org>  Wed, 19 Dec 2018 14:00:00 +0000
+
+matrix-synapse (0.33.9-1matrix1) stretch; urgency=medium
+
+  [ Erik Johnston ]
+  * Remove dependency on python-pydenticon
+
+  [ Richard van der Hoff ]
+  * New upstream version 0.33.9
+  * Refresh patches for 0.33.9
+
+ -- Richard van der Hoff <richard@matrix.org>  Tue, 20 Nov 2018 10:26:05 +0000
+
+matrix-synapse (0.33.8-1) stretch; urgency=medium
+
+  * New upstream version 0.33.8
+
+ -- Erik Johnston <erik@matrix.org>  Thu, 01 Nov 2018 14:33:26 +0000
+
+matrix-synapse (0.33.7-1matrix1) stretch; urgency=medium
+
+  * New upstream version 0.33.7
+
+ -- Richard van der Hoff <richard@matrix.org>  Thu, 18 Oct 2018 16:18:26 +0100
+
+matrix-synapse (0.33.6-1matrix1) stretch; urgency=medium
+
+  * Imported Upstream version 0.33.6
+  * Remove redundant explicit dep on python-bcrypt
+  * Run the tests during build
+  * Add dependency on python-attr 16.0
+  * Refresh patches for 0.33.6
+
+ -- Richard van der Hoff <richard@matrix.org>  Thu, 04 Oct 2018 14:40:29 +0100
+
+matrix-synapse (0.33.5.1-1matrix1) stretch; urgency=medium
+
+  * Imported Upstream version 0.33.5.1
+
+ -- Richard van der Hoff <richard@matrix.org>  Mon, 24 Sep 2018 18:20:51 +0100
+
+matrix-synapse (0.33.5-1matrix1) stretch; urgency=medium
+
+  * Imported Upstream version 0.33.5
+
+ -- Richard van der Hoff <richard@matrix.org>  Mon, 24 Sep 2018 16:06:23 +0100
+
+matrix-synapse (0.33.4-1mx1) stretch; urgency=medium
+
+  * Imported Upstream version 0.33.4
+  * Avoid telling people to install packages with pip
+    (fixes https://github.com/matrix-org/synapse/issues/3743)
+
+ -- Richard van der Hoff <richard@matrix.org>  Fri, 07 Sep 2018 14:06:17 +0100
+
+matrix-synapse (0.33.3.1-1mx1) stretch; urgency=critical
+
+  [ Richard van der Hoff ]
+  * Imported Upstream version 0.33.3.1
+
+ -- Richard van der Hoff <richard@matrix.org>  Thu, 06 Sep 2018 11:20:37 +0100
+
+matrix-synapse (0.33.3-2) stretch; urgency=medium
+
+  * We now require python-twisted 17.1.0 or later
+  * Add recommendations for python-psycopg2 and python-lxml
+
+ -- Richard van der Hoff <richard@matrix.org>  Thu, 23 Aug 2018 19:04:08 +0100
+
+matrix-synapse (0.33.3-1) jessie; urgency=medium
+
+  * New upstream version 0.33.3
+
+ -- Richard van der Hoff <richard@matrix.org>  Wed, 22 Aug 2018 14:50:30 +0100
+
+matrix-synapse (0.33.2-1) jessie; urgency=medium
+
+  * New upstream version 0.33.2
+
+ -- Richard van der Hoff <richard@matrix.org>  Thu, 09 Aug 2018 15:40:42 +0100
+
+matrix-synapse (0.33.1-1) jessie; urgency=medium
+
+  * New upstream version 0.33.1
+
+ -- Erik Johnston <erik@matrix.org>  Thu, 02 Aug 2018 15:52:19 +0100
+
+matrix-synapse (0.33.0-1) jessie; urgency=medium
+
+  * New upstream version 0.33.0
+
+ -- Richard van der Hoff <richard@matrix.org>  Thu, 19 Jul 2018 13:38:41 +0100
+
+matrix-synapse (0.32.1-1) jessie; urgency=medium
+
+  * New upstream version 0.32.1
+
+ -- Richard van der Hoff <richard@matrix.org>  Fri, 06 Jul 2018 17:16:29 +0100
+
+matrix-synapse (0.32.0-1) jessie; urgency=medium
+
+  * New upstream version 0.32.0
+
+ -- Erik Johnston <erik@matrix.org>  Fri, 06 Jul 2018 15:34:06 +0100
+
+matrix-synapse (0.31.2-1) jessie; urgency=high
+
+  * New upstream version 0.31.2
+
+ -- Richard van der Hoff <richard@matrix.org>  Thu, 14 Jun 2018 16:49:07 +0100
+
+matrix-synapse (0.31.1-1) jessie; urgency=medium
+
+  * New upstream version 0.31.1
+  * Require python-prometheus-client >= 0.0.14
+
+ -- Richard van der Hoff <richard@matrix.org>  Fri, 08 Jun 2018 16:11:55 +0100
+
+matrix-synapse (0.31.0-1) jessie; urgency=medium
+
+  * New upstream version 0.31.0
+
+ -- Richard van der Hoff <richard@matrix.org>  Wed, 06 Jun 2018 17:23:10 +0100
+
+matrix-synapse (0.30.0-1) jessie; urgency=medium
+
+  [ Michael Kaye ]
+  * update homeserver.yaml to be somewhat more modern.
+
+  [ Erik Johnston ]
+  * New upstream version 0.30.0
+
+ -- Erik Johnston <erik@matrix.org>  Thu, 24 May 2018 16:43:16 +0100
+
+matrix-synapse (0.29.0-1) jessie; urgency=medium
+
+  * New upstream version 0.29.0
+
+ -- Erik Johnston <erik@matrix.org>  Wed, 16 May 2018 17:43:06 +0100
+
+matrix-synapse (0.28.1-1) jessie; urgency=medium
+
+  * New upstream version 0.28.1
+
+ -- Erik Johnston <erik@matrix.org>  Tue, 01 May 2018 19:21:39 +0100
+
+matrix-synapse (0.28.0-1) jessie; urgency=medium
+
+  * New upstream 0.28.0
+
+ -- Erik Johnston <erik@matrix.org>  Fri, 27 Apr 2018 13:15:49 +0100
+
+matrix-synapse (0.27.4-1) jessie; urgency=medium
+
+  * Bump canonicaljson version
+  * New upstream 0.27.4
+
+ -- Erik Johnston <erik@matrix.org>  Fri, 13 Apr 2018 13:37:47 +0100
+
+matrix-synapse (0.27.3-1) jessie; urgency=medium
+
+  * Report stats should default to off
+  * Refresh patches
+  * New upstream 0.27.3
+
+ -- Erik Johnston <erik@matrix.org>  Wed, 11 Apr 2018 11:43:47 +0100
+
+matrix-synapse (0.27.2-1) jessie; urgency=medium
+
+  * New upstream version 0.27.2
+
+ -- Erik Johnston <erik@matrix.org>  Mon, 26 Mar 2018 16:41:57 +0100
+
+matrix-synapse (0.27.1-1) jessie; urgency=medium
+
+  * New upstream version 0.27.1
+
+ -- Erik Johnston <erik@matrix.org>  Mon, 26 Mar 2018 16:22:03 +0100
+
+matrix-synapse (0.27.0-2) jessie; urgency=medium
+
+  * Fix bcrypt dependency
+
+ -- Erik Johnston <erik@matrix.org>  Mon, 26 Mar 2018 16:00:26 +0100
+
+matrix-synapse (0.27.0-1) jessie; urgency=medium
+
+  * New upstream version 0.27.0
+
+ -- Erik Johnston <erik@matrix.org>  Mon, 26 Mar 2018 15:07:52 +0100
+
+matrix-synapse (0.26.1-1) jessie; urgency=medium
+
+  * Ignore RC
+  * New upstream version 0.26.1
+
+ -- Erik Johnston <erik@matrix.org>  Fri, 16 Mar 2018 00:40:08 +0000
+
+matrix-synapse (0.26.0-1) jessie; urgency=medium
+
+  [ Richard van der Hoff ]
+  * Remove `level` for `file` log handler
+
+  [ Erik Johnston ]
+
+ -- Erik Johnston <erik@matrix.org>  Fri, 05 Jan 2018 11:21:26 +0000
+
+matrix-synapse (0.25.1-1) jessie; urgency=medium
+
+  * New upstream version 0.25.1
+
+ -- Erik Johnston <erik@matrix.org>  Mon, 20 Nov 2017 10:05:37 +0000
+
+matrix-synapse (0.25.0-1) jessie; urgency=medium
+
+  * New upstream version 0.25.0
+
+ -- Erik Johnston <erik@matrix.org>  Wed, 15 Nov 2017 11:36:32 +0000
+
+matrix-synapse (0.24.1-1) jessie; urgency=medium
+
+  * New upstream version 0.24.1
+
+ -- Erik Johnston <erik@matrix.org>  Tue, 24 Oct 2017 15:05:03 +0100
+
+matrix-synapse (0.24.0-1) jessie; urgency=medium
+
+  * New upstream version 0.24.0
+
+ -- Erik Johnston <erik@matrix.org>  Mon, 23 Oct 2017 14:11:46 +0100
+
+matrix-synapse (0.23.1-1) xenial; urgency=medium
+
+  * Imported upstream version 0.23.1
+
+ -- Erik Johnston <erikj@matrix.org>  Thu, 05 Oct 2017 15:28:25 +0100
+
+matrix-synapse (0.23.0-1) jessie; urgency=medium
+
+  * Fix patch after refactor
+  * Add patch to remove requirement on affinity package
+  * refresh webclient patch
+
+ -- Erik Johnston <erikj@matrix.org>  Mon, 02 Oct 2017 15:34:57 +0100
+
+matrix-synapse (0.22.1-1) jessie; urgency=medium
+
+  * Imported Upstream version 0.22.1
+
+ -- Erik Johnston <erikj@matrix.org>  Thu, 06 Jul 2017 18:14:13 +0100
+
+matrix-synapse (0.22.0-1) jessie; urgency=medium
+
+  * Imported upstream version 0.22.0
+
+ -- Erik Johnston <erikj@matrix.org>  Thu, 06 Jul 2017 10:47:45 +0100
+
+matrix-synapse (0.21.1-1) jessie; urgency=medium
+
+  * Imported upstream version 0.21.1
+
+ -- Erik Johnston <erikj@matrix.org>  Thu, 15 Jun 2017 13:31:13 +0100
+
+matrix-synapse (0.21.0-1) jessie; urgency=medium
+
+  * Imported upstream version 0.21.0
+  * Update patches
+
+ -- Erik Johnston <erikj@matrix.org>  Thu, 18 May 2017 14:16:54 +0100
+
+matrix-synapse (0.20.0-2) jessie; urgency=medium
+
+  * Depend on python-jsonschema
+
+ -- Erik Johnston <erikj@matrix.org>  Wed, 12 Apr 2017 10:41:46 +0100
+
+matrix-synapse (0.20.0-1) jessie; urgency=medium
+
+  * Imported upstream version 0.20.0
+
+ -- Erik Johnston <erikj@matrix.org>  Tue, 11 Apr 2017 12:58:26 +0100
+
+matrix-synapse (0.19.3-1) jessie; urgency=medium
+
+  * Imported upstream version 0.19.3
+
+ -- Erik Johnston <erikj@matrix.org>  Tue, 21 Mar 2017 13:45:41 +0000
+
+matrix-synapse (0.19.2-1) jessie; urgency=medium
+
+  [ Sunil Mohan Adapa ]
+  * Bump standards version to 3.9.8
+  * Add debian/copyright file
+  * Don't ignore errors in debian/config
+  * Reformat dependencies in debian/control
+  * Internationalize strings in template file
+  * Update package description
+  * Add lsb-base as dependency
+  * Update questions for debconf style
+  * Add man pages for all binaries
+
+  [ Erik Johnston ]
+  * Imported upstream version 0.19.2
+
+ -- Erik Johnston <erikj@matrix.org>  Tue, 21 Feb 2017 13:55:00 +0000
+
+matrix-synapse (0.19.1-1) jessie; urgency=medium
+
+  * Imported upstream version 0.19.1
+
+ -- Erik Johnston <erikj@matrix.org>  Thu, 09 Feb 2017 11:53:27 +0000
+
+matrix-synapse (0.19.0-1) jessie; urgency=medium
+
+  This build requires python-twisted 0.19.0, which may need to be installed
+  from backports.
+
+  [ Bryce Chidester ]
+  * Add EnvironmentFile to the systemd service
+  * Create matrix-synapse.default
+
+  [ Erik Johnston ]
+  * Imported upstream version 0.19.0
+
+ -- Erik Johnston <erikj@matrix.org>  Sat, 04 Feb 2017 09:58:29 +0000
+
+matrix-synapse (0.18.7-1) trusty; urgency=medium
+
+  * Imported Upstream version 0.18.4
+
+ -- Erik Johnston <erikj@matrix.org>  Mon, 09 Jan 2017 15:10:21 +0000
+
+matrix-synapse (0.18.5-1) trusty; urgency=medium
+
+  * Imported Upstream version 0.18.5
+
+ -- Erik Johnston <erikj@matrix.org>  Fri, 16 Dec 2016 10:51:59 +0000
+
+matrix-synapse (0.18.4-1) trusty; urgency=medium
+
+  * Imported Upstream version 0.18.4
+
+ -- Erik Johnston <erikj@matrix.org>  Tue, 22 Nov 2016 10:33:41 +0000
+
+matrix-synapse (0.18.3-1) trusty; urgency=medium
+
+  * Imported Upstream version 0.18.3
+  * Remove upstreamed ldap3 patch
+
+ -- Erik Johnston <erikj@matrix.org>  Tue, 08 Nov 2016 15:01:49 +0000
+
+matrix-synapse (0.18.2-2) trusty; urgency=high
+
+  * Patch ldap3 support to workaround differences in python-ldap3 0.9,
+    bug allowed unauthorized logins if ldap3 0.9 was used.
+
+ -- Erik Johnston <erikj@matrix.org>  Tue, 08 Nov 2016 13:48:09 +0000
+
+matrix-synapse (0.18.2-1) trusty; urgency=medium
+
+  * Imported Upstream version 0.18.2
+
+ -- Erik Johnston <erikj@matrix.org>  Tue, 01 Nov 2016 13:30:45 +0000
+
+matrix-synapse (0.18.1-1) trusty; urgency=medium
+
+  * Imported Upstream version 0.18.1
+
+ -- Erik Johnston <erikj@matrix.org>  Wed, 05 Oct 2016 14:52:53 +0100
+
+matrix-synapse (0.18.0-1) trusty; urgency=medium
+
+  * Imported Upstream version 0.18.0
+
+ -- Erik Johnston <erikj@matrix.org>  Mon, 19 Sep 2016 17:38:48 +0100
+
+matrix-synapse (0.17.3-1) trusty; urgency=medium
+
+  * Imported Upstream version 0.17.3
+
+ -- Erik Johnston <erikj@matrix.org>  Fri, 09 Sep 2016 11:18:18 +0100
+
+matrix-synapse (0.17.2-1) trusty; urgency=medium
+
+  * Imported Upstream version 0.17.2
+
+ -- Erik Johnston <erikj@matrix.org>  Thu, 08 Sep 2016 15:37:14 +0100
+
+matrix-synapse (0.17.1-1) trusty; urgency=medium
+
+  * Imported Upstream version 0.17.1
+
+ -- Erik Johnston <erikj@matrix.org>  Wed, 24 Aug 2016 15:11:29 +0100
+
+matrix-synapse (0.17.0-1) trusty; urgency=medium
+
+  * Imported Upstream version 0.17.0
+
+ -- Erik Johnston <erikj@matrix.org>  Mon, 08 Aug 2016 13:56:15 +0100
+
+matrix-synapse (0.16.1-r1-1) trusty; urgency=medium
+
+  * Imported Upstream version 0.16.1-r1
+
+ -- Erik Johnston <erikj@matrix.org>  Fri, 08 Jul 2016 16:47:35 +0100
+
+matrix-synapse (0.16.1-2) trusty; urgency=critical
+
+  * Apply security patch
+
+ -- Erik Johnston <erikj@matrix.org>  Fri, 08 Jul 2016 11:05:27 +0100
+
+matrix-synapse (0.16.1-1) trusty; urgency=medium
+
+  * New upstream release
+
+ -- Erik Johnston <erikj@matrix.org>  Tue, 21 Jun 2016 14:56:48 +0100
+
+matrix-synapse (0.16.0-3) trusty; urgency=medium
+
+  * Don't require strict nacl==0.3.0 requirement
+
+ -- Erik Johnston <erikj@matrix.org>  Mon, 20 Jun 2016 13:24:22 +0100
+
+matrix-synapse (0.16.0-2) trusty; urgency=medium
+
+  * Also change the permissions of /etc/matrix-synapse
+  * Add apt webclient instructions
+  * Fix up patches
+  * Update default homeserver.yaml
+  * Add patch
+
+ -- Erik Johnston <erikj@matrix.org>  Fri, 10 Jun 2016 14:06:20 +0100
+
+matrix-synapse (0.16.0-1) trusty; urgency=medium
+
+  [ David A Roberts ]
+  * systemd
+
+  [ Erik Johnston ]
+  * Fixup postinst and matrix-synapse.service
+  * Handle email optional deps
+  * New upstream release
+
+ -- Erik Johnston <erikj@matrix.org>  Thu, 09 Jun 2016 16:17:01 +0100
+
+matrix-synapse (0.14.0-1) trusty; urgency=medium
+
+  * Remove saml2 module requirements
+
+ -- Erik Johnston <erikj@matrix.org>  Wed, 30 Mar 2016 14:31:17 +0100
+
+matrix-synapse (0.13.3-1) trusty; urgency=medium
+
+  * New upstream release
+
+ -- Erik Johnston <erikj@matrix.org>  Thu, 11 Feb 2016 16:35:39 +0000
+
+matrix-synapse (0.13.2-1) trusty; urgency=medium
+
+  * New upstream release
+
+ -- Erik Johnston <erikj@matrix.org>  Thu, 11 Feb 2016 11:01:16 +0000
+
+matrix-synapse (0.13.0-1) trusty; urgency=medium
+
+  * New upstream release
+
+ -- Erik Johnston <erikj@matrix.org>  Wed, 10 Feb 2016 16:34:39 +0000
+
+matrix-synapse (0.12.0-2) trusty; urgency=medium
+
+  * Don't default `registration_shared_secret` config option
+
+ -- Erik Johnston <erikj@matrix.org>  Wed, 06 Jan 2016 16:34:02 +0000
+
+matrix-synapse (0.12.0-1) stable; urgency=medium
+
+  * Imported Upstream version 0.12.0
+
+ -- Mark Haines <mark@matrix.org>  Mon, 04 Jan 2016 15:38:33 +0000
+
+matrix-synapse (0.11.1-1) unstable; urgency=medium
+
+  * Imported Upstream version 0.11.1
+
+ -- Erik Johnston <erikj@matrix.org>  Fri, 20 Nov 2015 17:56:52 +0000
+
+matrix-synapse (0.11.0-r2-1) stable; urgency=medium
+
+  * Imported Upstream version 0.11.0-r2
+  * Add gbp.conf
+
+ -- Erik Johnston <erikj@matrix.org>  Thu, 19 Nov 2015 13:52:36 +0000
+
+matrix-synapse (0.11.0-1) wheezy; urgency=medium
+
+  * Fix dependencies.
+
+ -- Erik Johnston <erikj@matrix.org>  Tue, 17 Nov 2015 16:28:06 +0000
+
+matrix-synapse (0.11.0-0) wheezy; urgency=medium
+
+  * New upstream release
+
+ -- Erik Johnston <erikj@matrix.org>  Tue, 17 Nov 2015 16:03:01 +0000
+
+matrix-synapse (0.10.0-2) wheezy; urgency=medium
+
+  * Rebuild for wheezy.
+
+ -- Erik Johnston <erikj@matrix.org>  Fri, 04 Sep 2015 14:21:03 +0100
+
+matrix-synapse (0.10.0-1) trusty; urgency=medium
+
+  * New upstream release
+
+ -- Erik Johnston <erikj@matrix.org>  Thu, 03 Sep 2015 10:08:34 +0100
+
+matrix-synapse (0.10.0~rc6-3) trusty; urgency=medium
+
+  * Create log directory.
+
+ -- Erik Johnston <erikj@matrix.org>  Wed, 02 Sep 2015 17:49:07 +0100
+
+matrix-synapse (0.10.0~rc6-2) trusty; urgency=medium
+
+  * Add patch to work around upstream bug in config directory handling.
+
+ -- Erik Johnston <erikj@matrix.org>  Wed, 02 Sep 2015 17:42:42 +0100
+
+matrix-synapse (0.10.0~rc6-1) trusty; urgency=medium
+
+  * New upstream release
+
+ -- Erik Johnston <erikj@matrix.org>  Wed, 02 Sep 2015 17:21:21 +0100
+
+matrix-synapse (0.10.0~rc5-3) trusty; urgency=medium
+
+  * Update init script to work.
+
+ -- Erik Johnston <erikj@matrix.org>  Fri, 28 Aug 2015 10:51:56 +0100
+
+matrix-synapse (0.10.0~rc5-2) trusty; urgency=medium
+
+  * Fix where python files are installed.
+
+ -- Erik Johnston <erikj@matrix.org>  Thu, 27 Aug 2015 11:55:39 +0100
+
+matrix-synapse (0.10.0~rc5-1) trusty; urgency=medium
+
+  * New upstream release
+
+ -- Erik Johnston <erikj@matrix.org>  Thu, 27 Aug 2015 11:26:54 +0100
+
+matrix-synapse (0.10.0~rc4-1) trusty; urgency=medium
+
+  * New upstream version.
+
+ -- Erik Johnston <erikj@matrix.org>  Thu, 27 Aug 2015 10:29:31 +0100
+
+matrix-synapse (0.10.0~rc3-7) trusty; urgency=medium
+
+  * Add debian/watch
+
+ -- Erik Johnston <erikj@matrix.org>  Wed, 26 Aug 2015 17:57:08 +0100
+
+matrix-synapse (0.10.0~rc3-6) trusty; urgency=medium
+
+  * Deps.
+
+ -- Erik Johnston <erikj@matrix.org>  Wed, 26 Aug 2015 17:07:13 +0100
+
+matrix-synapse (0.10.0~rc3-5) trusty; urgency=medium
+
+  * Deps.
+
+ -- Erik Johnston <erikj@matrix.org>  Wed, 26 Aug 2015 16:18:02 +0100
+
+matrix-synapse (0.10.0~rc3-4) trusty; urgency=medium
+
+  * More deps.
+
+ -- Erik Johnston <erikj@matrix.org>  Wed, 26 Aug 2015 14:09:27 +0100
+
+matrix-synapse (0.10.0~rc3-3) trusty; urgency=medium
+
+  * Update deps.
+
+ -- Erik Johnston <erikj@matrix.org>  Wed, 26 Aug 2015 13:49:20 +0100
+
+matrix-synapse (0.10.0~rc3-2) trusty; urgency=medium
+
+  * Add more deps.
+
+ -- Erik Johnston <erikj@matrix.org>  Wed, 26 Aug 2015 13:25:45 +0100
+
+matrix-synapse (0.10.0~rc3-1) trusty; urgency=medium
+
+  * New upstream release
+
+ -- Erik Johnston <erikj@matrix.org>  Tue, 25 Aug 2015 17:52:33 +0100
+
+matrix-synapse (0.9.3-1~trusty1) trusty; urgency=medium
+
+  * Rebuild for trusty.
+
+ -- Erik Johnston <erikj@matrix.org>  Thu, 20 Aug 2015 15:05:43 +0100
+
+matrix-synapse (0.9.3-1) wheezy; urgency=medium
+
+  * New upstream release
+  * Create a user, "matrix-synapse", to run as
+  * Log to /var/log/matrix-synapse/ directory
+  * Override the way synapse looks for the angular SDK (syweb) so it finds the
+    packaged one
+
+ -- Paul "LeoNerd" Evans <paul@matrix.org>  Fri, 07 Aug 2015 15:32:12 +0100
+
+matrix-synapse (0.9.2-2) wheezy; urgency=medium
+
+  * Supply a default config file
+  * Create directory in /var/lib
+  * Use debconf to ask the user for the server name at installation time
+
+ -- Paul "LeoNerd" Evans <paul@matrix.org>  Thu, 06 Aug 2015 15:28:00 +0100
+
+matrix-synapse (0.9.2-1) wheezy; urgency=low
+
+  * source package automatically created by stdeb 0.8.2
+
+ -- Paul "LeoNerd" Evans <paul@matrix.org>  Fri, 12 Jun 2015 14:32:03 +0100
diff --git a/debian/compat b/debian/compat
new file mode 100644
index 0000000000..ec635144f6
--- /dev/null
+++ b/debian/compat
@@ -0,0 +1 @@
+9
diff --git a/debian/control b/debian/control
new file mode 100644
index 0000000000..4abfa02051
--- /dev/null
+++ b/debian/control
@@ -0,0 +1,40 @@
+Source: matrix-synapse-py3
+Section: contrib/python
+Priority: extra
+Maintainer: Synapse Packaging team <packages@matrix.org>
+Build-Depends:
+ debhelper (>= 9),
+ dh-systemd,
+ dh-virtualenv (>= 1.1),
+ lsb-release,
+ python3-dev,
+ python3,
+ python3-setuptools,
+ python3-pip,
+ python3-venv,
+ tar,
+Standards-Version: 3.9.8
+Homepage: https://github.com/matrix-org/synapse
+
+Package: matrix-synapse-py3
+Architecture: amd64
+Provides: matrix-synapse
+Conflicts:
+ matrix-synapse (<< 0.34.0.1-0matrix2),
+ matrix-synapse (>= 0.34.0.1-1),
+Pre-Depends: dpkg (>= 1.16.1)
+Depends:
+ adduser,
+ debconf,
+ python3-distutils|libpython3-stdlib (<< 3.6),
+ ${misc:Depends},
+ ${synapse:pydepends},
+# some of our scripts use perl, but none of them are important,
+# so we put perl:Depends in Suggests rather than Depends.
+Suggests:
+ sqlite3,
+ ${perl:Depends},
+Description: Open federated Instant Messaging and VoIP server
+ Matrix is an ambitious new ecosystem for open federated Instant
+ Messaging and VoIP. Synapse is a reference Matrix server
+ implementation.
diff --git a/debian/copyright b/debian/copyright
new file mode 100644
index 0000000000..95c21ea12a
--- /dev/null
+++ b/debian/copyright
@@ -0,0 +1,118 @@
+Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: synapse
+Source: https://github.com/matrix-org/synapse
+
+Files: *
+Copyright: 2014-2017, OpenMarket Ltd, 2017-2018 New Vector Ltd
+License: Apache-2.0
+
+Files: synapse/config/saml2.py
+Copyright: 2015, Ericsson
+License: Apache-2.0
+
+Files: synapse/config/jwt.py
+Copyright: 2015, Niklas Riekenbrauck
+License: Apache-2.0
+
+Files: synapse/config/workers.py
+Copyright: 2016, matrix.org
+License: Apache-2.0
+
+Files: synapse/config/repository.py
+Copyright: 2014-2015, matrix.org
+License: Apache-2.0
+
+Files: contrib/jitsimeetbridge/unjingle/strophe/base64.js
+Copyright: Public Domain (Tyler Akins http://rumkin.com)
+License: public-domain
+ This code was written by Tyler Akins and has been placed in the
+ public domain.  It would be nice if you left this header intact.
+ Base64 code from Tyler Akins -- http://rumkin.com
+
+Files: contrib/jitsimeetbridge/unjingle/strophe/md5.js
+Copyright: 1999-2002, Paul Johnston & Contributors
+License: BSD-3-clause
+
+Files: contrib/jitsimeetbridge/unjingle/strophe/strophe.js
+Copyright: 2006-2008, OGG, LLC
+License: Expat
+
+Files: contrib/jitsimeetbridge/unjingle/strophe/XMLHttpRequest.js
+Copyright: 2010 passive.ly LLC
+License: Expat
+
+Files: contrib/jitsimeetbridge/unjingle/*.js
+Copyright: 2014 Jitsi
+License: Apache-2.0
+
+Files: debian/*
+Copyright: 2016-2017, Erik Johnston <erik@matrix.org>
+	   2017, Rahul De <rahulde@swecha.net>
+	   2017, Sunil Mohan Adapa <sunil@medhas.org>
+License: Apache-2.0
+
+License: Apache-2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ .
+ On Debian systems, the full text of the Apache License version
+ 2.0 can be found in the file
+ `/usr/share/common-licenses/Apache-2.0'.
+
+License: BSD-3-clause
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ .
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following
+ disclaimer. Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided with
+ the distribution.
+ .
+ Neither the name of the author nor the names of its contributors may
+ be used to endorse or promote products derived from this software
+ without specific prior written permission.
+ .
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+License: Expat
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+ .
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+ .
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
diff --git a/debian/dirs b/debian/dirs
new file mode 100644
index 0000000000..9d2a980c92
--- /dev/null
+++ b/debian/dirs
@@ -0,0 +1,3 @@
+etc/matrix-synapse
+var/lib/matrix-synapse
+var/log/matrix-synapse
diff --git a/debian/hash_password.1 b/debian/hash_password.1
new file mode 100644
index 0000000000..383f452991
--- /dev/null
+++ b/debian/hash_password.1
@@ -0,0 +1,90 @@
+.\" generated with Ronn/v0.7.3
+.\" http://github.com/rtomayko/ronn/tree/0.7.3
+.
+.TH "HASH_PASSWORD" "1" "February 2017" "" ""
+.
+.SH "NAME"
+\fBhash_password\fR \- Calculate the hash of a new password, so that passwords can be reset
+.
+.SH "SYNOPSIS"
+\fBhash_password\fR [\fB\-p\fR|\fB\-\-password\fR [password]] [\fB\-c\fR|\fB\-\-config\fR \fIfile\fR]
+.
+.SH "DESCRIPTION"
+\fBhash_password\fR calculates the hash of a supplied password using bcrypt\.
+.
+.P
+\fBhash_password\fR takes a password as a parameter, either on the command line or from \fBSTDIN\fR if not supplied\.
+.
+.P
+It accepts a YAML file which can be used to specify parameters such as the number of rounds for bcrypt, and a \fBpassword_config\fR section containing the pepper value used for the hashing\. By default \fBbcrypt_rounds\fR is set to \fB10\fR\.
+.
+.P
+The hashed password is written to \fBSTDOUT\fR\.
+.
+.SH "FILES"
+A sample YAML file accepted by \fBhash_password\fR is described below:
+.
+.IP "" 4
+.
+.nf
+
+bcrypt_rounds: 17
+password_config:
+  pepper: "random hashing pepper"
+.
+.fi
+.
+.IP "" 0
+.
+.SH "OPTIONS"
+.
+.TP
+\fB\-p\fR, \fB\-\-password\fR
+Read the password from the command line if [password] is supplied\. If not, prompt the user and read the password from \fBSTDIN\fR\. It is not recommended to type the password on the command line directly; use \fBSTDIN\fR instead\.
+.
+.TP
+\fB\-c\fR, \fB\-\-config\fR
+Read the supplied YAML \fIfile\fR containing the options \fBbcrypt_rounds\fR and the \fBpassword_config\fR section containing the \fBpepper\fR value\.
+.
+.SH "EXAMPLES"
+Hash from the command line:
+.
+.IP "" 4
+.
+.nf
+
+$ hash_password \-p "p@ssw0rd"
+$2b$12$VJNqWQYfsWTEwcELfoSi4Oa8eA17movHqqi8\.X8fWFpum7SxZ9MFe
+.
+.fi
+.
+.IP "" 0
+.
+.P
+Hash from the STDIN:
+.
+.IP "" 4
+.
+.nf
+
+$ hash_password
+Password:
+Confirm password:
+$2b$12$AszlvfmJl2esnyhmn8m/kuR2tdXgROWtWxnX\.rcuAbM8ErLoUhybG
+.
+.fi
+.
+.IP "" 0
+.
+.P
+Using a config file:
+.
+.IP "" 4
+.
+.nf
+
+$ hash_password \-c config\.yml
+Password:
+Confirm password:
+$2b$12$CwI\.wBNr\.w3kmiUlV3T5s\.GT2wH7uebDCovDrCOh18dFedlANK99O
+.
+.fi
+.
+.IP "" 0
+.
+.SH "COPYRIGHT"
+This man page was written by Rahul De <\fIrahulde@swecha\.net\fR> for the Debian GNU/Linux distribution\.
+.
+.SH "SEE ALSO"
+synctl(1), synapse_port_db(1), register_new_matrix_user(1)
diff --git a/debian/hash_password.ronn b/debian/hash_password.ronn
new file mode 100644
index 0000000000..0b2afa7374
--- /dev/null
+++ b/debian/hash_password.ronn
@@ -0,0 +1,69 @@
+hash_password(1) -- Calculate the hash of a new password, so that passwords can be reset
+========================================================================================
+
+## SYNOPSIS
+
+`hash_password` [`-p`|`--password` [password]] [`-c`|`--config` <file>]
+
+## DESCRIPTION
+
+**hash_password** calculates the hash of a supplied password using bcrypt.
+
+`hash_password` takes a password as a parameter, either on the command line
+or from `STDIN` if not supplied.
+
+It accepts a YAML file which can be used to specify parameters such as the
+number of rounds for bcrypt, and a `password_config` section containing the
+pepper value used for the hashing. By default `bcrypt_rounds` is set to **10**.
+
+The hashed password is written to `STDOUT`.
+
+## FILES
+
+A sample YAML file accepted by `hash_password` is described below:
+
+    bcrypt_rounds: 17
+    password_config:
+      pepper: "random hashing pepper"
+
+## OPTIONS
+
+  * `-p`, `--password`:
+    Read the password from the command line if [password] is supplied.
+    If not, prompt the user and read the password from `STDIN`.
+    It is not recommended to type the password on the command line
+    directly; use `STDIN` instead.
+
+  * `-c`, `--config`:
+    Read the supplied YAML <file> containing the options `bcrypt_rounds`
+    and the `password_config` section containing the `pepper` value.
+
+## EXAMPLES
+
+Hash from the command line:
+
+    $ hash_password -p "p@ssw0rd"
+    $2b$12$VJNqWQYfsWTEwcELfoSi4Oa8eA17movHqqi8.X8fWFpum7SxZ9MFe
+
+Hash from the STDIN:
+
+    $ hash_password
+    Password:
+    Confirm password:
+    $2b$12$AszlvfmJl2esnyhmn8m/kuR2tdXgROWtWxnX.rcuAbM8ErLoUhybG
+
+Using a config file:
+
+    $ hash_password -c config.yml
+    Password:
+    Confirm password:
+    $2b$12$CwI.wBNr.w3kmiUlV3T5s.GT2wH7uebDCovDrCOh18dFedlANK99O
+
+## COPYRIGHT
+
+This man page was written by Rahul De <<rahulde@swecha.net>>
+for the Debian GNU/Linux distribution.
+
+## SEE ALSO
+
+synctl(1), synapse_port_db(1), register_new_matrix_user(1)
diff --git a/debian/install b/debian/install
new file mode 100644
index 0000000000..43dc8c6904
--- /dev/null
+++ b/debian/install
@@ -0,0 +1,2 @@
+debian/log.yaml etc/matrix-synapse
+debian/manage_debconf.pl /opt/venvs/matrix-synapse/lib/
diff --git a/debian/log.yaml b/debian/log.yaml
new file mode 100644
index 0000000000..206b65f1ac
--- /dev/null
+++ b/debian/log.yaml
@@ -0,0 +1,36 @@
+
+version: 1
+
+formatters:
+  precise:
+    format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
+
+filters:
+  context:
+    (): synapse.util.logcontext.LoggingContextFilter
+    request: ""
+
+handlers:
+  file:
+    class: logging.handlers.RotatingFileHandler
+    formatter: precise
+    filename: /var/log/matrix-synapse/homeserver.log
+    maxBytes: 104857600
+    backupCount: 10
+    filters: [context]
+    encoding: utf8
+  console:
+    class: logging.StreamHandler
+    formatter: precise
+    level: WARN
+
+loggers:
+    synapse:
+        level: INFO
+
+    synapse.storage.SQL:
+        level: INFO
+
+root:
+    level: INFO
+    handlers: [file, console]
diff --git a/debian/manage_debconf.pl b/debian/manage_debconf.pl
new file mode 100755
index 0000000000..be8ed32050
--- /dev/null
+++ b/debian/manage_debconf.pl
@@ -0,0 +1,130 @@
+#!/usr/bin/perl
+#
+# Interface between our config files and the debconf database.
+#
+# Usage:
+#
+#   manage_debconf.pl <action>
+#
+# where <action> can be:
+#
+#   read:    read the configuration from the yaml into debconf
+#   update:  update the yaml config according to the debconf database
+use strict;
+use warnings;
+
+use Debconf::Client::ConfModule (qw/get set/);
+
+# map from the name of a setting in our .yaml file to the relevant debconf
+# setting.
+my %MAPPINGS=(
+    server_name => 'matrix-synapse/server-name',
+    report_stats => 'matrix-synapse/report-stats',
+);
+
+# enable debug if dpkg --debug
+my $DEBUG = $ENV{DPKG_MAINTSCRIPT_DEBUG};
+
+sub read_config {
+    my @files = @_;
+
+    foreach my $file (@files)  {
+        print STDERR "reading $file\n" if $DEBUG;
+
+        open my $FH, "<", $file or next;
+
+        # rudimentary parsing which (a) avoids having to depend on a yaml library,
+        # and (b) is tolerant of yaml errors
+        while($_ = <$FH>) {
+            while (my ($setting, $debconf) = each %MAPPINGS) {
+                $setting = quotemeta $setting;
+                if(/^${setting}\s*:(.*)$/) {
+                    my $val = $1;
+
+                    # remove leading/trailing whitespace
+                    $val =~ s/^\s*//;
+                    $val =~ s/\s*$//;
+
+                    # remove surrounding quotes
+                    if ($val =~ /^"(.*)"$/ || $val =~ /^'(.*)'$/) {
+                        $val = $1;
+                    }
+
+                    print STDERR ">> $debconf = $val\n" if $DEBUG;
+                    set($debconf, $val);
+                }
+            }
+        }
+        close $FH;
+    }
+}
+
+sub update_config {
+    my @files = @_;
+
+    my %substs = ();
+    while (my ($setting, $debconf) = each %MAPPINGS) {
+        my @res = get($debconf);
+        $substs{$setting} = $res[1] if $res[0] == 0;
+    }
+
+    foreach my $file (@files) {
+        print STDERR "checking $file\n" if $DEBUG;
+
+        open my $FH, "<", $file or next;
+
+        my $updated = 0;
+
+        # read the whole file into memory
+        my @lines = <$FH>;
+
+        while (my ($setting, $val) = each %substs) {
+            $setting = quotemeta $setting;
+
+            map {
+                if (/^${setting}\s*:\s*(.*)\s*$/) {
+                    my $current = $1;
+                    if ($val ne $current) {
+                        $_ = "${setting}: $val\n";
+                        $updated = 1;
+                    }
+                }
+            } @lines;
+        }
+        close $FH;
+
+        next unless $updated;
+
+        print STDERR "updating $file\n" if $DEBUG;
+        open $FH, ">", $file or die "unable to update $file";
+        print $FH @lines;
+        close $FH;
+    }
+}
+
+
+my $cmd = $ARGV[0];
+
+my $read = 0;
+my $update = 0;
+
+if (not $cmd) {
+    die "must specify a command to perform\n";
+} elsif ($cmd eq 'read') {
+    $read = 1;
+} elsif ($cmd eq 'update') {
+    $update = 1;
+} else {
+    die "unknown command '$cmd'\n";
+}
+
+my @files = (
+    "/etc/matrix-synapse/homeserver.yaml",
+    glob("/etc/matrix-synapse/conf.d/*.yaml"),
+);
+
+if ($read) {
+    read_config(@files);
+} elsif ($update) {
+    update_config(@files);
+}
diff --git a/debian/manpages b/debian/manpages
new file mode 100644
index 0000000000..2c30583530
--- /dev/null
+++ b/debian/manpages
@@ -0,0 +1,4 @@
+debian/hash_password.1
+debian/register_new_matrix_user.1
+debian/synapse_port_db.1
+debian/synctl.1
diff --git a/debian/matrix-synapse-py3.config b/debian/matrix-synapse-py3.config
new file mode 100755
index 0000000000..3bda3292f1
--- /dev/null
+++ b/debian/matrix-synapse-py3.config
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+set -e
+
+. /usr/share/debconf/confmodule
+
+# try to update the debconf db according to whatever is in the config files
+/opt/venvs/matrix-synapse/lib/manage_debconf.pl read || true
+
+db_input high matrix-synapse/server-name || true
+db_input high matrix-synapse/report-stats || true
+db_go
diff --git a/debian/matrix-synapse-py3.links b/debian/matrix-synapse-py3.links
new file mode 100644
index 0000000000..bf19efa562
--- /dev/null
+++ b/debian/matrix-synapse-py3.links
@@ -0,0 +1,4 @@
+opt/venvs/matrix-synapse/bin/hash_password usr/bin/hash_password
+opt/venvs/matrix-synapse/bin/register_new_matrix_user usr/bin/register_new_matrix_user
+opt/venvs/matrix-synapse/bin/synapse_port_db usr/bin/synapse_port_db
+opt/venvs/matrix-synapse/bin/synctl usr/bin/synctl
diff --git a/debian/matrix-synapse-py3.postinst b/debian/matrix-synapse-py3.postinst
new file mode 100644
index 0000000000..c0dd7e5534
--- /dev/null
+++ b/debian/matrix-synapse-py3.postinst
@@ -0,0 +1,56 @@
+#!/bin/sh -e
+
+. /usr/share/debconf/confmodule
+
+CONFIGFILE_SERVERNAME="/etc/matrix-synapse/conf.d/server_name.yaml"
+CONFIGFILE_REPORTSTATS="/etc/matrix-synapse/conf.d/report_stats.yaml"
+USER="matrix-synapse"
+
+case "$1" in
+  configure|reconfigure)
+
+    # generate template config files if they don't exist
+    mkdir -p "/etc/matrix-synapse/conf.d/"
+    if [ ! -e "$CONFIGFILE_SERVERNAME" ]; then
+        cat > "$CONFIGFILE_SERVERNAME" <<EOF
+# This file is autogenerated, and will be recreated on upgrade if it is deleted.
+# Any changes you make will be preserved.
+
+# The domain name of the server, with optional explicit port.
+# This is used by remote servers to connect to this server,
+# e.g. matrix.org, localhost:8080, etc.
+# This is also the last part of your UserID.
+#
+server_name: ''
+EOF
+    fi
+
+    if [ ! -e "$CONFIGFILE_REPORTSTATS" ]; then
+        cat > "$CONFIGFILE_REPORTSTATS" <<EOF
+# This file is autogenerated, and will be recreated on upgrade if it is deleted.
+# Any changes you make will be preserved.
+
+# Whether to report anonymized homeserver usage statistics.
+report_stats: false
+EOF
+    fi
+
+    # update the config files according to whatever is in the debconf database
+    /opt/venvs/matrix-synapse/lib/manage_debconf.pl update
+
+    if ! getent passwd $USER >/dev/null; then
+      adduser --quiet --system --no-create-home --home /var/lib/matrix-synapse $USER
+    fi
+
+    for DIR in /var/lib/matrix-synapse /var/log/matrix-synapse /etc/matrix-synapse; do
+      if ! dpkg-statoverride --list --quiet $DIR >/dev/null; then
+        dpkg-statoverride --force --quiet --update --add $USER nogroup 0755 $DIR
+      fi
+    done
+
+    ;;
+esac
+
+#DEBHELPER#
+
+exit 0
diff --git a/debian/matrix-synapse-py3.preinst b/debian/matrix-synapse-py3.preinst
new file mode 100644
index 0000000000..4b5612f050
--- /dev/null
+++ b/debian/matrix-synapse-py3.preinst
@@ -0,0 +1,31 @@
+#!/bin/sh -e
+
+# Attempt to undo some of the braindamage caused by
+# https://github.com/matrix-org/package-synapse-debian/issues/18.
+#
+# Due to reasons [1], the old python2 matrix-synapse package will not stop the
+# service when the package is uninstalled. Our maintainer scripts will do the
+# right thing in terms of ensuring the service is enabled and unmasked, but
+# then do a `systemctl start matrix-synapse`, which of course does nothing -
+# leaving the old (py2) service running.
+#
+# There should normally be no reason for the service to be running during our
+# preinst, so we assume that if it *is* running, it's due to that situation,
+# and stop it.
+#
+# [1] dh_systemd_start doesn't do anything because it sees that there is an
+#     init.d script with the same name, so leaves it to dh_installinit.
+#
+#     dh_installinit doesn't do anything because somebody gave it a --no-start
+#     for unknown reasons.
+
+if [ -x /bin/systemctl ]; then
+    if /bin/systemctl --quiet is-active -- matrix-synapse; then
+        echo >&2 "stopping existing matrix-synapse service"
+        /bin/systemctl stop matrix-synapse || true
+    fi
+fi
+
+#DEBHELPER#
+
+exit 0
diff --git a/debian/matrix-synapse-py3.triggers b/debian/matrix-synapse-py3.triggers
new file mode 100644
index 0000000000..f8c1fdb021
--- /dev/null
+++ b/debian/matrix-synapse-py3.triggers
@@ -0,0 +1,9 @@
+# Register interest in Python interpreter changes and
+# don't make the Python package dependent on the virtualenv package
+# processing (noawait)
+interest-noawait /usr/bin/python3.5
+interest-noawait /usr/bin/python3.6
+interest-noawait /usr/bin/python3.7
+
+# Also provide a symbolic trigger for all dh-virtualenv packages
+interest dh-virtualenv-interpreter-update
diff --git a/debian/matrix-synapse.default b/debian/matrix-synapse.default
new file mode 100644
index 0000000000..65dc2f33d8
--- /dev/null
+++ b/debian/matrix-synapse.default
@@ -0,0 +1,2 @@
+# Specify environment variables used when running Synapse
+# SYNAPSE_CACHE_FACTOR=1 (default)
diff --git a/debian/matrix-synapse.service b/debian/matrix-synapse.service
new file mode 100644
index 0000000000..942e4b83fe
--- /dev/null
+++ b/debian/matrix-synapse.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=Synapse Matrix homeserver
+
+[Service]
+Type=simple
+User=matrix-synapse
+WorkingDirectory=/var/lib/matrix-synapse
+EnvironmentFile=/etc/default/matrix-synapse
+ExecStartPre=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/ --generate-keys
+ExecStart=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/
+ExecReload=/bin/kill -HUP $MAINPID
+Restart=always
+RestartSec=3
+
+[Install]
+WantedBy=multi-user.target
diff --git a/debian/po/POTFILES.in b/debian/po/POTFILES.in
new file mode 100644
index 0000000000..cef83a3407
--- /dev/null
+++ b/debian/po/POTFILES.in
@@ -0,0 +1 @@
+[type: gettext/rfc822deb] templates
diff --git a/debian/po/templates.pot b/debian/po/templates.pot
new file mode 100644
index 0000000000..84d960761a
--- /dev/null
+++ b/debian/po/templates.pot
@@ -0,0 +1,56 @@
+# SOME DESCRIPTIVE TITLE.
+# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
+# This file is distributed under the same license as the matrix-synapse package.
+# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: matrix-synapse\n"
+"Report-Msgid-Bugs-To: matrix-synapse@packages.debian.org\n"
+"POT-Creation-Date: 2017-02-21 07:51+0000\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
+"Language-Team: LANGUAGE <LL@li.org>\n"
+"Language: \n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=CHARSET\n"
+"Content-Transfer-Encoding: 8bit\n"
+
+#. Type: string
+#. Description
+#: ../templates:1001
+msgid "Name of the server:"
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../templates:1001
+msgid ""
+"The name that this homeserver will appear as, to clients and other servers "
+"via federation. This name should match the SRV record published in DNS."
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../templates:2001
+msgid "Report anonymous statistics?"
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../templates:2001
+msgid ""
+"Developers of Matrix and Synapse really appreciate helping the project out "
+"by reporting anonymized usage statistics from this homeserver. Only very "
+"basic aggregate data (e.g. number of users) will be reported, but it helps "
+"track the growth of the Matrix community, and helps in making Matrix a "
+"success, as well as to convince other networks that they should peer with "
+"Matrix."
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../templates:2001
+msgid "Thank you."
+msgstr ""
diff --git a/debian/register_new_matrix_user.1 b/debian/register_new_matrix_user.1
new file mode 100644
index 0000000000..99156a7354
--- /dev/null
+++ b/debian/register_new_matrix_user.1
@@ -0,0 +1,72 @@
+.\" generated with Ronn/v0.7.3
+.\" http://github.com/rtomayko/ronn/tree/0.7.3
+.
+.TH "REGISTER_NEW_MATRIX_USER" "1" "February 2017" "" ""
+.
+.SH "NAME"
+\fBregister_new_matrix_user\fR \- Used to register new users with a given home server when registration has been disabled
+.
+.SH "SYNOPSIS"
+\fBregister_new_matrix_user\fR options\.\.\.
+.
+.SH "DESCRIPTION"
+\fBregister_new_matrix_user\fR registers new users with a given home server when registration has been disabled\. For this to work, the home server must be configured with the \'registration_shared_secret\' option set\.
+.
+.P
+It accepts the user credentials (the username, the password, and whether the user should be an admin) and registers the user in the homeserver database\. A YAML file containing the shared secret can also be provided; if not, the shared secret can be supplied via the command line\.
+.
+.P
+By default it assumes the home server URL to be \fBhttps://localhost:8448\fR\. This can be changed via the \fBserver_url\fR command line option\.
+.
+.SH "FILES"
+A sample YAML file accepted by \fBregister_new_matrix_user\fR is described below:
+.
+.IP "" 4
+.
+.nf
+
+registration_shared_secret: "s3cr3t"
+.
+.fi
+.
+.IP "" 0
+.
+.SH "OPTIONS"
+.
+.TP
+\fB\-u\fR, \fB\-\-user\fR
+Local part of the new user\. Will prompt if omitted\.
+.
+.TP
+\fB\-p\fR, \fB\-\-password\fR
+New password for user\. Will prompt if omitted\. Supplying the password on the command line is not recommended; use \fBSTDIN\fR instead\.
+.
+.TP
+\fB\-a\fR, \fB\-\-admin\fR
+Register new user as an admin\. Will prompt if omitted\.
+.
+.TP
+\fB\-c\fR, \fB\-\-config\fR
+Path to server config file containing the shared secret\.
+.
+.TP
+\fB\-k\fR, \fB\-\-shared\-secret\fR
+Shared secret as defined in the server config file\. This is an optional parameter, as it can also be supplied via the YAML file\.
+.
+.TP
+\fBserver_url\fR
+URL of the home server\. Defaults to \'https://localhost:8448\'\.
+.
+.SH "EXAMPLES"
+.
+.nf
+
+$ register_new_matrix_user \-u user1 \-p p@ssword \-a \-c config\.yaml
+.
+.fi
+.
+.SH "COPYRIGHT"
+This man page was written by Rahul De <\fIrahulde@swecha\.net\fR> for the Debian GNU/Linux distribution\.
+.
+.SH "SEE ALSO"
+synctl(1), synapse_port_db(1), hash_password(1)
diff --git a/debian/register_new_matrix_user.ronn b/debian/register_new_matrix_user.ronn
new file mode 100644
index 0000000000..4c22e74dde
--- /dev/null
+++ b/debian/register_new_matrix_user.ronn
@@ -0,0 +1,61 @@
+register_new_matrix_user(1) -- Used to register new users with a given home server when registration has been disabled
+======================================================================================================================
+
+## SYNOPSIS
+
+`register_new_matrix_user` options...
+
+## DESCRIPTION
+
+**register_new_matrix_user** registers new users with a given home server when
+registration has been disabled. For this to work, the home server must be
+configured with the 'registration_shared_secret' option set.
+
+It accepts the user credentials (the username, the password, and whether
+the user should be an admin) and registers the user in the homeserver
+database. A YAML file containing the shared secret can also be provided;
+if not, the shared secret can be supplied via the command line.
+
+By default it assumes the home server URL to be `https://localhost:8448`.
+This can be changed via the `server_url` command line option.
+
+## FILES
+
+A sample YAML file accepted by `register_new_matrix_user` is described below:
+
+    registration_shared_secret: "s3cr3t"
+
+## OPTIONS
+
+  * `-u`, `--user`:
+    Local part of the new user. Will prompt if omitted.
+
+  * `-p`, `--password`:
+    New password for user. Will prompt if omitted. Supplying the password
+    on the command line is not recommended; use `STDIN` instead.
+
+  * `-a`, `--admin`:
+    Register new user as an admin. Will prompt if omitted.
+
+  * `-c`, `--config`:
+    Path to server config file containing the shared secret.
+
+  * `-k`, `--shared-secret`:
+    Shared secret as defined in the server config file. This is an optional
+    parameter, as it can also be supplied via the YAML file.
+
+  * `server_url`:
+    URL of the home server. Defaults to 'https://localhost:8448'.
+
+## EXAMPLES
+
+    $ register_new_matrix_user -u user1 -p p@ssword -a -c config.yaml
+
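+To register a user on a remote homeserver, pass `server_url` as well (the
+URL below is hypothetical, for illustration):
+
+    $ register_new_matrix_user -u user1 -c config.yaml https://matrix.example.com:8448
+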
+## COPYRIGHT
+
+This man page was written by Rahul De <<rahulde@swecha.net>>
+for the Debian GNU/Linux distribution.
+
+## SEE ALSO
+
+synctl(1), synapse_port_db(1), hash_password(1)
diff --git a/debian/rules b/debian/rules
new file mode 100755
index 0000000000..05cbbdde08
--- /dev/null
+++ b/debian/rules
@@ -0,0 +1,22 @@
+#!/usr/bin/make -f
+#
+# Build Debian package using https://github.com/spotify/dh-virtualenv
+#
+
+override_dh_systemd_enable:
+	dh_systemd_enable --name=matrix-synapse
+
+override_dh_installinit:
+	dh_installinit --name=matrix-synapse
+
+override_dh_strip:
+
+override_dh_shlibdeps:
+
+override_dh_virtualenv:
+	./debian/build_virtualenv
+
+# We are restricted to compat level 9 (because xenial), so have to
+# enable the systemd bits manually.
+%:
+	dh $@ --with python-virtualenv --with systemd
diff --git a/debian/source/format b/debian/source/format
new file mode 100644
index 0000000000..89ae9db8f8
--- /dev/null
+++ b/debian/source/format
@@ -0,0 +1 @@
+3.0 (native)
diff --git a/debian/synapse_port_db.1 b/debian/synapse_port_db.1
new file mode 100644
index 0000000000..4e6bc04827
--- /dev/null
+++ b/debian/synapse_port_db.1
@@ -0,0 +1,98 @@
+.\" generated with Ronn/v0.7.3
+.\" http://github.com/rtomayko/ronn/tree/0.7.3
+.
+.TH "SYNAPSE_PORT_DB" "1" "February 2017" "" ""
+.
+.SH "NAME"
+\fBsynapse_port_db\fR \- A script to port an existing synapse SQLite database to a new PostgreSQL database\.
+.
+.SH "SYNOPSIS"
+\fBsynapse_port_db\fR [\-v] \-\-sqlite\-database=\fIdbfile\fR \-\-postgres\-config=\fIyamlconfig\fR [\-\-curses] [\-\-batch\-size=\fIbatch\-size\fR]
+.
+.SH "DESCRIPTION"
+\fBsynapse_port_db\fR ports an existing synapse SQLite database to a new PostgreSQL database\.
+.
+.P
+The SQLite database is specified with the \fB\-\-sqlite\-database\fR option, and the configuration required to connect to the PostgreSQL database is provided using the \fB\-\-postgres\-config\fR option\. The configuration is specified in YAML format\.
+.
+.SH "OPTIONS"
+.
+.TP
+\fB\-v\fR
+Print log messages in \fBdebug\fR level instead of \fBinfo\fR level\.
+.
+.TP
+\fB\-\-sqlite\-database\fR
+The snapshot of the SQLite database file\. This must not be currently used by a running synapse server\.
+.
+.TP
+\fB\-\-postgres\-config\fR
+The database config file for the PostgreSQL database\.
+.
+.TP
+\fB\-\-curses\fR
+Display a curses based progress UI\.
+.
+.SH "CONFIG FILE"
+The postgres configuration file must be a valid YAML file with the following options\.
+.
+.IP "\(bu" 4
+\fBdatabase\fR: Database configuration section\. This section header can be ignored and the options below may be specified as top level keys\.
+.
+.IP "\(bu" 4
+\fBname\fR: Connector to use when connecting to the database\. This value must be \fBpsycopg2\fR\.
+.
+.IP "\(bu" 4
+\fBargs\fR: DB API 2\.0 compatible arguments to send to the \fBpsycopg2\fR module\.
+.
+.IP "\(bu" 4
+\fBdbname\fR \- the database name
+.
+.IP "\(bu" 4
+\fBuser\fR \- user name used to authenticate
+.
+.IP "\(bu" 4
+\fBpassword\fR \- password used to authenticate
+.
+.IP "\(bu" 4
+\fBhost\fR \- database host address (defaults to UNIX socket if not provided)
+.
+.IP "\(bu" 4
+\fBport\fR \- connection port number (defaults to 5432 if not provided)
+.
+.IP "" 0
+
+.
+.IP "\(bu" 4
+\fBsynchronous_commit\fR: Optional\. Defaults to \fBTrue\fR\. If the value is \fBFalse\fR, asynchronous commit is enabled, and the transaction will not wait for the server to call fsync before completing\. See: https://www\.postgresql\.org/docs/current/static/wal\-async\-commit\.html
+.
+.IP "" 0
+
+.
+.IP "" 0
+.
+.P
+The following example illustrates the configuration file format\.
+.
+.IP "" 4
+.
+.nf
+
+database:
+  name: psycopg2
+  args:
+    dbname: synapsedb
+    user: synapseuser
+    password: ORohmi9Eet=ohphi
+    host: localhost
+  synchronous_commit: false
+.
+.fi
+.
+.IP "" 0
+.
+.SH "COPYRIGHT"
+This man page was written by Sunil Mohan Adapa <\fIsunil@medhas\.org\fR> for the Debian GNU/Linux distribution\.
+.
+.SH "SEE ALSO"
+synctl(1), hash_password(1), register_new_matrix_user(1)
diff --git a/debian/synapse_port_db.ronn b/debian/synapse_port_db.ronn
new file mode 100644
index 0000000000..fcb32ebd0d
--- /dev/null
+++ b/debian/synapse_port_db.ronn
@@ -0,0 +1,87 @@
+synapse_port_db(1) -- A script to port an existing synapse SQLite database to a new PostgreSQL database.
+=========================================================================================================
+
+## SYNOPSIS
+
+`synapse_port_db` [-v] --sqlite-database=<dbfile> --postgres-config=<yamlconfig> [--curses] [--batch-size=<batch-size>]
+
+## DESCRIPTION
+
+**synapse_port_db** ports an existing synapse SQLite database to a new
+PostgreSQL database.
+
+The SQLite database is specified with the `--sqlite-database` option, and
+the configuration required to connect to the PostgreSQL database is
+provided using the `--postgres-config` option.  The configuration is
+specified in YAML format.
+
+## OPTIONS
+
+  * `-v`:
+    Print log messages in `debug` level instead of `info` level.
+
+  * `--sqlite-database`:
+    The snapshot of the SQLite database file. This must not be
+    currently used by a running synapse server.
+
+  * `--postgres-config`:
+    The database config file for the PostgreSQL database.
+
+  * `--curses`:
+    Display a curses based progress UI.
+
+## CONFIG FILE
+
+The postgres configuration file must be a valid YAML file with the
+following options.
+
+  * `database`:
+    Database configuration section.  This section header can be
+    ignored and the options below may be specified as top level
+    keys.
+
+    * `name`:
+      Connector to use when connecting to the database.  This value must
+      be `psycopg2`.
+
+    * `args`:
+      DB API 2.0 compatible arguments to send to the `psycopg2` module.
+
+      * `dbname` - the database name 
+
+      * `user` - user name used to authenticate
+
+      * `password` - password used to authenticate
+
+      * `host` - database host address (defaults to UNIX socket if not
+        provided)
+
+      * `port` - connection port number (defaults to 5432 if not
+        provided)
+      
+
+    * `synchronous_commit`:
+      Optional.  Defaults to `True`.  If the value is `False`, asynchronous
+      commit is enabled, and the transaction will not wait for the server
+      to call fsync before completing. See:
+      https://www.postgresql.org/docs/current/static/wal-async-commit.html
+
+The following example illustrates the configuration file format.
+
+    database:
+      name: psycopg2
+      args:
+        dbname: synapsedb
+        user: synapseuser
+        password: ORohmi9Eet=ohphi
+        host: localhost
+      synchronous_commit: false
+  
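+A porting run using this configuration might then look like the following
+(a sketch; the file names are hypothetical):
+
+    $ synapse_port_db --curses --sqlite-database homeserver.db.snapshot --postgres-config database_config.yaml
+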
+## COPYRIGHT
+
+This man page was written by Sunil Mohan Adapa <<sunil@medhas.org>> for
+the Debian GNU/Linux distribution.
+
+## SEE ALSO
+
+synctl(1), hash_password(1), register_new_matrix_user(1)
diff --git a/debian/synctl.1 b/debian/synctl.1
new file mode 100644
index 0000000000..437f8f9e0e
--- /dev/null
+++ b/debian/synctl.1
@@ -0,0 +1,63 @@
+.\" generated with Ronn/v0.7.3
+.\" http://github.com/rtomayko/ronn/tree/0.7.3
+.
+.TH "SYNCTL" "1" "February 2017" "" ""
+.
+.SH "NAME"
+\fBsynctl\fR \- Synapse server control interface
+.
+.SH "SYNOPSIS"
+Start, stop or restart the Synapse server\.
+.
+.P
+\fBsynctl\fR {start|stop|restart} [configfile] [\-w|\-\-worker=\fIWORKERCONFIG\fR] [\-a|\-\-all\-processes=\fIWORKERCONFIGDIR\fR]
+.
+.SH "DESCRIPTION"
+\fBsynctl\fR can be used to start, stop or restart the Synapse server\. The control operation can be done on all processes or a single worker process\.
+.
+.SH "OPTIONS"
+.
+.TP
+\fBaction\fR
+The value of action should be one of \fBstart\fR, \fBstop\fR or \fBrestart\fR\.
+.
+.TP
+\fBconfigfile\fR
+Optional path of the configuration file to use\. Default value is \fBhomeserver\.yaml\fR\. The configuration file must exist for the operation to succeed\.
+.
+.TP
+\fB\-w\fR, \fB\-\-worker\fR:
+.
+.IP
+Perform start, stop or restart operations on a single worker\. Incompatible with \fB\-a\fR|\fB\-\-all\-processes\fR\. Value passed must be a valid worker\'s configuration file\.
+.
+.TP
+\fB\-a\fR, \fB\-\-all\-processes\fR:
+.
+.IP
+Perform start, stop or restart operations on all the workers in the given directory and the main synapse process\. Incompatible with \fB\-w\fR|\fB\-\-worker\fR\. Value passed must be a directory containing valid worker configuration files\. All files with the \fB\.yaml\fR extension are treated as configuration files; all other files in the directory are ignored\.
+.
+.SH "CONFIGURATION FILE"
+Configuration file may be generated as follows:
+.
+.IP "" 4
+.
+.nf
+
+$ python \-B \-m synapse\.app\.homeserver \-c config\.yaml \-\-generate\-config \-\-server\-name=<server name>
+.
+.fi
+.
+.IP "" 0
+.
+.SH "ENVIRONMENT"
+.
+.TP
+\fBSYNAPSE_CACHE_FACTOR\fR
+Synapse\'s architecture is quite RAM hungry currently \- a lot of recent room data and metadata is deliberately cached in RAM in order to speed up common requests\. This will be improved in future, but for now the easiest way to reduce the RAM usage (at the risk of slowing things down) is to set the SYNAPSE_CACHE_FACTOR environment variable\. Roughly speaking, a SYNAPSE_CACHE_FACTOR of 1\.0 will max out at around 3\-4GB of resident memory \- this is what we currently run matrix\.org on\. The default setting is currently 0\.1, which is probably around a 700MB footprint\. You can dial it down further to 0\.02 if desired, which targets roughly 512MB\. Conversely you can dial it up if you need performance for lots of users and have a box with a lot of RAM\.
+.
+.SH "COPYRIGHT"
+This man page was written by Sunil Mohan Adapa <\fIsunil@medhas\.org\fR> for the Debian GNU/Linux distribution\.
+.
+.SH "SEE ALSO"
+synapse_port_db(1), hash_password(1), register_new_matrix_user(1)
diff --git a/debian/synctl.ronn b/debian/synctl.ronn
new file mode 100644
index 0000000000..a73c832f62
--- /dev/null
+++ b/debian/synctl.ronn
@@ -0,0 +1,70 @@
+synctl(1) -- Synapse server control interface
+=============================================
+
+## SYNOPSIS
+  Start, stop or restart the Synapse server.
+
+`synctl` {start|stop|restart} [configfile] [-w|--worker=<WORKERCONFIG>] [-a|--all-processes=<WORKERCONFIGDIR>]
+
+## DESCRIPTION
+
+**synctl** can be used to start, stop or restart the Synapse server.  The
+control operation can be done on all processes or a single worker
+process.
+
+## OPTIONS
+
+  * `action`:
+    The value of action should be one of `start`, `stop` or `restart`.
+
+  * `configfile`:
+    Optional path of the configuration file to use.  Default value is
+    `homeserver.yaml`.  The configuration file must exist for the
+    operation to succeed.
+
+  * `-w`, `--worker`:
+
+    Perform start, stop or restart operations on a single worker.
+    Incompatible with `-a`|`--all-processes`.  Value passed must be a
+    valid worker's configuration file.
+
+  * `-a`, `--all-processes`:
+
+    Perform start, stop or restart operations on all the workers in
+    the given directory and the main synapse process. Incompatible
+    with `-w`|`--worker`.  Value passed must be a directory containing
+    valid worker configuration files.  All files with the `.yaml`
+    extension are treated as configuration files; all other files in
+    the directory are ignored.
+
+## CONFIGURATION FILE
+
+Configuration file may be generated as follows:
+
+    $ python -B -m synapse.app.homeserver -c config.yaml --generate-config --server-name=<server name>
+
+## ENVIRONMENT
+
+  * `SYNAPSE_CACHE_FACTOR`:
+    Synapse's architecture is quite RAM hungry currently - a lot of
+    recent room data and metadata is deliberately cached in RAM in
+    order to speed up common requests.  This will be improved in
+    future, but for now the easiest way to reduce the RAM usage
+    (at the risk of slowing things down) is to set the
+    SYNAPSE_CACHE_FACTOR environment variable. Roughly speaking, a
+    SYNAPSE_CACHE_FACTOR of 1.0 will max out at around 3-4GB of
+    resident memory - this is what we currently run matrix.org
+    on. The default setting is currently 0.1, which is probably around
+    a 700MB footprint. You can dial it down further to 0.02 if
+    desired, which targets roughly 512MB. Conversely you can dial it
+    up if you need performance for lots of users and have a box with a
+    lot of RAM.
+
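+For example, on a Debian system the variable can be set in the
+`/etc/default/matrix-synapse` environment file read by the systemd unit
+(a minimal sketch):
+
+    # /etc/default/matrix-synapse
+    SYNAPSE_CACHE_FACTOR=0.02
+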
+## COPYRIGHT
+
+This man page was written by Sunil Mohan Adapa <<sunil@medhas.org>> for
+the Debian GNU/Linux distribution.
+
+## SEE ALSO
+
+synapse_port_db(1), hash_password(1), register_new_matrix_user(1)
diff --git a/debian/templates b/debian/templates
new file mode 100644
index 0000000000..647358731c
--- /dev/null
+++ b/debian/templates
@@ -0,0 +1,19 @@
+Template: matrix-synapse/server-name
+Type: string
+_Description: Name of the server:
+ The name that this homeserver will appear as, to clients and other
+ servers via federation. This name should match the SRV record
+ published in DNS.
+
+Template: matrix-synapse/report-stats
+Type: boolean
+Default: false
+_Description: Report anonymous statistics?
+ Developers of Matrix and Synapse really appreciate it when users help
+ the project out by reporting anonymized usage statistics from this
+ homeserver. Only very basic aggregate data (e.g. number of users)
+ will be reported, but it helps track the growth of the Matrix
+ community, and helps to make Matrix a success, as well as to convince
+ other networks that they should peer with Matrix.
+ .
+ Thank you.
diff --git a/demo/.gitignore b/demo/.gitignore
new file mode 100644
index 0000000000..4d12712343
--- /dev/null
+++ b/demo/.gitignore
@@ -0,0 +1,7 @@
+*.db
+*.log
+*.log.*
+*.pid
+
+/media_store.*
+/etc
diff --git a/demo/demo.tls.dh b/demo/demo.tls.dh
deleted file mode 100644
index cbc58272a0..0000000000
--- a/demo/demo.tls.dh
+++ /dev/null
@@ -1,9 +0,0 @@
-2048-bit DH parameters taken from rfc3526
------BEGIN DH PARAMETERS-----
-MIIBCAKCAQEA///////////JD9qiIWjCNMTGYouA3BzRKQJOCIpnzHQCC76mOxOb
-IlFKCHmONATd75UZs806QxswKwpt8l8UN0/hNW1tUcJF5IW1dmJefsb0TELppjft
-awv/XLb0Brft7jhr+1qJn6WunyQRfEsf5kkoZlHs5Fs9wgB8uKFjvwWY2kg2HFXT
-mmkWP6j9JM9fg2VdI9yjrZYcYvNWIIVSu57VKQdwlpZtZww1Tkq8mATxdGwIyhgh
-fDKQXkYuNs474553LBgOhgObJ4Oi7Aeij7XFXfBvTFLJ3ivL9pVYFxg5lUl86pVq
-5RXSJhiY+gUQFXKOWoqsqmj//////////wIBAg==
------END DH PARAMETERS-----
diff --git a/docker/Dockerfile b/docker/Dockerfile
index db44c02a92..c35da67a2a 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -1,3 +1,16 @@
+# Dockerfile to build the matrixdotorg/synapse docker images.
+#
+# To build the image, run `docker build` command from the root of the
+# synapse repository:
+#
+#    docker build -f docker/Dockerfile .
+#
+# There is an optional PYTHON_VERSION build argument which sets the
+# version of python to build against: for example:
+#
+#    docker build -f docker/Dockerfile --build-arg PYTHON_VERSION=3.6 .
+#
+
 ARG PYTHON_VERSION=2
 
 ###
@@ -31,11 +44,12 @@ RUN pip install --prefix="/install" --no-warn-script-location \
 
 # now install synapse and all of the python deps to /install.
 
-COPY . /synapse
+COPY synapse /synapse/synapse/
+COPY scripts /synapse/scripts/
+COPY MANIFEST.in README.rst setup.py synctl /synapse/
+
 RUN pip install --prefix="/install" --no-warn-script-location \
-        lxml \
-        psycopg2 \
-        /synapse
+        /synapse[all]
 
 ###
 ### Stage 1: runtime
@@ -58,6 +72,6 @@ COPY ./docker/conf /conf
 
 VOLUME ["/data"]
 
-EXPOSE 8008/tcp 8448/tcp
+EXPOSE 8008/tcp 8009/tcp 8448/tcp
 
 ENTRYPOINT ["/start.py"]
diff --git a/docker/Dockerfile-dhvirtualenv b/docker/Dockerfile-dhvirtualenv
new file mode 100644
index 0000000000..224c92352d
--- /dev/null
+++ b/docker/Dockerfile-dhvirtualenv
@@ -0,0 +1,68 @@
+# A dockerfile which builds a docker image for building a debian package for
+# synapse. The distro to build for is passed as a docker build var.
+#
+# The default entrypoint expects the synapse source to be mounted as a
+# (read-only) volume at /synapse/source, and an output directory at /debs.
+#
+# A pair of environment variables (TARGET_USERID and TARGET_GROUPID) can be
+# passed to the docker container; if these are set, the build script will chown
+# the build products accordingly, to avoid ending up with things owned by root
+# in the host filesystem.
+
+# Get the distro we want to pull from as a dynamic build variable
+ARG distro=""
+
+###
+### Stage 0: build a dh-virtualenv
+###
+FROM ${distro} as builder
+
+RUN apt-get update -qq -o Acquire::Languages=none
+RUN env DEBIAN_FRONTEND=noninteractive apt-get install \
+        -yqq --no-install-recommends \
+        build-essential \
+        ca-certificates \
+        devscripts \
+        equivs \
+        wget
+
+# fetch and unpack the package
+RUN wget -q -O /dh-virtualenv-1.1.tar.gz https://github.com/spotify/dh-virtualenv/archive/1.1.tar.gz
+RUN tar xvf /dh-virtualenv-1.1.tar.gz
+
+# install its build deps
+RUN cd dh-virtualenv-1.1/ \
+    && env DEBIAN_FRONTEND=noninteractive mk-build-deps -ri -t "apt-get -yqq --no-install-recommends"
+
+# build it
+RUN cd dh-virtualenv-1.1 && dpkg-buildpackage -us -uc -b
+
+###
+### Stage 1
+###
+FROM ${distro}
+
+# Install the build dependencies
+RUN apt-get update -qq -o Acquire::Languages=none \
+    && env DEBIAN_FRONTEND=noninteractive apt-get install \
+        -yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
+        build-essential \
+        debhelper \
+        devscripts \
+        dh-systemd \
+        lsb-release \
+        python3-dev \
+        python3-pip \
+        python3-setuptools \
+        python3-venv \
+        sqlite3
+
+COPY --from=builder /dh-virtualenv_1.1-1_all.deb /
+
+# install dh-virtualenv. Update the apt cache again first, in case we got a
+# stale cache from docker the first time.
+RUN apt-get update -qq -o Acquire::Languages=none \
+    && apt-get install -yq /dh-virtualenv_1.1-1_all.deb
+
+WORKDIR /synapse/source
+ENTRYPOINT ["bash","/synapse/source/docker/build_debian.sh"]
diff --git a/docker/README.md b/docker/README.md
index 3c00d1e948..3faedf629f 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -1,22 +1,21 @@
 # Synapse Docker
 
-This Docker image will run Synapse as a single process. It does not provide a database
-server or a TURN server, you should run these separately.
+This Docker image will run Synapse as a single process. By default it uses a
+sqlite database; for production use you should connect it to a separate
+postgres database.
 
-## Run
+The image also does *not* provide a TURN server.
 
-We do not currently offer a `latest` image, as this has somewhat undefined semantics.
-We instead release only tagged versions so upgrading between releases is entirely
-within your control.
+## Run
 
 ### Using docker-compose (easier)
 
-This image is designed to run either with an automatically generated configuration
-file or with a custom configuration that requires manual editing.
+This image is designed to run either with an automatically generated
+configuration file or with a custom configuration that requires manual editing.
 
 An easy way to make use of this image is via docker-compose. See the
-[contrib/docker](../contrib/docker)
-section of the synapse project for examples.
+[contrib/docker](../contrib/docker) section of the synapse project for
+examples.
 
 ### Without Compose (harder)
 
@@ -32,7 +31,7 @@ docker run \
     -v ${DATA_PATH}:/data \
     -e SYNAPSE_SERVER_NAME=my.matrix.host \
     -e SYNAPSE_REPORT_STATS=yes \
-    docker.io/matrixdotorg/synapse:latest
+    matrixdotorg/synapse:latest
 ```
 
 ## Volumes
@@ -53,6 +52,28 @@ In order to setup an application service, simply create an ``appservices``
 directory in the data volume and write the application service Yaml
 configuration file there. Multiple application services are supported.
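+
+For example, applied to the volume path used elsewhere in this document, a
+minimal sketch (the registration file name is hypothetical):
+
+```
+mkdir -p ${DATA_PATH}/appservices
+cp my-bridge-registration.yaml ${DATA_PATH}/appservices/
+```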
 
+## TLS certificates
+
+Synapse requires a valid TLS certificate. You can do one of the following:
+
+ * Provide your own certificate and key (as
+   `${DATA_PATH}/${SYNAPSE_SERVER_NAME}.crt` and
+   `${DATA_PATH}/${SYNAPSE_SERVER_NAME}.key`, or elsewhere by providing an
+   entire config as `${SYNAPSE_CONFIG_PATH}`).
+
+ * Use a reverse proxy to terminate incoming TLS, and forward the plain http
+   traffic to port 8008 in the container. In this case you should set `-e
+   SYNAPSE_NO_TLS=1`.
+
+ * Use the ACME (Let's Encrypt) support built into Synapse. This requires
+   `${SYNAPSE_SERVER_NAME}` port 80 to be forwarded to port 8009 in the
+   container, for example with `-p 80:8009`. To enable it in the docker
+   container, set `-e SYNAPSE_ACME=1`.
+
+If you don't do any of these, Synapse will fail to start with an error similar to:
+
+    synapse.config._base.ConfigError: Error accessing file '/data/<server_name>.tls.crt' (config for tls_certificate): No such file or directory
+
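+For example, to use the built-in ACME support with the `docker run`
+invocation shown earlier (a sketch; adjust names and paths to your
+deployment):
+
+```
+docker run \
+    -d \
+    --name synapse \
+    -v ${DATA_PATH}:/data \
+    -e SYNAPSE_SERVER_NAME=my.matrix.host \
+    -e SYNAPSE_REPORT_STATS=yes \
+    -e SYNAPSE_ACME=1 \
+    -p 80:8009 \
+    matrixdotorg/synapse:latest
+```
+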
 ## Environment
 
 Unless you specify a custom path for the configuration file, a very generic
@@ -71,7 +92,7 @@ then customize it manually. No other environment variable is required.
 Otherwise, a dynamic configuration file will be used. The following environment
 variables are available for configuration:
 
-* ``SYNAPSE_SERVER_NAME`` (mandatory), the current server public hostname.
+* ``SYNAPSE_SERVER_NAME`` (mandatory), the server public hostname.
 * ``SYNAPSE_REPORT_STATS``, (mandatory, ``yes`` or ``no``), enable anonymous
   statistics reporting back to the Matrix project which helps us to get funding.
 * ``SYNAPSE_NO_TLS``, set this variable to disable TLS in Synapse (use this if
@@ -80,7 +101,6 @@ variables are available for configuration:
   the Synapse instance.
 * ``SYNAPSE_ALLOW_GUEST``, set this variable to allow guest joining this server.
 * ``SYNAPSE_EVENT_CACHE_SIZE``, the event cache size [default `10K`].
-* ``SYNAPSE_CACHE_FACTOR``, the cache factor [default `0.5`].
 * ``SYNAPSE_RECAPTCHA_PUBLIC_KEY``, set this variable to the recaptcha public
   key in order to enable recaptcha upon registration.
 * ``SYNAPSE_RECAPTCHA_PRIVATE_KEY``, set this variable to the recaptcha private
@@ -88,7 +108,9 @@ variables are available for configuration:
 * ``SYNAPSE_TURN_URIS``, set this variable to the comma-separated list of TURN
   uris to enable TURN for this homeserver.
 * ``SYNAPSE_TURN_SECRET``, set this to the TURN shared secret if required.
-* ``SYNAPSE_MAX_UPLOAD_SIZE``, set this variable to change the max upload size [default `10M`].
+* ``SYNAPSE_MAX_UPLOAD_SIZE``, set this variable to change the max upload size
+  [default `10M`].
+* ``SYNAPSE_ACME``, set this to enable the ACME certificate renewal support.
 
 Shared secrets, that will be initialized to random values if not set:
 
@@ -99,27 +121,25 @@ Shared secrets, that will be initialized to random values if not set:
 
 Database specific values (will use SQLite if not set):
 
-* `POSTGRES_DB` - The database name for the synapse postgres database. [default: `synapse`]
-* `POSTGRES_HOST` - The host of the postgres database if you wish to use postgresql instead of sqlite3. [default: `db` which is useful when using a container on the same docker network in a compose file where the postgres service is called `db`]
-* `POSTGRES_PASSWORD` - The password for the synapse postgres database. **If this is set then postgres will be used instead of sqlite3.** [default: none] **NOTE**: You are highly encouraged to use postgresql! Please use the compose file to make it easier to deploy.
-* `POSTGRES_USER` - The user for the synapse postgres database. [default: `matrix`]
+* `POSTGRES_DB` - The database name for the synapse postgres
+  database. [default: `synapse`]
+* `POSTGRES_HOST` - The host of the postgres database if you wish to use
+  postgresql instead of sqlite3. [default: `db` which is useful when using a
+  container on the same docker network in a compose file where the postgres
+  service is called `db`]
+* `POSTGRES_PASSWORD` - The password for the synapse postgres database. **If
+  this is set then postgres will be used instead of sqlite3.** [default: none]
+  **NOTE**: You are highly encouraged to use postgresql! Please use the compose
+  file to make it easier to deploy.
+* `POSTGRES_USER` - The user for the synapse postgres database. [default:
+  `matrix`]
 
 Mail server specific values (will not send emails if not set):
 
 * ``SYNAPSE_SMTP_HOST``, hostname to the mail server.
-* ``SYNAPSE_SMTP_PORT``, TCP port for accessing the mail server [default ``25``].
-* ``SYNAPSE_SMTP_USER``, username for authenticating against the mail server if any.
-* ``SYNAPSE_SMTP_PASSWORD``, password for authenticating against the mail server if any.
-
-## Build
-
-Build the docker image with the `docker build` command from the root of the synapse repository.
-
-```
-docker build -t docker.io/matrixdotorg/synapse . -f docker/Dockerfile
-```
-
-The `-t` option sets the image tag. Official images are tagged `matrixdotorg/synapse:<version>` where `<version>` is the same as the release tag in the synapse git repository.
-
-You may have a local Python wheel cache available, in which case copy the relevant
-packages in the ``cache/`` directory at the root of the project.
+* ``SYNAPSE_SMTP_PORT``, TCP port for accessing the mail server [default
+  ``25``].
+* ``SYNAPSE_SMTP_USER``, username for authenticating against the mail server if
+  any.
+* ``SYNAPSE_SMTP_PASSWORD``, password for authenticating against the mail
+  server if any.
diff --git a/docker/build_debian.sh b/docker/build_debian.sh
new file mode 100644
index 0000000000..6ed2b39898
--- /dev/null
+++ b/docker/build_debian.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+# The script to build the Debian package, as run inside the Docker image.
+
+set -ex
+
+DIST=`lsb_release -c -s`
+
+# we get a read-only copy of the source: make a writeable copy
+cp -aT /synapse/source /synapse/build
+cd /synapse/build
+
+# add an entry to the changelog for this distribution
+dch -M -l "+$DIST" "build for $DIST"
+dch -M -r "" --force-distribution --distribution "$DIST"
+
+dpkg-buildpackage -us -uc
+
+ls -l ..
+
+# copy the build results out, setting perms if necessary
+shopt -s nullglob
+for i in ../*.deb ../*.dsc ../*.tar.xz ../*.changes ../*.buildinfo; do
+    [ -z "$TARGET_USERID" ] || chown "$TARGET_USERID" "$i"
+    [ -z "$TARGET_GROUPID" ] || chgrp "$TARGET_GROUPID" "$i"
+    mv "$i" /debs
+done
diff --git a/docker/conf/homeserver.yaml b/docker/conf/homeserver.yaml
index 1b0f655d26..babd5bef9e 100644
--- a/docker/conf/homeserver.yaml
+++ b/docker/conf/homeserver.yaml
@@ -2,11 +2,18 @@
 
 ## TLS ##
 
+{% if not SYNAPSE_NO_TLS %}
+
 tls_certificate_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.crt"
 tls_private_key_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.key"
-tls_dh_params_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.dh"
-no_tls: {{ "True" if SYNAPSE_NO_TLS else "False" }}
-tls_fingerprints: []
+
+{% if SYNAPSE_ACME %}
+acme:
+    enabled: true
+    port: 8009
+{% endif %}
+
+{% endif %}
 
 ## Server ##
 
@@ -14,6 +21,7 @@ server_name: "{{ SYNAPSE_SERVER_NAME }}"
 pid_file: /homeserver.pid
 web_client: False
 soft_file_limit: 0
+log_config: "/compiled/log.config"
 
 ## Ports ##
 
@@ -67,9 +75,6 @@ database:
 ## Performance ##
 
 event_cache_size: "{{ SYNAPSE_EVENT_CACHE_SIZE or "10K" }}"
-verbose: 0
-log_file: "/data/homeserver.log"
-log_config: "/compiled/log.config"
 
 ## Ratelimiting ##
 
@@ -150,10 +155,12 @@ enable_group_creation: true
 
 # The list of identity servers trusted to verify third party
 # identifiers by this server.
+#
+# Also defines the ID server which will be called when an account is
+# deactivated (one will be picked arbitrarily).
 trusted_third_party_id_servers:
     - matrix.org
     - vector.im
-    - riot.im
 
 ## Metrics ###
 
diff --git a/docker/start.py b/docker/start.py
index 346df8c87f..941d9996a8 100755
--- a/docker/start.py
+++ b/docker/start.py
@@ -47,9 +47,8 @@ if mode == "generate":
 
 # In normal mode, generate missing keys if any, then run synapse
 else:
-    # Parse the configuration file
     if "SYNAPSE_CONFIG_PATH" in environ:
-        args += ["--config-path", environ["SYNAPSE_CONFIG_PATH"]]
+        config_path = environ["SYNAPSE_CONFIG_PATH"]
     else:
         check_arguments(environ, ("SYNAPSE_SERVER_NAME", "SYNAPSE_REPORT_STATS"))
         generate_secrets(environ, {
@@ -58,10 +57,21 @@ else:
         })
         environ["SYNAPSE_APPSERVICES"] = glob.glob("/data/appservices/*.yaml")
         if not os.path.exists("/compiled"): os.mkdir("/compiled")
-        convert("/conf/homeserver.yaml", "/compiled/homeserver.yaml", environ)
+
+        config_path = "/compiled/homeserver.yaml"
+
+        convert("/conf/homeserver.yaml", config_path, environ)
         convert("/conf/log.config", "/compiled/log.config", environ)
         subprocess.check_output(["chown", "-R", ownership, "/data"])
-        args += ["--config-path", "/compiled/homeserver.yaml"]
+
+
+    args += [
+        "--config-path", config_path,
+
+        # tell synapse to put any generated keys in /data rather than /compiled
+        "--keys-directory", "/data",
+    ]
+
     # Generate missing keys and start synapse
     subprocess.check_output(args + ["--generate-keys"])
     os.execv("/sbin/su-exec", ["su-exec", ownership] + args)
diff --git a/docs/ACME.md b/docs/ACME.md
new file mode 100644
index 0000000000..46136a9f2c
--- /dev/null
+++ b/docs/ACME.md
@@ -0,0 +1,129 @@
+# ACME
+
+Synapse v1.0 will require valid TLS certificates for communication between
+servers (port `8448` by default) in addition to those that are client-facing
+(port `443`). If you do not already have a valid certificate for your domain,
+the easiest way to get one is with Synapse's new ACME support, which will use
+the ACME protocol to provision a certificate automatically. Synapse v0.99.0+
+will provision server-to-server certificates automatically for you for free
+through [Let's Encrypt](https://letsencrypt.org/) if you tell it to.
+
+If your `server_name` config variable is the same as the hostname that
+clients connect to, then the same certificate can be used between the
+client and federation ports without issue.
+
+If your configuration file does not already have an `acme` section, you can
+generate an example config by running the `generate_config` executable. For
+example:
+
+```
+~/synapse/env3/bin/generate_config
+```
+
+You will need to provide Let's Encrypt (or another ACME provider) access to
+your Synapse ACME challenge responder on port 80, at the domain of your
+homeserver. This requires you to either change the port of the ACME listener
+provided by Synapse to a high port and reverse proxy to it, or use a tool
+like `authbind` to allow Synapse to listen on port 80 without root access.
+(Do not run Synapse with root permissions!) Detailed instructions are
+available under "ACME setup" below.
+
+If you already have certificates, you will need to back up or delete them
+(files `example.com.tls.crt` and `example.com.tls.key` in Synapse's root
+directory), as Synapse's ACME implementation will not overwrite them.
+
+You may wish to use alternate methods such as Certbot to obtain a certificate
+from Let's Encrypt, depending on your server configuration. Of course, if you
+already have a valid certificate for your homeserver's domain, that can be
+placed in Synapse's config directory without the need for any ACME setup.
+
+## ACME setup
+
+In short, the main steps for enabling ACME support are:
+
+1. Allow Synapse to listen for incoming ACME challenges.
+1. Enable ACME support in `homeserver.yaml`.
+1. Move your old certificates (files `example.com.tls.crt` and `example.com.tls.key`) out of the way, if they currently exist at the paths specified in `homeserver.yaml`.
+1. Restart Synapse.
+
+Detailed instructions for each step are provided below.
+
+### Listening on port 80
+
+In order for Synapse to complete the ACME challenge to provision a
+certificate, it needs access to port 80. Typically listening on port 80 is
+only granted to applications running as root. There are thus two solutions to
+this problem.
+
+#### Using a reverse proxy
+
+A reverse proxy such as Apache or nginx allows a single process (the web
+server) to listen on port 80 and proxy traffic to the appropriate program
+running on your server. It is the recommended method for setting up ACME as
+it allows you to use your existing webserver while also allowing Synapse to
+provision certificates as needed.
+
+For nginx users, add the following line to your existing `server` block:
+
+```
+location /.well-known/acme-challenge {
+    proxy_pass http://localhost:8009/;
+}
+```
+
+For Apache, add the following to your existing webserver config:
+
+```
+ProxyPass /.well-known/acme-challenge http://localhost:8009/.well-known/acme-challenge
+```
+
+Make sure to restart/reload your webserver after making changes.
+
+Now make the relevant changes in `homeserver.yaml` to enable ACME support:
+
+```
+acme:
+    enabled: true
+    port: 8009
+```
+
+#### Authbind
+
+`authbind` allows a program which does not run as root to bind to
+low-numbered ports in a controlled way. The setup is simpler, but requires
+that no webserver already be running on port 80. **This includes every time
+Synapse renews a certificate**, which may be cumbersome if you usually run a
+web server on port 80. Nevertheless, if you're sure port 80 is not being used
+for any other purpose, then all that is necessary is the following:
+
+Install `authbind`. For example, on Debian/Ubuntu:
+
+```
+sudo apt-get install authbind
+```
+
+Allow `authbind` to bind port 80:
+
+```
+sudo touch /etc/authbind/byport/80
+sudo chmod 777 /etc/authbind/byport/80
+```
+
+When Synapse is started, use the following syntax:
+
+```
+authbind --deep <synapse start command>
+```
+
+Make the relevant changes in `homeserver.yaml` to enable ACME support:
+
+```
+acme:
+    enabled: true
+```
+
+### (Re)starting synapse
+
+Ensure that the certificate paths specified in `homeserver.yaml`
+(`tls_certificate_path` and `tls_private_key_path`) do not currently point to
+any files. Synapse will not provision certificates if such files exist, as it
+does not want to overwrite existing certificates.
+
+Finally, start/restart Synapse.
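+
+Once Synapse is back up, you can check that a certificate was provisioned by
+inspecting what the federation port presents. A rough sketch using only the
+Python standard library (substitute your own domain; note that
+`ssl.get_server_certificate` merely fetches the certificate without
+validating it):
+
+```python
+import ssl
+
+# Fetch the PEM-encoded certificate presented on the federation port. Once
+# ACME has run, the issuer should be Let's Encrypt rather than the old
+# self-signed certificate.
+pem = ssl.get_server_certificate(("example.com", 8448))
+print(pem)
+```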
diff --git a/docs/MSC1711_certificates_FAQ.md b/docs/MSC1711_certificates_FAQ.md
new file mode 100644
index 0000000000..8eb22656db
--- /dev/null
+++ b/docs/MSC1711_certificates_FAQ.md
@@ -0,0 +1,338 @@
+# MSC1711 Certificates FAQ
+
+The goal of Synapse 0.99.0 is to act as a stepping stone to Synapse 1.0.0. It
+supports the r0.1 release of the server to server specification, but is
+compatible with both the legacy Matrix federation behaviour (pre-r0.1) as well
+as post-r0.1 behaviour, in order to allow for a smooth upgrade across the
+federation.
+
+The most important thing to know is that Synapse 1.0.0 will require a valid TLS
+certificate on federation endpoints. Self signed certificates will not be
+sufficient.
+
+Synapse 0.99.0 makes it easy to configure TLS certificates, and will
+interoperate both with >= 1.0.0 servers and with existing servers yet to
+upgrade.
+
+**It is critical that all admins upgrade to 0.99.0 and configure a valid TLS
+certificate.** Admins will have 1 month to do so, after which 1.0.0 will be
+released and those servers without a valid certificate will no longer be able
+to federate with >= 1.0.0 servers.
+
+Full details on how to carry out this configuration change are given
+[below](#configuring-certificates-for-compatibility-with-synapse-100). A
+timeline and some frequently asked questions are also given below.
+
+For more details and context on the release of the r0.1 Server/Server API and
+imminent Matrix 1.0 release, you can also see our
+[main talk from FOSDEM 2019](https://matrix.org/blog/2019/02/04/matrix-at-fosdem-2019/).
+
+## Contents
+* Timeline
+* Configuring certificates for compatibility with Synapse 1.0
+* FAQ
+  * Synapse 0.99.0 has just been released, what do I need to do right now?
+  * How do I upgrade?
+  * What will happen if I do not set up a valid federation certificate
+    immediately?
+  * What will happen if I do nothing at all?
+  * When do I need a SRV record or .well-known URI?
+  * Can I still use an SRV record?
+  * I have created a .well-known URI. Do I still need an SRV record?
+  * It used to work just fine, why are you breaking everything?
+  * Can I manage my own certificates rather than having Synapse renew
+    certificates itself?
+  * Do you still recommend against using a reverse proxy on the federation port?
+  * Do I still need to give my TLS certificates to Synapse if I am using a
+    reverse proxy?
+  * Do I need the same certificate for the client and federation port?
+  * How do I tell Synapse to reload my keys/certificates after I replace them?
+
+## Timeline
+
+**5th Feb 2019  - Synapse 0.99.0 is released.**
+
+All server admins are encouraged to upgrade.
+
+0.99.0:
+
+-   provides support for ACME to make setting up Let's Encrypt certs easy.
+
+-   does not enforce that a valid CA cert is present on the federation API, but
+    rather makes it easy to set one up.
+
+-   provides support for .well-known delegation.
+
+Admins should upgrade and configure a valid CA cert. Homeservers that require
+a .well-known entry (see below) should retain their SRV record and use it
+alongside their .well-known record.
+
+**>= 5th March 2019  - Synapse 1.0.0 is released**
+
+1.0.0 will land no sooner than 1 month after 0.99.0, leaving server admins one
+month after 5th February to upgrade to 0.99.0 and deploy their certificates. In
+accordance with the [S2S spec](https://matrix.org/docs/spec/server_server/r0.1.0.html)
+1.0.0 will enforce certificate validity. This means that any homeserver without a
+valid certificate after this point will no longer be able to federate with
+1.0.0 servers.
+
+
+## Configuring certificates for compatibility with Synapse 1.0.0
+
+### If you do not currently have an SRV record
+
+In this case, your `server_name` points to the host where your Synapse is
+running. There is no need to create a `.well-known` URI or an SRV record, but
+you will need to give Synapse a valid, signed, certificate.
+
+The easiest way to do that is with Synapse's built-in ACME (Let's Encrypt)
+support. Full details are in [ACME.md](./ACME.md) but, in a nutshell:
+
+ 1. Allow Synapse to listen on port 80 with `authbind`, or forward it from a
+    reverse proxy.
+ 2. Enable acme support in `homeserver.yaml`.
+ 3. Move your old certificates out of the way.
+ 4. Restart Synapse.
+
+### If you do have an SRV record currently
+
+If you are using an SRV record, your matrix domain (`server_name`) may not
+point to the same host that your Synapse is running on (the 'target
+domain'). (If it does, you can follow the recommendation above; otherwise, read
+on.)
+
+Let's assume that your `server_name` is `example.com`, and your Synapse is
+hosted at a target domain of `customer.example.net`. Currently you should have
+an SRV record which looks like:
+
+```
+_matrix._tcp.example.com. IN SRV 10 5 8000 customer.example.net.
+```
+
+In this situation, you have three choices for how to proceed:
+
+#### Option 1: give Synapse a certificate for your matrix domain
+
+Synapse 1.0 will expect your server to present a TLS certificate for your
+`server_name` (`example.com` in the above example). You can achieve this by
+doing one of the following:
+
+ * Acquire a certificate for the `server_name` yourself (for example, using
+   `certbot`), and give it and the key to Synapse via `tls_certificate_path`
+   and `tls_private_key_path`, or:
+
+ * Use Synapse's [ACME support](./ACME.md), and forward port 80 on the
+   `server_name` domain to your Synapse instance.
+
+#### Option 2: run Synapse behind a reverse proxy
+
+If you have an existing reverse proxy set up with correct TLS certificates for
+your domain, you can simply route all traffic through the reverse proxy by
+updating the SRV record appropriately (or removing it, if the proxy listens on
+8448).
+
+See [reverse_proxy.rst](reverse_proxy.rst) for information on setting up a
+reverse proxy.
+
+#### Option 3: add a .well-known file to delegate your matrix traffic
+
+This will allow you to keep Synapse on a separate domain, without having to
+give it a certificate for the matrix domain.
+
+You can do this with a `.well-known` file as follows:
+
+ 1. Keep the SRV record in place - it is needed for backwards compatibility
+    with Synapse 0.34 and earlier.
+
+ 2. Give synapse a certificate corresponding to the target domain
+    (`customer.example.net` in the above example). Currently Synapse's ACME
+    support [does not support
+    this](https://github.com/matrix-org/synapse/issues/4552), so you will have
+    to acquire a certificate yourself and give it to Synapse via
+    `tls_certificate_path` and `tls_private_key_path`.
+
+ 3. Restart Synapse to ensure the new certificate is loaded.
+
+ 4. Arrange for a `.well-known` file at
+    `https://<server_name>/.well-known/matrix/server` with contents:
+
+    ```json
+    {"m.server": "<target server name>"}
+    ```
+
+    where the target server name is resolved as usual (i.e. SRV lookup,
+    falling back to talking to port 8448). A sketch of this resolution order
+    is given after this list.
+
+    In the above example, where synapse is listening on port 8000,
+    `https://example.com/.well-known/matrix/server` should have `m.server` set to one of:
+
+    1. `customer.example.net` ─ with an SRV record on
+       `_matrix._tcp.customer.example.net` pointing to port 8000, or:
+
+    2. `customer.example.net` ─ updating synapse to listen on the default port
+       8448, or:
+
+    3. `customer.example.net:8000` ─ ensuring that if there is a reverse proxy
+       on `customer.example.net:8000` it correctly handles HTTP requests with
+       Host header set to `customer.example.net:8000`.
+
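+To make the resolution order concrete, here is a rough sketch of the lookup a
+federating server performs, per the rules above. This is not Synapse's actual
+resolver (it ignores IPv6 literals and other subtleties) and assumes the
+`requests` and `dnspython` libraries are installed:
+
+```python
+import json
+
+import dns.resolver  # dnspython
+import requests
+
+
+def resolve_matrix_server(server_name):
+    # 1. Look for .well-known delegation on the server_name itself.
+    try:
+        resp = requests.get(
+            "https://%s/.well-known/matrix/server" % (server_name,), timeout=10
+        )
+        if resp.status_code == 200:
+            target = json.loads(resp.text)["m.server"]
+            if ":" in target:             # explicit port: use it directly
+                host, _, port = target.rpartition(":")
+                return host, int(port)
+            server_name = target          # no port: resolve the target as usual
+    except Exception:
+        pass                              # no usable .well-known: carry on
+
+    # 2. Fall back to an SRV lookup.
+    try:
+        answers = dns.resolver.query("_matrix._tcp." + server_name, "SRV")
+        best = sorted(answers, key=lambda a: (a.priority, -a.weight))[0]
+        return str(best.target).rstrip("."), best.port
+    except Exception:
+        # 3. Finally, default to port 8448 on the (possibly delegated) name.
+        return server_name, 8448
+
+
+print(resolve_matrix_server("example.com"))
+```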
+
+## FAQ
+
+### Synapse 0.99.0 has just been released, what do I need to do right now?
+
+Upgrade as soon as you can in preparation for Synapse 1.0.0, and update your
+TLS certificates as [above](#configuring-certificates-for-compatibility-with-synapse-100).
+
+### What will happen if I do not set up a valid federation certificate immediately?
+
+Nothing initially, but once 1.0.0 is in the wild it will not be possible to
+federate with 1.0.0 servers.
+
+### What will happen if I do nothing at all?
+
+If the admin takes no action at all, and remains on Synapse < 0.99.0, then
+the homeserver will be unable to federate with servers that have implemented
+.well-known. Then, as above, once the one-month upgrade window has expired,
+the homeserver will not be able to federate with any Synapse >= 1.0.0.
+
+### When do I need a SRV record or .well-known URI?
+
+If your homeserver listens on the default federation port (8448), and your
+`server_name` points to the host that your homeserver runs on, you do not need an
+SRV record or `.well-known/matrix/server` URI.
+
+For instance, if you registered `example.com` and pointed its DNS A record at a
+fresh Upcloud VPS or similar, you could install Synapse 0.99 on that host,
+giving it a server_name of `example.com`, and it would automatically generate a
+valid TLS certificate for you via Let's Encrypt and no SRV record or
+`.well-known` URI would be needed.
+
+This is the common case, although you can add an SRV record or
+`.well-known/matrix/server` URI for completeness if you wish.
+
+**However**, if your server does not listen on port 8448, or if your `server_name`
+does not point to the host that your homeserver runs on, you will need to let
+other servers know how to find it.
+
+In this case, you should see ["If you do have an SRV record
+currently"](#if-you-do-have-an-srv-record-currently) above.
+
+### Can I still use an SRV record?
+
+Firstly, if you didn't need an SRV record before (because your server is
+listening on port 8448 of your server_name), you certainly don't need one now:
+the defaults are still the same.
+
+If you previously had an SRV record, you can keep using it provided you are
+able to give Synapse a TLS certificate corresponding to your server name. For
+example, suppose you had the following SRV record, which directs matrix traffic
+for example.com to matrix.example.com:443:
+
+```
+_matrix._tcp.example.com. IN SRV 10 5 443 matrix.example.com
+```
+
+In this case, Synapse must be given a certificate for example.com - or be
+configured to acquire one from Let's Encrypt.
+
+If you are unable to give Synapse a certificate for your server_name, you
+will need to use a .well-known URI instead. However, see also "I have created
+a .well-known URI. Do I still need an SRV record?".
+
+### I have created a .well-known URI. Do I still need an SRV record?
+
+As of Synapse 0.99, Synapse will first check for the existence of a `.well-known`
+URI and follow any delegation it suggests. It will only then check for the
+existence of an SRV record.
+
+That means that the SRV record will often be redundant. However, you should
+remember that there may still be older versions of Synapse in the federation
+which do not understand `.well-known` URIs, so if you removed your SRV record you
+would no longer be able to federate with them.
+
+It is therefore best to leave the SRV record in place for now. Synapse 0.34 and
+earlier will follow the SRV record (and not care about the invalid
+certificate). Synapse 0.99 and later will follow the .well-known URI, with the
+correct certificate chain.
+
+### It used to work just fine, why are you breaking everything?
+
+We have always wanted Matrix servers to be as easy to set up as possible, and
+so back when we started federation in 2014 we didn't want admins to have to go
+through the cumbersome process of buying a valid TLS certificate to run a
+server. This was before Let's Encrypt came along and made getting a free and
+valid TLS certificate straightforward. So instead, we adopted a system based on
+[Perspectives](https://en.wikipedia.org/wiki/Convergence_(SSL)): an approach
+where you check a set of "notary servers" (in practice, homeservers) to vouch
+for the validity of a certificate rather than having it signed by a CA. As long
+as enough different notaries agree on the certificate's validity, then it is
+trusted.
+
+However, in practice this has never worked properly. Most people only use the
+default notary server (matrix.org), leading to inadvertent centralisation which
+we want to eliminate. Meanwhile, we never implemented the full consensus
+algorithm to query the servers participating in a room to determine consensus
+on whether a given certificate is valid. This is fiddly to get right
+(especially in the face of sybil attacks), and we found ourselves questioning
+whether it was worth the effort to finish the work and commit to maintaining a
+secure certificate validation system as opposed to focusing on core Matrix
+development.
+
+Meanwhile, Let's Encrypt came along in 2016, and put the final nail in the
+coffin of the Perspectives project (which was already pretty dead). So, the
+Spec Core Team decided that a better approach would be to mandate valid TLS
+certificates for federation alongside the rest of the Web. More details can be
+found in
+[MSC1711](https://github.com/matrix-org/matrix-doc/blob/master/proposals/1711-x509-for-federation.md#background-the-failure-of-the-perspectives-approach).
+
+This results in a breaking change, which is disruptive, but absolutely critical
+for the security model. However, the existence of Let's Encrypt as a trivial
+way to replace the old self-signed certificates with valid CA-signed ones helps
+smooth things over massively, especially as Synapse can now automate Let's
+Encrypt certificate generation if needed.
+
+### Can I manage my own certificates rather than having Synapse renew certificates itself?
+
+Yes, you are welcome to manage your certificates yourself. Synapse will only
+attempt to obtain certificates from Let's Encrypt if you configure it to do
+so. The only requirement is that there is a valid TLS cert present for
+federation endpoints.
+
+### Do you still recommend against using a reverse proxy on the federation port?
+
+We no longer actively recommend against using a reverse proxy. Many admins will
+find it easier to direct federation traffic to a reverse proxy and manage their
+own TLS certificates, and this is a supported configuration.
+
+See [reverse_proxy.rst](reverse_proxy.rst) for information on setting up a
+reverse proxy.
+
+### Do I still need to give my TLS certificates to Synapse if I am using a reverse proxy?
+
+Practically speaking, this is no longer necessary.
+
+If you are using a reverse proxy for all of your TLS traffic, then you can set
+`no_tls: True`. In that case, the only reason Synapse needs the certificate is
+to populate a legacy 'tls_fingerprints' field in the federation API. This is
+ignored by Synapse 0.99.0 and later, and the only time pre-0.99 Synapses will
+check it is when attempting to fetch the server keys - and generally this is
+delegated via `matrix.org`, which is on 0.99.0.
+
+However, there is a bug in Synapse 0.99.0
+([#4554](https://github.com/matrix-org/synapse/issues/4554)) which prevents
+Synapse from starting if you do not give it a TLS certificate. To work around
+this, you can give it any TLS certificate at all. This will be fixed soon.
+
+### Do I need the same certificate for the client and federation port?
+
+No. There is nothing stopping you from using different certificates,
+particularly if you are using a reverse proxy. However, Synapse will use the
+same certificate on any ports where TLS is configured.
+
+### How do I tell Synapse to reload my keys/certificates after I replace them?
+
+Synapse will reload the keys and certificates when it receives a SIGHUP - for
+example `kill -HUP $(cat homeserver.pid)`. Alternatively, simply restart
+Synapse, though this will result in downtime while it restarts.
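+
+If you would rather do this from a deployment script, the equivalent is a few
+lines of standard-library Python. This sketch assumes the pid file is
+`homeserver.pid` in the current directory:
+
+```python
+import os
+import signal
+
+# Equivalent of `kill -HUP $(cat homeserver.pid)`: ask the running Synapse
+# to reload its keys and certificates without a restart.
+with open("homeserver.pid") as f:
+    pid = int(f.read().strip())
+
+os.kill(pid, signal.SIGHUP)
+```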
diff --git a/docs/admin_api/purge_history_api.rst b/docs/admin_api/purge_history_api.rst
index 2da833c827..a5c3dc8149 100644
--- a/docs/admin_api/purge_history_api.rst
+++ b/docs/admin_api/purge_history_api.rst
@@ -61,3 +61,11 @@ the following:
     }
 
 The status will be one of ``active``, ``complete``, or ``failed``.
+
+Reclaim disk space (Postgres)
+-----------------------------
+
+To reclaim the disk space and return it to the operating system, you need to run
+`VACUUM FULL;` on the database.
+
+https://www.postgresql.org/docs/current/sql-vacuum.html
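+
+If you prefer to script this, the following sketch (assuming ``psycopg2`` and
+a database named ``synapse``) shows the one wrinkle: ``VACUUM`` cannot run
+inside a transaction block, so the connection must be in autocommit mode.
+Note that ``VACUUM FULL`` takes an exclusive lock on each table while it
+runs, so expect downtime.
+
+.. code:: python
+
+    import psycopg2
+
+    conn = psycopg2.connect(dbname="synapse")
+    conn.autocommit = True  # VACUUM cannot run inside a transaction block
+
+    with conn.cursor() as cur:
+        cur.execute("VACUUM FULL;")
+
+    conn.close()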
diff --git a/docs/admin_api/register_api.rst b/docs/admin_api/register_api.rst
index 16d65c86b3..084e74ebf5 100644
--- a/docs/admin_api/register_api.rst
+++ b/docs/admin_api/register_api.rst
@@ -39,13 +39,13 @@ As an example::
     }
 
 The MAC is the hex digest output of the HMAC-SHA1 algorithm, with the key being
-the shared secret and the content being the nonce, user, password, and either
-the string "admin" or "notadmin", each separated by NULs. For an example of
-generation in Python::
+the shared secret and the content being the nonce, user, password, either the
+string "admin" or "notadmin", and optionally the user_type,
+each separated by NULs. For an example of generation in Python::
 
   import hmac, hashlib
 
-  def generate_mac(nonce, user, password, admin=False):
+  def generate_mac(nonce, user, password, admin=False, user_type=None):
 
       mac = hmac.new(
         key=shared_secret,
@@ -59,5 +59,8 @@ generation in Python::
       mac.update(password.encode('utf8'))
       mac.update(b"\x00")
       mac.update(b"admin" if admin else b"notadmin")
+      if user_type:
+          mac.update(b"\x00")
+          mac.update(user_type.encode('utf8'))
 
       return mac.hexdigest()
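+
+For example, a support user (``user_type="support"``, as defined in
+``synapse.api.constants.UserTypes``) might be registered with a MAC computed
+as follows; the nonce and password here are illustrative only::
+
+  mac = generate_mac(
+      nonce="fd9dcb285624ab61",
+      user="alice",
+      password="correct-horse",
+      admin=False,
+      user_type="support",
+  )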
diff --git a/docs/consent_tracking.md b/docs/consent_tracking.md
index 064eae82f7..c586b5f0b6 100644
--- a/docs/consent_tracking.md
+++ b/docs/consent_tracking.md
@@ -31,7 +31,7 @@ Note that the templates must be stored under a name giving the language of the
 template - currently this must always be `en` (for "English");
 internationalisation support is intended for the future.
 
-The template for the policy itself should be versioned and named according to 
+The template for the policy itself should be versioned and named according to
 the version: for example `1.0.html`. The version of the policy which the user
 has agreed to is stored in the database.
 
@@ -85,6 +85,37 @@ Once this is complete, and the server has been restarted, try visiting
 an error "Missing string query parameter 'u'". It is now possible to manually
 construct URIs where users can give their consent.
 
+### Enabling consent tracking at registration
+
+1. Add the following to your configuration:
+
+   ```yaml
+   user_consent:
+     require_at_registration: true
+     policy_name: "Privacy Policy" # or whatever you'd like to call the policy
+   ```
+
+2. In your consent templates, make use of the `public_version` variable to
+   see if an unauthenticated user is viewing the page. This is typically
+   wrapped around the form that would be used to actually agree to the document:
+
+   ```
+   {% if not public_version %}
+     <!-- The variables used here are only provided when the 'u' param is given to the homeserver -->
+     <form method="post" action="consent">
+       <input type="hidden" name="v" value="{{version}}"/>
+       <input type="hidden" name="u" value="{{user}}"/>
+       <input type="hidden" name="h" value="{{userhmac}}"/>
+       <input type="submit" value="Sure thing!"/>
+     </form>
+   {% endif %}
+   ```
+
+3. Restart Synapse to apply the changes.
+
+Visiting `https://<server>/_matrix/consent` should now give you a view of the privacy
+document. This is what users will be able to see when registering for accounts.
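+
+To verify the public view from a script, fetch the consent page without the
+`u` parameter; with `require_at_registration` enabled it should return the
+policy document rather than an error. A small sketch, assuming the `requests`
+library and `example.com` as a stand-in for your server:
+
+```python
+import requests
+
+# Without the 'u' parameter the homeserver serves the public (read-only)
+# version of the policy document.
+resp = requests.get("https://example.com/_matrix/consent")
+print(resp.status_code)  # expect 200 once require_at_registration is enabled
+print(resp.text[:200])   # should be the policy HTML, not an error
+```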
+
 ### Constructing the consent URI
 
 It may be useful to manually construct the "consent URI" for a given user - for
@@ -106,6 +137,12 @@ query parameters:
    `https://<server>/_matrix/consent?u=<user>&h=68a152465a4d...`.
 
 
+Note that not providing a `u` parameter will be interpreted as wanting to view
+the document from an unauthenticated perspective, such as prior to registration.
+Therefore, the `h` parameter is not required in this scenario. To enable this
+behaviour, set `require_at_registration` to `true` in your `user_consent` config.
+
+
 Sending users a server notice asking them to agree to the policy
 ----------------------------------------------------------------
 
diff --git a/docs/federate.md b/docs/federate.md
new file mode 100644
index 0000000000..b7fc09661c
--- /dev/null
+++ b/docs/federate.md
@@ -0,0 +1,123 @@
+Setting up Federation
+=====================
+
+Federation is the process by which users on different servers can participate
+in the same room. For this to work, those other servers must be able to contact
+yours to send messages.
+
+The ``server_name`` configured in the Synapse configuration file (often
+``homeserver.yaml``) defines how resources (users, rooms, etc.) will be
+identified (eg: ``@user:example.com``, ``#room:example.com``). By
+default, it is also the domain that other servers will use to
+try to reach your server (via port 8448). This is easy to set
+up and will work provided you set the ``server_name`` to match your
+machine's public DNS hostname, and provide Synapse with a TLS certificate
+which is valid for your ``server_name``.
+
+Once you have completed the steps necessary to federate, you should be able to 
+join a room via federation. (A good place to start is ``#synapse:matrix.org`` - a 
+room for Synapse admins.)
+
+
+## Delegation
+
+For a more flexible configuration, you can have ``server_name``
+resources (eg: ``@user:example.com``) served by a different host and
+port (eg: ``synapse.example.com:443``). There are two ways to do this:
+
+- adding a ``/.well-known/matrix/server`` URL served on ``https://example.com``.
+- adding a DNS ``SRV`` record in the DNS zone of domain
+  ``example.com``.
+
+Without configuring delegation, other servers in the Matrix federation will
+expect to find your server via ``example.com:8448``. The following methods
+allow you to retain a ``server_name`` of ``example.com``, so that your user
+IDs, room aliases, etc continue to look like ``*:example.com``, whilst having
+your federation traffic routed to a different server.
+
+### .well-known delegation
+
+To use this method, you need to be able to configure the HTTPS server for
+your ``server_name`` domain to serve the ``/.well-known/matrix/server``
+URL. Having an active server (with a valid TLS certificate) serving your
+``server_name`` domain is out of the scope of this documentation.
+
+The URL ``https://<server_name>/.well-known/matrix/server`` should
+return a JSON structure containing the key ``m.server`` like so:
+
+    {
+        "m.server": "<synapse.server.name>[:<yourport>]"
+    }
+
+In our example, this would mean that the URL
+``https://example.com/.well-known/matrix/server`` should return:
+
+    {
+        "m.server": "synapse.example.com:443"
+    }
+
+Note that specifying a port is optional. If no port is specified, an SRV
+lookup is performed, as described below. If the target of the delegation does
+not have an SRV record, then the port defaults to 8448.
+
+Most installations will not need to configure .well-known. However, it can be
+useful in cases where the admin is hosting on behalf of someone else and
+therefore cannot gain access to the necessary certificate. With .well-known,
+federation servers will check for a valid TLS certificate for the delegated
+hostname (in our example: ``synapse.example.com``).
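+
+You can check your delegation from a script as well as from a browser. A
+small sketch, assuming the ``requests`` library and ``example.com`` as a
+stand-in for your ``server_name``:
+
+```python
+import requests
+
+# Fetch our own delegation document, as a federating server would.
+resp = requests.get("https://example.com/.well-known/matrix/server", timeout=10)
+resp.raise_for_status()
+print(resp.json()["m.server"])  # expect e.g. "synapse.example.com:443"
+```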
+
+.well-known support first appeared in Synapse v0.99.0. To federate with older
+servers you may need to additionally configure SRV delegation. Alternatively,
+encourage the server admin in question to upgrade :).
+
+### DNS SRV delegation
+
+To use this delegation method, you need to have write access to the DNS
+records of your ``server_name``'s zone (in our example, the ``example.com``
+DNS zone).
+
+This method requires the target server to provide a
+valid TLS certificate for the original ``server_name``.
+
+You need to add an SRV record in your ``server_name``'s DNS zone with this
+format:
+
+     _matrix._tcp.<yourdomain.com> <ttl> IN SRV <priority> <weight> <port> <synapse.server.name>
+
+In our example, we would need to add this SRV record in the
+``example.com`` DNS zone:
+
+     _matrix._tcp.example.com. 3600 IN SRV 10 5 443 synapse.example.com.
+
+Once set up, you can check the DNS record with ``dig -t srv
+_matrix._tcp.<server_name>``. In our example, we would expect this:
+
+    $ dig -t srv _matrix._tcp.example.com
+    _matrix._tcp.example.com. 3600    IN      SRV     10 5 443 synapse.example.com.
+
+Note that the target of an SRV record cannot be an alias (CNAME record): it
+has to point directly to the server hosting the Synapse instance.
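+
+To check the alias rule programmatically, here is a rough sketch with the
+``dnspython`` library (the SRV lookup mirrors the ``dig`` check above; the
+CNAME query should come back empty for a correctly-configured target):
+
+```python
+import dns.resolver  # dnspython
+
+answers = dns.resolver.query("_matrix._tcp.example.com", "SRV")
+for rr in answers:
+    target = str(rr.target).rstrip(".")
+    print(rr.priority, rr.weight, rr.port, target)
+    # The target must resolve directly (A/AAAA), not via a CNAME alias.
+    try:
+        dns.resolver.query(target, "CNAME")
+        print("WARNING: %s is a CNAME, which is not allowed" % (target,))
+    except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN):
+        pass  # no CNAME record: good
+```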
+
+## Troubleshooting
+
+You can use the [federation tester](https://matrix.org/federationtester) to
+check if your homeserver is configured correctly. Alternatively, try the
+[JSON API used by the federation tester](https://matrix.org/federationtester/api/report?server_name=DOMAIN),
+replacing ``DOMAIN`` with your ``server_name``. Hitting the API directly
+provides extra detail.
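+
+For a scripted health check, the JSON endpoint can be polled directly. A
+small sketch, assuming the ``requests`` library; ``FederationOK`` is the
+overall verdict field at the time of writing, though the response schema is
+not a stable API:
+
+```python
+import requests
+
+resp = requests.get(
+    "https://matrix.org/federationtester/api/report",
+    params={"server_name": "example.com"},  # replace with your server_name
+    timeout=30,
+)
+report = resp.json()
+print("Federation OK:", report.get("FederationOK"))
+```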
+
+The typical failure mode for federation is that when the server tries to join
+a room, it is rejected with "401: Unauthorized". Generally this means that other
+servers in the room could not access yours. (Joining a room over federation is
+a complicated dance which requires connections in both directions).
+
+Another common problem is that people on other servers can't join rooms that
+you invite them to. This can be caused by an incorrectly-configured reverse
+proxy: see [reverse_proxy.rst](reverse_proxy.rst) for instructions on how to
+correctly configure a reverse proxy.
+
+## Running a Demo Federation of Synapses
+
+If you want to get up and running quickly with a trio of homeservers in a
+private federation, there is a script in the ``demo`` directory. This is
+mainly useful for development purposes. See [demo/README](../demo/README).
diff --git a/docs/log_contexts.rst b/docs/log_contexts.rst
index 82ac4f91e5..27cde11cf7 100644
--- a/docs/log_contexts.rst
+++ b/docs/log_contexts.rst
@@ -163,7 +163,7 @@ the logcontext was set, this will make things work out ok: provided
 It's all too easy to forget to ``yield``: for instance if we forgot that
 ``do_some_stuff`` returned a deferred, we might plough on regardless. This
 leads to a mess; it will probably work itself out eventually, but not before
-a load of stuff has been logged against the wrong content. (Normally, other
+a load of stuff has been logged against the wrong context. (Normally, other
 things will break, more obviously, if you forget to ``yield``, so this tends
 not to be a major problem in practice.)
 
@@ -440,3 +440,59 @@ To conclude: I think this scheme would have worked equally well, with less
 danger of messing it up, and probably made some more esoteric code easier to
 write. But again — changing the conventions of the entire Synapse codebase is
 not a sensible option for the marginal improvement offered.
+
+
+A note on garbage-collection of Deferred chains
+-----------------------------------------------
+
+It turns out that our logcontext rules do not play nicely with Deferred
+chains which get orphaned and garbage-collected.
+
+Imagine we have some code that looks like this:
+
+.. code:: python
+
+    listener_queue = []
+
+    def on_something_interesting():
+        for d in listener_queue:
+            d.callback("foo")
+
+    @defer.inlineCallbacks
+    def await_something_interesting():
+        new_deferred = defer.Deferred()
+        listener_queue.append(new_deferred)
+
+        with PreserveLoggingContext():
+            yield new_deferred
+
+Obviously, the idea here is that we have a bunch of things which are waiting
+for an event. (It's just an example of the problem here, but a relatively
+common one.)
+
+Now let's imagine two further things happen. First of all, whatever was
+waiting for the interesting thing goes away. (Perhaps the request times out,
+or something *even more* interesting happens.)
+
+Secondly, let's suppose that we decide that the interesting thing is never
+going to happen, and we reset the listener queue:
+
+.. code:: python
+
+    def reset_listener_queue():
+        listener_queue.clear()
+
+So, both ends of the deferred chain have now dropped their references, and the
+deferred chain is now orphaned, and will be garbage-collected at some point.
+Note that ``await_something_interesting`` is a generator function, and when
+Python garbage-collects generator functions, it gives them a chance to clean
+up by making the ``yield`` raise a ``GeneratorExit`` exception. In our case,
+that means that the ``__exit__`` handler of ``PreserveLoggingContext`` will
+carefully restore the request context, but there is now nothing waiting for
+its return, so the request context is never cleared.
+
+To reiterate, this problem only arises when *both* ends of a deferred chain
+are dropped. Dropping the reference to a deferred you're supposed to be
+calling is probably bad practice, so this doesn't actually happen too much.
+Unfortunately, when it does happen, it will lead to leaked logcontexts which
+are incredibly hard to track down.
diff --git a/docs/privacy_policy_templates/en/1.0.html b/docs/privacy_policy_templates/en/1.0.html
index 55c5e4b612..321c7e4671 100644
--- a/docs/privacy_policy_templates/en/1.0.html
+++ b/docs/privacy_policy_templates/en/1.0.html
@@ -12,12 +12,15 @@
     <p>
       All your base are belong to us.
     </p>
-    <form method="post" action="consent">
-      <input type="hidden" name="v" value="{{version}}"/>
-      <input type="hidden" name="u" value="{{user}}"/>
-      <input type="hidden" name="h" value="{{userhmac}}"/>
-      <input type="submit" value="Sure thing!"/>
-    </form>
+    {% if not public_version %}
+      <!-- The variables used here are only provided when the 'u' param is given to the homeserver -->
+      <form method="post" action="consent">
+        <input type="hidden" name="v" value="{{version}}"/>
+        <input type="hidden" name="u" value="{{user}}"/>
+        <input type="hidden" name="h" value="{{userhmac}}"/>
+        <input type="submit" value="Sure thing!"/>
+      </form>
+    {% endif %}
   {% endif %}
   </body>
 </html>
diff --git a/docs/reverse_proxy.rst b/docs/reverse_proxy.rst
new file mode 100644
index 0000000000..4706061eba
--- /dev/null
+++ b/docs/reverse_proxy.rst
@@ -0,0 +1,112 @@
+Using a reverse proxy with Synapse
+==================================
+
+It is recommended to put a reverse proxy such as
+`nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
+`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_,
+`Caddy <https://caddyserver.com/docs/proxy>`_ or
+`HAProxy <https://www.haproxy.org/>`_ in front of Synapse. One advantage of
+doing so is that it means that you can expose the default https port (443) to
+Matrix clients without needing to run Synapse with root privileges.
+
+**NOTE**: Your reverse proxy must not 'canonicalise' or 'normalise' the
+requested URI in any way (for example, by decoding ``%xx`` escapes). Beware
+that Apache *will* canonicalise URIs unless you specify ``nocanon``.
+
+When setting up a reverse proxy, remember that Matrix clients and other Matrix
+servers do not necessarily need to connect to your server via the same server
+name or port. Indeed, clients will use port 443 by default, whereas servers
+default to port 8448. Where these are different, we refer to the 'client port'
+and the 'federation port'. See `Setting up federation
+<../README.rst#setting-up-federation>`_ for more details of the algorithm used for
+federation connections.
+
+Let's assume that we expect clients to connect to our server at
+``https://matrix.example.com``, and other servers to connect at
+``https://example.com:8448``. Here are some example configurations:
+
+* nginx::
+
+      server {
+          listen 443 ssl;
+          listen [::]:443 ssl;
+          server_name matrix.example.com;
+
+          location /_matrix {
+              proxy_pass http://localhost:8008;
+              proxy_set_header X-Forwarded-For $remote_addr;
+          }
+      }
+
+      server {
+          listen 8448 ssl default_server;
+          listen [::]:8448 ssl default_server;
+          server_name example.com;
+
+          location / {
+              proxy_pass http://localhost:8008;
+              proxy_set_header X-Forwarded-For $remote_addr;
+          }
+      }
+
+* Caddy::
+
+      matrix.example.com {
+        proxy /_matrix http://localhost:8008 {
+          transparent
+        }
+      }
+
+      example.com:8448 {
+        proxy / http://localhost:8008 {
+          transparent
+        }
+      }
+
+* Apache (note the ``nocanon`` options here!)::
+
+      <VirtualHost *:443>
+          SSLEngine on
+          ServerName matrix.example.com
+
+          <Location /_matrix>
+              ProxyPass http://127.0.0.1:8008/_matrix nocanon
+              ProxyPassReverse http://127.0.0.1:8008/_matrix
+          </Location>
+      </VirtualHost>
+
+      <VirtualHost *:8448>
+          SSLEngine on
+          ServerName example.com
+
+          <Location /_matrix>
+              ProxyPass http://127.0.0.1:8008/_matrix nocanon
+              ProxyPassReverse http://127.0.0.1:8008/_matrix
+          </Location>
+      </VirtualHost>
+
+* HAProxy::
+
+      frontend https
+        bind 0.0.0.0:443 v4v6 ssl crt /etc/ssl/haproxy/ strict-sni alpn h2,http/1.1
+        bind :::443 ssl crt /etc/ssl/haproxy/ strict-sni alpn h2,http/1.1
+        
+        # Matrix client traffic
+        acl matrix hdr(host) -i matrix.example.com
+        use_backend matrix if matrix
+        
+      frontend matrix-federation
+        bind 0.0.0.0:8448 v4v6 ssl crt /etc/ssl/haproxy/synapse.pem alpn h2,http/1.1
+        bind :::8448 ssl crt /etc/ssl/haproxy/synapse.pem alpn h2,http/1.1
+        default_backend matrix
+        
+      backend matrix
+        server matrix 127.0.0.1:8008
+
+You will also want to set ``bind_addresses: ['127.0.0.1']`` and ``x_forwarded: true``
+for port 8008 in ``homeserver.yaml`` to ensure that client IP addresses are
+recorded correctly.
+
+Having done so, you can then use ``https://matrix.example.com`` (instead of
+``https://matrix.example.com:8448``) as the "Custom server" when connecting to
+Synapse from a client.
diff --git a/docs/tcp_replication.rst b/docs/tcp_replication.rst
index 62225ba6f4..73436cea62 100644
--- a/docs/tcp_replication.rst
+++ b/docs/tcp_replication.rst
 for each stream so that on reconnection it can start streaming from the correct
 place. Note: not all RDATA have valid tokens due to batching. See
 ``RdataCommand`` for more details.
 
-
 Example
 ~~~~~~~
 
@@ -221,3 +220,28 @@ SYNC (S, C)
 
 See ``synapse/replication/tcp/commands.py`` for a detailed description and the
 format of each command.
+
+
+Cache Invalidation Stream
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The cache invalidation stream is used to inform workers when they need to
+invalidate any of their caches in the data store. This is done by streaming all
+cache invalidations done on master down to the workers, assuming that any caches
+on the workers also exist on the master.
+
+Each individual cache invalidation results in a row being sent down
+replication, which includes the cache name (the name of the function) and the
+key to invalidate. For example::
+
+    > RDATA caches 550953771 ["get_user_by_id", ["@bob:example.com"], 1550574873251]
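+
+For illustration, a worker could decode such a line along these lines (a
+rough sketch, not Synapse's actual parsing code; the leading ``>`` in the
+example above just marks the direction of traffic):
+
+.. code:: python
+
+    import json
+
+    def parse_caches_rdata(line):
+        # Wire format: "RDATA <stream_name> <token> <json row>". This ignores
+        # batching, where a row may not carry a valid token - see
+        # RdataCommand for details.
+        cmd, stream_name, token, row = line.split(" ", 3)
+        assert cmd == "RDATA" and stream_name == "caches"
+        cache_name, keys, invalidation_ts = json.loads(row)
+        return token, cache_name, keys, invalidation_ts
+
+    print(parse_caches_rdata(
+        'RDATA caches 550953771 '
+        '["get_user_by_id", ["@bob:example.com"], 1550574873251]'
+    ))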
+
+However, there are times when a number of caches need to be invalidated at
+the same time with the same key. To reduce traffic we batch those
+invalidations into a single poke, using a special cache name that workers
+understand to expand into the correct set of cache invalidations.
+
+Currently the special cache names are declared in ``synapse/storage/_base.py``
+and are:
+
+1. ``cs_cache_fake`` ─ invalidates caches that depend on the current state
diff --git a/docs/turn-howto.rst b/docs/turn-howto.rst
index e48628ce6e..a2fc5c8820 100644
--- a/docs/turn-howto.rst
+++ b/docs/turn-howto.rst
@@ -40,7 +40,6 @@ You may be able to setup coturn via your package manager,  or set it up manually
  4. Create or edit the config file in ``/etc/turnserver.conf``. The relevant
     lines, with example values, are::
 
-      lt-cred-mech
       use-auth-secret
       static-auth-secret=[your secret key here]
       realm=turn.myserver.org
@@ -52,7 +51,7 @@ You may be able to setup coturn via your package manager,  or set it up manually
 
  5. Consider your security settings.  TURN lets users request a relay
     which will connect to arbitrary IP addresses and ports.  At the least
-    we recommend:
+    we recommend::
 
        # VoIP traffic is all UDP. There is no reason to let users connect to arbitrary TCP endpoints via the relay.
        no-tcp-relay
@@ -106,7 +105,7 @@ Your home server configuration file needs the following extra keys:
     to refresh credentials. The TURN REST API specification recommends
     one day (86400000).
 
-  4. "turn_allow_guests": Whether to allow guest users to use the TURN
+ 4. "turn_allow_guests": Whether to allow guest users to use the TURN
     server.  This is enabled by default, as otherwise VoIP will not
     work reliably for guests.  However, it does introduce a security risk
     as it lets guests connect to arbitrary endpoints without having gone
diff --git a/docs/workers.rst b/docs/workers.rst
index 101e950020..3ba5879f76 100644
--- a/docs/workers.rst
+++ b/docs/workers.rst
@@ -26,9 +26,8 @@ Configuration
 To make effective use of the workers, you will need to configure an HTTP
 reverse-proxy such as nginx or haproxy, which will direct incoming requests to
 the correct worker, or to the main synapse instance. Note that this includes
-requests made to the federation port. The caveats regarding running a
-reverse-proxy on the federation port still apply (see
-https://github.com/matrix-org/synapse/blob/master/README.rst#reverse-proxying-the-federation-port).
+requests made to the federation port. See `<reverse_proxy.rst>`_ for
+information on setting up a reverse proxy.
 
 To enable workers, you need to add two replication listeners to the master
 synapse, e.g.::
@@ -223,6 +222,13 @@ following regular expressions::
     ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/context/.*$
     ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/members$
     ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state$
+    ^/_matrix/client/(api/v1|r0|unstable)/login$
+
+Additionally, the following REST endpoints can be handled, but all requests must
+be routed to the same instance::
+
+    ^/_matrix/client/(r0|unstable)/register$
+
 
 ``synapse.app.user_dir``
 ~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/jenkins/prepare_synapse.sh b/jenkins/prepare_synapse.sh
index d95ca846c4..016afb8baa 100755
--- a/jenkins/prepare_synapse.sh
+++ b/jenkins/prepare_synapse.sh
@@ -14,22 +14,3 @@ fi
 
 # set up the virtualenv
 tox -e py27 --notest -v
-
-TOX_BIN=$TOX_DIR/py27/bin
-
-# cryptography 2.2 requires setuptools >= 18.5.
-#
-# older versions of virtualenv (?) give us a virtualenv with the same version
-# of setuptools as is installed on the system python (and tox runs virtualenv
-# under python3, so we get the version of setuptools that is installed on that).
-#
-# anyway, make sure that we have a recent enough setuptools.
-$TOX_BIN/pip install 'setuptools>=18.5'
-
-# we also need a semi-recent version of pip, because old ones fail to install
-# the "enum34" dependency of cryptography.
-$TOX_BIN/pip install 'pip>=10'
-
-{ python synapse/python_dependencies.py
-  echo lxml
-} | xargs $TOX_BIN/pip install
diff --git a/scripts-dev/build_debian_packages b/scripts-dev/build_debian_packages
new file mode 100755
index 0000000000..6b9be99060
--- /dev/null
+++ b/scripts-dev/build_debian_packages
@@ -0,0 +1,154 @@
+#!/usr/bin/env python3
+
+# Build the Debian packages using Docker images.
+#
+# This script builds the Docker images and then executes them sequentially, each
+# one building a Debian package for the targeted operating system. It is
+# designed to be a "single command" to produce all the images.
+#
+# By default, builds for all known distributions, but a list of distributions
+# can be passed on the commandline for debugging.
+
+import argparse
+import os
+import signal
+import subprocess
+import sys
+import threading
+from concurrent.futures import ThreadPoolExecutor
+
+DISTS = (
+    "debian:stretch",
+    "debian:buster",
+    "debian:sid",
+    "ubuntu:xenial",
+    "ubuntu:bionic",
+    "ubuntu:cosmic",
+)
+
+DESC = '''\
+Builds .debs for synapse, using a Docker image for the build environment.
+
+By default, builds for all known distributions, but a list of distributions
+can be passed on the commandline for debugging.
+'''
+
+
+class Builder(object):
+    def __init__(self, redirect_stdout=False):
+        self.redirect_stdout = redirect_stdout
+        self.active_containers = set()
+        self._lock = threading.Lock()
+        self._failed = False
+
+    def run_build(self, dist):
+        """Build deb for a single distribution"""
+
+        if self._failed:
+            print("not building %s due to earlier failure" % (dist, ))
+            raise Exception("failed")
+
+        try:
+            self._inner_build(dist)
+        except Exception as e:
+            print("build of %s failed: %s" % (dist, e), file=sys.stderr)
+            self._failed = True
+            raise
+
+    def _inner_build(self, dist):
+        projdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+        os.chdir(projdir)
+
+        tag = dist.split(":", 1)[1]
+
+        # Make the dir where the debs will live.
+        #
+        # Note that we deliberately put this outside the source tree, otherwise
+        # we tend to get source packages which are full of debs. (We could hack
+        # around that with more magic in the build_debian.sh script, but that
+        # doesn't solve the problem for natively-run dpkg-buildpackage).
+        debsdir = os.path.join(projdir, '../debs')
+        os.makedirs(debsdir, exist_ok=True)
+
+        if self.redirect_stdout:
+            logfile = os.path.join(debsdir, "%s.buildlog" % (tag, ))
+            print("building %s: directing output to %s" % (dist, logfile))
+            stdout = open(logfile, "w")
+        else:
+            stdout = None
+
+        # first build a docker image for the build environment
+        subprocess.check_call([
+            "docker", "build",
+            "--tag", "dh-venv-builder:" + tag,
+            "--build-arg", "distro=" + dist,
+            "-f", "docker/Dockerfile-dhvirtualenv",
+            "docker",
+        ], stdout=stdout, stderr=subprocess.STDOUT)
+
+        container_name = "synapse_build_" + tag
+        with self._lock:
+            self.active_containers.add(container_name)
+
+        # then run the build itself
+        subprocess.check_call([
+            "docker", "run",
+            "--rm",
+            "--name", container_name,
+            "--volume=" + projdir + ":/synapse/source:ro",
+            "--volume=" + debsdir + ":/debs",
+            "-e", "TARGET_USERID=%i" % (os.getuid(), ),
+            "-e", "TARGET_GROUPID=%i" % (os.getgid(), ),
+            "dh-venv-builder:" + tag,
+        ], stdout=stdout, stderr=subprocess.STDOUT)
+
+        with self._lock:
+            self.active_containers.remove(container_name)
+
+        if stdout is not None:
+            stdout.close()
+            print("Completed build of %s" % (dist, ))
+
+    def kill_containers(self):
+        with self._lock:
+            active = list(self.active_containers)
+
+        for c in active:
+            print("killing container %s" % (c,))
+            subprocess.run([
+                "docker", "kill", c,
+            ], stdout=subprocess.DEVNULL)
+            with self._lock:
+                self.active_containers.remove(c)
+
+
+def run_builds(dists, jobs=1):
+    builder = Builder(redirect_stdout=(jobs > 1))
+
+    def sig(signum, _frame):
+        print("Caught SIGINT")
+        builder.kill_containers()
+    signal.signal(signal.SIGINT, sig)
+
+    with ThreadPoolExecutor(max_workers=jobs) as e:
+        res = e.map(builder.run_build, dists)
+
+    # make sure we consume the iterable so that exceptions are raised.
+    for r in res:
+        pass
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(
+        description=DESC,
+    )
+    parser.add_argument(
+        '-j', '--jobs', type=int, default=1,
+        help='specify the number of builds to run in parallel',
+    )
+    parser.add_argument(
+        'dist', nargs='*', default=DISTS,
+        help='a list of distributions to build for. Default: %(default)s',
+    )
+    args = parser.parse_args()
+    run_builds(dists=args.dist, jobs=args.jobs)
diff --git a/scripts-dev/check-newsfragment b/scripts-dev/check-newsfragment
new file mode 100755
index 0000000000..e4a22bae61
--- /dev/null
+++ b/scripts-dev/check-newsfragment
@@ -0,0 +1,41 @@
+#!/bin/bash
+#
+# A script which checks that an appropriate news file has been added on this
+# branch.
+
+set -e
+
+# make sure that origin/develop is up to date
+git remote set-branches --add origin develop
+git fetch --depth=1 origin develop
+
+UPSTREAM=origin/develop
+
+# if there are changes in the debian directory, check that the debian changelog
+# has been updated
+if ! git diff --quiet $UPSTREAM... -- debian; then
+    if git diff --quiet $UPSTREAM... -- debian/changelog; then
+        echo "Updates to debian directory, but no update to the changelog." >&2
+        exit 1
+    fi
+fi
+
+# if there are changes *outside* the debian directory, check that the
+# newsfragments have been updated.
+if git diff --name-only $UPSTREAM... | grep -qv '^debian/'; then
+    tox -e check-newsfragment
+fi
+
+echo
+echo "--------------------------"
+echo
+
+# check that any new newsfiles on this branch end with a full stop.
+for f in `git diff --name-only $UPSTREAM... -- changelog.d`; do
+    lastchar=`tr -d '\n' < $f | tail -c 1`
+    if [ $lastchar != '.' ]; then
+        echo -e "\e[31mERROR: newsfragment $f does not end with a '.'\e[39m" >&2
+        exit 1
+    fi
+done
+
diff --git a/scripts-dev/federation_client.py b/scripts-dev/federation_client.py
index 2566ce7cef..e0287c8c6c 100755
--- a/scripts-dev/federation_client.py
+++ b/scripts-dev/federation_client.py
@@ -154,10 +154,15 @@ def request_json(method, origin_name, origin_key, destination, path, content):
     s = requests.Session()
     s.mount("matrix://", MatrixConnectionAdapter())
 
+    headers = {"Host": destination, "Authorization": authorization_headers[0]}
+
+    if method == "POST":
+        headers["Content-Type"] = "application/json"
+
     result = s.request(
         method=method,
         url=dest,
-        headers={"Host": destination, "Authorization": authorization_headers[0]},
+        headers=headers,
         verify=False,
         data=content,
     )
@@ -203,7 +208,7 @@ def main():
     parser.add_argument(
         "-X",
         "--method",
-        help="HTTP method to use for the request. Defaults to GET if --data is"
+        help="HTTP method to use for the request. Defaults to GET if --body is"
         "unspecified, POST if it is.",
     )
 
diff --git a/scripts-dev/make_identicons.pl b/scripts-dev/make_identicons.pl
deleted file mode 100755
index cbff63e298..0000000000
--- a/scripts-dev/make_identicons.pl
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/env perl
-
-use strict;
-use warnings;
-
-use DBI;
-use DBD::SQLite;
-use JSON;
-use Getopt::Long;
-
-my $db; # = "homeserver.db";
-my $server = "http://localhost:8008";
-my $size = 320;
-
-GetOptions("db|d=s",     \$db,
-           "server|s=s", \$server,
-           "width|w=i",  \$size) or usage();
-
-usage() unless $db;
-
-my $dbh = DBI->connect("dbi:SQLite:dbname=$db","","") || die $DBI::errstr;
-
-my $res = $dbh->selectall_arrayref("select token, name from access_tokens, users where access_tokens.user_id = users.id group by user_id") || die $DBI::errstr;
-
-foreach (@$res) {
-    my ($token, $mxid) = ($_->[0], $_->[1]);
-    my ($user_id) = ($mxid =~ m/@(.*):/);
-    my ($url) = $dbh->selectrow_array("select avatar_url from profiles where user_id=?", undef, $user_id);
-    if (!$url || $url =~ /#auto$/) {
-        `curl -s -o tmp.png "$server/_matrix/media/v1/identicon?name=${mxid}&width=$size&height=$size"`;
-        my $json = `curl -s -X POST -H "Content-Type: image/png" -T "tmp.png" $server/_matrix/media/v1/upload?access_token=$token`;
-        my $content_uri = from_json($json)->{content_uri};
-        `curl -X PUT -H "Content-Type: application/json" --data '{ "avatar_url": "${content_uri}#auto"}' $server/_matrix/client/api/v1/profile/${mxid}/avatar_url?access_token=$token`;
-    }
-}
-
-sub usage {
-    die "usage: ./make-identicons.pl\n\t-d database [e.g. homeserver.db]\n\t-s homeserver (default: http://localhost:8008)\n\t-w identicon size in pixels (default 320)";
-}
\ No newline at end of file
diff --git a/scripts/generate_config b/scripts/generate_config
new file mode 100755
index 0000000000..61c5f049e8
--- /dev/null
+++ b/scripts/generate_config
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+
+import argparse
+import sys
+
+from synapse.config.homeserver import HomeServerConfig
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--config-dir",
+        default="CONFDIR",
+        help="The path where the config files are kept. Used to create filenames for "
+             "things like the log config and the signing key. Default: %(default)s",
+    )
+
+    parser.add_argument(
+        "--data-dir",
+        default="DATADIR",
+        help="The path where the data files are kept. Used to create filenames for "
+             "things like the database and media store. Default: %(default)s",
+    )
+
+    parser.add_argument(
+        "--server-name",
+        default="SERVERNAME",
+        help="The server name. Used to initialise the server_name config param, but also "
+             "used in the names of some of the config files. Default: %(default)s",
+    )
+
+    parser.add_argument(
+        "--report-stats",
+        action="store",
+        help="Whether the generated config reports anonymized usage statistics",
+        choices=["yes", "no"],
+    )
+
+    parser.add_argument(
+        "--generate-secrets",
+        action="store_true",
+        help="Enable generation of new secrets for things like the macaroon_secret_key."
+             "By default, these parameters will be left unset."
+    )
+
+    parser.add_argument(
+        "-o", "--output-file",
+        type=argparse.FileType('w'),
+        default=sys.stdout,
+        help="File to write the configuration to. Default: stdout",
+    )
+
+    args = parser.parse_args()
+
+    report_stats = args.report_stats
+    if report_stats is not None:
+        report_stats = report_stats == "yes"
+
+    conf = HomeServerConfig().generate_config(
+        config_dir_path=args.config_dir,
+        data_dir_path=args.data_dir,
+        server_name=args.server_name,
+        generate_secrets=args.generate_secrets,
+        report_stats=report_stats,
+    )
+
+    args.output_file.write(conf)
diff --git a/scripts/hash_password b/scripts/hash_password
index a62bb5aa83..a1eb0769da 100755
--- a/scripts/hash_password
+++ b/scripts/hash_password
@@ -3,13 +3,15 @@
 import argparse
 import getpass
 import sys
+import unicodedata
 
 import bcrypt
 import yaml
 
-bcrypt_rounds=12
+bcrypt_rounds = 12
 password_pepper = ""
 
+
 def prompt_for_pass():
     password = getpass.getpass("Password: ")
 
@@ -23,19 +25,27 @@ def prompt_for_pass():
 
     return password
 
+
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(
-        description="Calculate the hash of a new password, so that passwords"
-                    " can be reset")
+        description=(
+            "Calculate the hash of a new password, so that passwords can be reset"
+        )
+    )
     parser.add_argument(
-        "-p", "--password",
+        "-p",
+        "--password",
         default=None,
         help="New password for user. Will prompt if omitted.",
     )
     parser.add_argument(
-        "-c", "--config",
+        "-c",
+        "--config",
         type=argparse.FileType('r'),
-        help="Path to server config file. Used to read in bcrypt_rounds and password_pepper.",
+        help=(
+            "Path to server config file. "
+            "Used to read in bcrypt_rounds and password_pepper."
+        ),
     )
 
     args = parser.parse_args()
@@ -49,4 +59,21 @@ if __name__ == "__main__":
     if not password:
         password = prompt_for_pass()
 
-    print bcrypt.hashpw(password + password_pepper, bcrypt.gensalt(bcrypt_rounds))
+    # On Python 2, make sure we decode it to Unicode before we normalise it
+    if isinstance(password, bytes):
+        try:
+            password = password.decode(sys.stdin.encoding)
+        except UnicodeDecodeError:
+            print(
+                "ERROR! Your password is not decodable using your terminal encoding (%s)."
+                % (sys.stdin.encoding,)
+            )
+
+    pw = unicodedata.normalize("NFKC", password)
+
+    hashed = bcrypt.hashpw(
+        pw.encode('utf8') + password_pepper.encode("utf8"),
+        bcrypt.gensalt(bcrypt_rounds),
+    ).decode('ascii')
+
+    print(hashed)
diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db
index 3c7b606323..2fa01d1a18 100755
--- a/scripts/synapse_port_db
+++ b/scripts/synapse_port_db
@@ -53,6 +53,7 @@ BOOLEAN_COLUMNS = {
     "group_summary_users": ["is_public"],
     "group_roles": ["is_public"],
     "local_group_membership": ["is_publicised", "is_admin"],
+    "e2e_room_keys": ["is_verified"],
 }
 
 
diff --git a/setup.py b/setup.py
index 00b69c43f5..55b1b10a77 100755
--- a/setup.py
+++ b/setup.py
@@ -84,13 +84,25 @@ version = exec_file(("synapse", "__init__.py"))["__version__"]
 dependencies = exec_file(("synapse", "python_dependencies.py"))
 long_description = read_file(("README.rst",))
 
+REQUIREMENTS = dependencies['REQUIREMENTS']
+CONDITIONAL_REQUIREMENTS = dependencies['CONDITIONAL_REQUIREMENTS']
+
+# Make `pip install matrix-synapse[all]` install all the optional dependencies.
+ALL_OPTIONAL_REQUIREMENTS = set()
+
+for optional_deps in CONDITIONAL_REQUIREMENTS.values():
+    ALL_OPTIONAL_REQUIREMENTS = set(optional_deps) | ALL_OPTIONAL_REQUIREMENTS
+
+CONDITIONAL_REQUIREMENTS["all"] = list(ALL_OPTIONAL_REQUIREMENTS)
+
+
 setup(
     name="matrix-synapse",
     version=version,
     packages=find_packages(exclude=["tests", "tests.*"]),
     description="Reference homeserver for the Matrix decentralised comms protocol",
-    install_requires=dependencies['requirements'](include_conditional=True).keys(),
-    dependency_links=dependencies["DEPENDENCY_LINKS"].values(),
+    install_requires=REQUIREMENTS,
+    extras_require=CONDITIONAL_REQUIREMENTS,
     include_package_data=True,
     zip_safe=False,
     long_description=long_description,
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 89ea9a9775..25c10244d3 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
-# Copyright 2018 New Vector Ltd
+# Copyright 2018-2019 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -27,4 +27,4 @@ try:
 except ImportError:
     pass
 
-__version__ = "0.33.8"
+__version__ = "0.99.2"
diff --git a/synapse/_scripts/register_new_matrix_user.py b/synapse/_scripts/register_new_matrix_user.py
index 70cecde486..6e93f5a0c6 100644
--- a/synapse/_scripts/register_new_matrix_user.py
+++ b/synapse/_scripts/register_new_matrix_user.py
@@ -35,6 +35,7 @@ def request_registration(
     server_location,
     shared_secret,
     admin=False,
+    user_type=None,
     requests=_requests,
     _print=print,
     exit=sys.exit,
@@ -45,7 +46,7 @@ def request_registration(
     # Get the nonce
     r = requests.get(url, verify=False)
 
-    if r.status_code is not 200:
+    if r.status_code != 200:
         _print("ERROR! Received %d %s" % (r.status_code, r.reason))
         if 400 <= r.status_code < 500:
             try:
@@ -65,6 +66,9 @@ def request_registration(
     mac.update(password.encode('utf8'))
     mac.update(b"\x00")
     mac.update(b"admin" if admin else b"notadmin")
+    if user_type:
+        mac.update(b"\x00")
+        mac.update(user_type.encode('utf8'))
 
     mac = mac.hexdigest()
 
@@ -74,12 +78,13 @@ def request_registration(
         "password": password,
         "mac": mac,
         "admin": admin,
+        "user_type": user_type,
     }
 
     _print("Sending registration request...")
     r = requests.post(url, json=data, verify=False)
 
-    if r.status_code is not 200:
+    if r.status_code != 200:
         _print("ERROR! Received %d %s" % (r.status_code, r.reason))
         if 400 <= r.status_code < 500:
             try:
@@ -91,7 +96,7 @@ def request_registration(
     _print("Success!")
 
 
-def register_new_user(user, password, server_location, shared_secret, admin):
+def register_new_user(user, password, server_location, shared_secret, admin, user_type):
     if not user:
         try:
             default_user = getpass.getuser()
@@ -129,7 +134,8 @@ def register_new_user(user, password, server_location, shared_secret, admin):
         else:
             admin = False
 
-    request_registration(user, password, server_location, shared_secret, bool(admin))
+    request_registration(user, password, server_location, shared_secret,
+                         bool(admin), user_type)
 
 
 def main():
@@ -154,6 +160,12 @@ def main():
         default=None,
         help="New password for user. Will prompt if omitted.",
     )
+    parser.add_argument(
+        "-t",
+        "--user_type",
+        default=None,
+        help="User type as specified in synapse.api.constants.UserTypes",
+    )
     admin_group = parser.add_mutually_exclusive_group()
     admin_group.add_argument(
         "-a",
@@ -208,7 +220,8 @@ def main():
     if args.admin or args.no_admin:
         admin = args.admin
 
-    register_new_user(args.user, args.password, args.server_url, secret, admin)
+    register_new_user(args.user, args.password, args.server_url, secret,
+                      admin, args.user_type)
 
 
 if __name__ == "__main__":
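The shared-secret MAC covers the new optional user_type as a trailing NUL-separated field. A standalone sketch of the full computation, assuming HMAC-SHA1 and the nonce/user/password fields fed in the order the surrounding script uses:

    import hashlib
    import hmac

    def registration_mac(shared_secret, nonce, user, password,
                         admin=False, user_type=None):
        # Fields are fed to HMAC-SHA1 in a fixed order, separated by NULs.
        mac = hmac.new(shared_secret.encode("utf8"), digestmod=hashlib.sha1)
        mac.update(nonce.encode("utf8"))
        mac.update(b"\x00")
        mac.update(user.encode("utf8"))
        mac.update(b"\x00")
        mac.update(password.encode("utf8"))
        mac.update(b"\x00")
        mac.update(b"admin" if admin else b"notadmin")
        if user_type:
            mac.update(b"\x00")
            mac.update(user_type.encode("utf8"))
        return mac.hexdigest()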
diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index 1401e8a2b0..03dd99fcd0 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -65,7 +65,7 @@ class Auth(object):
         register_cache("cache", "token_cache", self.token_cache)
 
     @defer.inlineCallbacks
-    def check_from_context(self, event, context, do_sig_check=True):
+    def check_from_context(self, room_version, event, context, do_sig_check=True):
         prev_state_ids = yield context.get_prev_state_ids(self.store)
         auth_events_ids = yield self.compute_auth_events(
             event, prev_state_ids, for_verification=True,
@@ -74,12 +74,16 @@ class Auth(object):
         auth_events = {
             (e.type, e.state_key): e for e in itervalues(auth_events)
         }
-        self.check(event, auth_events=auth_events, do_sig_check=do_sig_check)
+        self.check(
+            room_version, event,
+            auth_events=auth_events, do_sig_check=do_sig_check,
+        )
 
-    def check(self, event, auth_events, do_sig_check=True):
+    def check(self, room_version, event, auth_events, do_sig_check=True):
         """ Checks if this event is correctly authed.
 
         Args:
+            room_version (str): version of the room
             event: the event being checked.
             auth_events (dict: event-key -> event): the existing room state.
 
@@ -88,7 +92,9 @@ class Auth(object):
             True if the auth checks pass.
         """
         with Measure(self.clock, "auth.check"):
-            event_auth.check(event, auth_events, do_sig_check=do_sig_check)
+            event_auth.check(
+                room_version, event, auth_events, do_sig_check=do_sig_check
+            )
 
     @defer.inlineCallbacks
     def check_joined_room(self, room_id, user_id, current_state=None):
@@ -188,18 +194,34 @@ class Auth(object):
         """
         # Can optionally look elsewhere in the request (e.g. headers)
         try:
+            ip_addr = self.hs.get_ip_from_request(request)
+            user_agent = request.requestHeaders.getRawHeaders(
+                b"User-Agent",
+                default=[b""]
+            )[0].decode('ascii', 'surrogateescape')
+
+            access_token = self.get_access_token_from_request(
+                request, self.TOKEN_NOT_FOUND_HTTP_STATUS
+            )
+
             user_id, app_service = yield self._get_appservice_user_id(request)
 
             if user_id:
                 request.authenticated_entity = user_id
+
+                if ip_addr and self.hs.config.track_appservice_user_ips:
+                    yield self.store.insert_client_ip(
+                        user_id=user_id,
+                        access_token=access_token,
+                        ip=ip_addr,
+                        user_agent=user_agent,
+                        device_id="dummy-device",  # stubbed
+                    )
+
                 defer.returnValue(
                     synapse.types.create_requester(user_id, app_service=app_service)
                 )
 
-            access_token = self.get_access_token_from_request(
-                request, self.TOKEN_NOT_FOUND_HTTP_STATUS
-            )
-
             user_info = yield self.get_user_by_access_token(access_token, rights)
             user = user_info["user"]
             token_id = user_info["token_id"]
@@ -209,11 +231,6 @@ class Auth(object):
             # stubbed out.
             device_id = user_info.get("device_id")
 
-            ip_addr = self.hs.get_ip_from_request(request)
-            user_agent = request.requestHeaders.getRawHeaders(
-                b"User-Agent",
-                default=[b""]
-            )[0].decode('ascii', 'surrogateescape')
             if user and access_token and ip_addr:
                 yield self.store.insert_client_ip(
                     user_id=user.to_string(),
@@ -291,20 +308,28 @@ class Auth(object):
         Raises:
             AuthError if no user by that token exists or the token is invalid.
         """
-        try:
-            user_id, guest = self._parse_and_validate_macaroon(token, rights)
-        except _InvalidMacaroonException:
-            # doesn't look like a macaroon: treat it as an opaque token which
-            # must be in the database.
-            # TODO: it would be nice to get rid of this, but apparently some
-            # people use access tokens which aren't macaroons
+
+        if rights == "access":
+            # first look in the database
             r = yield self._look_up_user_by_access_token(token)
-            defer.returnValue(r)
+            if r:
+                defer.returnValue(r)
 
+        # otherwise it needs to be a valid macaroon
         try:
+            user_id, guest = self._parse_and_validate_macaroon(token, rights)
             user = UserID.from_string(user_id)
 
-            if guest:
+            if rights == "access":
+                if not guest:
+                    # non-guest access tokens must be in the database
+                    logger.warning("Unrecognised access token - not in store.")
+                    raise AuthError(
+                        self.TOKEN_NOT_FOUND_HTTP_STATUS,
+                        "Unrecognised access token.",
+                        errcode=Codes.UNKNOWN_TOKEN,
+                    )
+
                 # Guest access tokens are not stored in the database (there can
                 # only be one access token per guest, anyway).
                 #
@@ -345,31 +370,15 @@ class Auth(object):
                     "device_id": None,
                 }
             else:
-                # This codepath exists for several reasons:
-                #   * so that we can actually return a token ID, which is used
-                #     in some parts of the schema (where we probably ought to
-                #     use device IDs instead)
-                #   * the only way we currently have to invalidate an
-                #     access_token is by removing it from the database, so we
-                #     have to check here that it is still in the db
-                #   * some attributes (notably device_id) aren't stored in the
-                #     macaroon. They probably should be.
-                # TODO: build the dictionary from the macaroon once the
-                # above are fixed
-                ret = yield self._look_up_user_by_access_token(token)
-                if ret["user"] != user:
-                    logger.error(
-                        "Macaroon user (%s) != DB user (%s)",
-                        user,
-                        ret["user"]
-                    )
-                    raise AuthError(
-                        self.TOKEN_NOT_FOUND_HTTP_STATUS,
-                        "User mismatch in macaroon",
-                        errcode=Codes.UNKNOWN_TOKEN
-                    )
+                raise RuntimeError("Unknown rights setting %s", rights)
             defer.returnValue(ret)
-        except (pymacaroons.exceptions.MacaroonException, TypeError, ValueError):
+        except (
+            _InvalidMacaroonException,
+            pymacaroons.exceptions.MacaroonException,
+            TypeError,
+            ValueError,
+        ) as e:
+            logger.warning("Invalid macaroon in auth: %s %s", type(e), e)
             raise AuthError(
                 self.TOKEN_NOT_FOUND_HTTP_STATUS, "Invalid macaroon passed.",
                 errcode=Codes.UNKNOWN_TOKEN
@@ -499,11 +508,8 @@ class Auth(object):
     def _look_up_user_by_access_token(self, token):
         ret = yield self.store.get_user_by_access_token(token)
         if not ret:
-            logger.warn("Unrecognised access token - not in store.")
-            raise AuthError(
-                self.TOKEN_NOT_FOUND_HTTP_STATUS, "Unrecognised access token.",
-                errcode=Codes.UNKNOWN_TOKEN
-            )
+            defer.returnValue(None)
+
         # we use ret.get() below because *lots* of unit tests stub out
         # get_user_by_access_token in a way where it only returns a couple of
         # the fields.
@@ -532,17 +538,6 @@ class Auth(object):
         return self.store.is_server_admin(user)
 
     @defer.inlineCallbacks
-    def add_auth_events(self, builder, context):
-        prev_state_ids = yield context.get_prev_state_ids(self.store)
-        auth_ids = yield self.compute_auth_events(builder, prev_state_ids)
-
-        auth_events_entries = yield self.store.add_event_hashes(
-            auth_ids
-        )
-
-        builder.auth_events = auth_events_entries
-
-    @defer.inlineCallbacks
     def compute_auth_events(self, event, current_state_ids, for_verification=False):
         if event.type == EventTypes.Create:
             defer.returnValue([])
@@ -558,7 +553,7 @@ class Auth(object):
         key = (EventTypes.JoinRules, "", )
         join_rule_event_id = current_state_ids.get(key)
 
-        key = (EventTypes.Member, event.user_id, )
+        key = (EventTypes.Member, event.sender, )
         member_event_id = current_state_ids.get(key)
 
         key = (EventTypes.Create, "", )
@@ -608,7 +603,7 @@ class Auth(object):
 
         defer.returnValue(auth_ids)
 
-    def check_redaction(self, event, auth_events):
+    def check_redaction(self, room_version, event, auth_events):
         """Check whether the event sender is allowed to redact the target event.
 
         Returns:
@@ -621,7 +616,7 @@ class Auth(object):
             AuthError if the event sender is definitely not allowed to redact
             the target event.
         """
-        return event_auth.check_redaction(event, auth_events)
+        return event_auth.check_redaction(room_version, event, auth_events)
 
     @defer.inlineCallbacks
     def check_can_change_room_list(self, room_id, user):
@@ -778,9 +773,10 @@ class Auth(object):
             threepid should never be set at the same time.
         """
 
-        # Never fail an auth check for the server notices users
+        # Never fail an auth check for the server notices users or support user
         # This can be a problem where event creation is prohibited due to blocking
-        if user_id == self.hs.config.server_notices_mxid:
+        is_support = yield self.store.is_support_user(user_id)
+        if user_id == self.hs.config.server_notices_mxid or is_support:
             return
 
         if self.hs.config.hs_disabled:
@@ -805,7 +801,9 @@ class Auth(object):
             elif threepid:
                 # If the user does not exist yet, but is signing up with a
                 # reserved threepid then pass auth check
-                if is_threepid_reserved(self.hs.config, threepid):
+                if is_threepid_reserved(
+                    self.hs.config.mau_limits_reserved_threepids, threepid
+                ):
                     return
             # Else if there is no room in the MAU bucket, bail
             current_mau = yield self.store.get_monthly_active_count()
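The net effect of the get_user_by_access_token changes above is a reordering: for "access" rights the database is consulted first, macaroon validation becomes the fallback, and non-guest access tokens must exist in the database. A condensed, simplified sketch of the new control flow (db_lookup and parse_macaroon stand in for the store and macaroon helpers; the real code raises AuthError rather than ValueError):

    def validate_token(token, rights, db_lookup, parse_macaroon):
        if rights == "access":
            # First look in the database; this may now return None
            # instead of raising.
            row = db_lookup(token)
            if row is not None:
                return row
        # Otherwise the token must be a valid macaroon.
        user_id, guest = parse_macaroon(token, rights)
        if rights == "access" and not guest:
            # Non-guest access tokens must be in the database.
            raise ValueError("Unrecognised access token")
        return {"user": user_id, "is_guest": guest,
                "token_id": None, "device_id": None}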
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index 9ad1939ed5..02127373a4 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -51,6 +51,7 @@ class LoginType(object):
     EMAIL_IDENTITY = u"m.login.email.identity"
     MSISDN = u"m.login.msisdn"
     RECAPTCHA = u"m.login.recaptcha"
+    TERMS = u"m.login.terms"
     DUMMY = u"m.login.dummy"
 
     # Only for C/S API v1
@@ -61,15 +62,17 @@
 class EventTypes(object):
     Member = "m.room.member"
     Create = "m.room.create"
+    Tombstone = "m.room.tombstone"
     JoinRules = "m.room.join_rules"
     PowerLevels = "m.room.power_levels"
     Aliases = "m.room.aliases"
     Redaction = "m.room.redaction"
     ThirdPartyInvite = "m.room.third_party_invite"
+    Encryption = "m.room.encryption"
 
     RoomHistoryVisibility = "m.room.history_visibility"
     CanonicalAlias = "m.room.canonical_alias"
     RoomAvatar = "m.room.avatar"
+    RoomEncryption = "m.room.encryption"
     GuestAccess = "m.room.guest_access"
     Encryption = "m.room.encryption"
 
@@ -101,7 +105,14 @@ class ThirdPartyEntityKind(object):
 
 class RoomVersions(object):
     V1 = "1"
-    VDH_TEST = "vdh-test-version"
+    V2 = "2"
+    V3 = "3"
+    STATE_V2_TEST = "state-v2-test"
+
+
+class RoomDisposition(object):
+    STABLE = "stable"
+    UNSTABLE = "unstable"
 
 
 # the version we will give rooms which are created on this server
@@ -109,7 +120,35 @@ DEFAULT_ROOM_VERSION = RoomVersions.V1
 
 # vdh-test-version is a placeholder to get room versioning support working and tested
 # until we have a working v2.
-KNOWN_ROOM_VERSIONS = {RoomVersions.V1, RoomVersions.VDH_TEST}
+KNOWN_ROOM_VERSIONS = {
+    RoomVersions.V1,
+    RoomVersions.V2,
+    RoomVersions.V3,
+    RoomVersions.STATE_V2_TEST,
+}
+
+
+class EventFormatVersions(object):
+    """This is an internal enum for tracking the version of the event format,
+    independently from the room version.
+    """
+    V1 = 1
+    V2 = 2
+
+
+KNOWN_EVENT_FORMAT_VERSIONS = {
+    EventFormatVersions.V1,
+    EventFormatVersions.V2,
+}
+
 
 ServerNoticeMsgType = "m.server_notice"
 ServerNoticeLimitReached = "m.server_notice.usage_limit_reached"
+
+
+class UserTypes(object):
+    """Allows for user type specific behaviour. With the benefit of hindsight
+    'admin' and 'guest' users should also be UserTypes. Normal users are type None
+    """
+    SUPPORT = "support"
+    ALL_USER_TYPES = (SUPPORT,)
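A minimal sketch of how callers can gate on the expanded room-version constants (imports as defined above):

    from synapse.api.constants import KNOWN_ROOM_VERSIONS, RoomVersions

    def assert_supported_room_version(room_version):
        # Reject rooms whose version this server does not implement.
        if room_version not in KNOWN_ROOM_VERSIONS:
            raise ValueError("Unsupported room version: %s" % (room_version,))

    assert_supported_room_version(RoomVersions.V3)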
diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index 48b903374d..0b464834ce 100644
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -348,6 +348,24 @@ class IncompatibleRoomVersionError(SynapseError):
         )
 
 
+class RequestSendFailed(RuntimeError):
+    """Sending a HTTP request over federation failed due to not being able to
+    talk to the remote server for some reason.
+
+    This exception is used to differentiate "expected" errors that arise due to
+    networking (e.g. DNS failures, connection timeouts etc), versus unexpected
+    errors (like programming errors).
+    """
+    def __init__(self, inner_exception, can_retry):
+        super(RequestSendFailed, self).__init__(
+            "Failed to send request: %s: %s" % (
+                type(inner_exception).__name__, inner_exception,
+            )
+        )
+        self.inner_exception = inner_exception
+        self.can_retry = can_retry
+
+
 def cs_error(msg, code=Codes.UNKNOWN, **kwargs):
     """ Utility method for constructing an error response for client-server
     interactions.
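Callers can use can_retry to decide whether a failed federation request is worth repeating. A hedged sketch, where send_once stands in for whatever callable performs the HTTP request and raises RequestSendFailed on network errors:

    from synapse.api.errors import RequestSendFailed

    def send_with_retry(send_once, max_attempts=3):
        for attempt in range(max_attempts):
            try:
                return send_once()
            except RequestSendFailed as e:
                # Only retry "expected" network failures, and give up
                # after the last attempt.
                if not e.can_retry or attempt == max_attempts - 1:
                    raise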
diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py
index 677c0bdd4c..3906475403 100644
--- a/synapse/api/filtering.py
+++ b/synapse/api/filtering.py
@@ -12,6 +12,8 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from six import text_type
+
 import jsonschema
 from canonicaljson import json
 from jsonschema import FormatChecker
@@ -353,7 +355,7 @@ class Filter(object):
             sender = event.user_id
             room_id = None
             ev_type = "m.presence"
-            is_url = False
+            contains_url = False
         else:
             sender = event.get("sender", None)
             if not sender:
@@ -368,13 +370,16 @@ class Filter(object):
 
             room_id = event.get("room_id", None)
             ev_type = event.get("type", None)
-            is_url = "url" in event.get("content", {})
+
+            content = event.get("content", {})
+            # check if there is a string url field in the content for filtering purposes
+            contains_url = isinstance(content.get("url"), text_type)
 
         return self.check_fields(
             room_id,
             sender,
             ev_type,
-            is_url,
+            contains_url,
         )
 
     def check_fields(self, room_id, sender, event_type, contains_url):
@@ -439,6 +444,20 @@ class Filter(object):
     def include_redundant_members(self):
         return self.filter_json.get("include_redundant_members", False)
 
+    def with_room_ids(self, room_ids):
+        """Returns a new filter with the given room IDs appended.
+
+        Args:
+            room_ids (iterable[unicode]): The room_ids to add
+
+        Returns:
+            filter: A new filter including the given rooms and the old
+                    filter's rooms.
+        """
+        new_filter = Filter(self.filter_json)
+        new_filter.rooms += room_ids
+        return new_filter
+
 
 def _matches_wildcard(actual_value, filter_value):
     if filter_value.endswith("*"):
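The stricter contains_url check above means only a string-valued "url" key in the event content matches. Illustrative events (made-up content):

    events = [
        {"content": {"url": "mxc://example.org/abc"}},  # matches contains_url
        {"content": {"url": 42}},                       # no longer matches
        {"content": {}},                                # never matched
    ]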
diff --git a/synapse/api/urls.py b/synapse/api/urls.py
index 6d9f1ca0ef..8102176653 100644
--- a/synapse/api/urls.py
+++ b/synapse/api/urls.py
@@ -24,11 +24,12 @@ from synapse.config import ConfigError
 
 CLIENT_PREFIX = "/_matrix/client/api/v1"
 CLIENT_V2_ALPHA_PREFIX = "/_matrix/client/v2_alpha"
-FEDERATION_PREFIX = "/_matrix/federation/v1"
+FEDERATION_PREFIX = "/_matrix/federation"
+FEDERATION_V1_PREFIX = FEDERATION_PREFIX + "/v1"
+FEDERATION_V2_PREFIX = FEDERATION_PREFIX + "/v2"
 STATIC_PREFIX = "/_matrix/static"
 WEB_CLIENT_PREFIX = "/_matrix/client"
 CONTENT_REPO_PREFIX = "/_matrix/content"
-SERVER_KEY_PREFIX = "/_matrix/key/v1"
 SERVER_KEY_V2_PREFIX = "/_matrix/key/v2"
 MEDIA_PREFIX = "/_matrix/media/r0"
 LEGACY_MEDIA_PREFIX = "/_matrix/media/v1"
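Splitting the federation prefix lets transport code build both v1 and v2 paths from a single base constant; for example, using the existing v1 transaction-send endpoint (transaction ID is illustrative):

    from synapse.api.urls import FEDERATION_V1_PREFIX

    txn_id = "916d630ea616342b42e98a3be0b74113"  # illustrative
    send_path = FEDERATION_V1_PREFIX + "/send/" + txn_id
    # -> "/_matrix/federation/v1/send/916d630ea616342b42e98a3be0b74113"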
diff --git a/synapse/app/__init__.py b/synapse/app/__init__.py
index c3afcc573b..f56f5fcc13 100644
--- a/synapse/app/__init__.py
+++ b/synapse/app/__init__.py
@@ -12,22 +12,38 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
+import logging
 import sys
 
 from synapse import python_dependencies  # noqa: E402
 
 sys.dont_write_bytecode = True
 
+logger = logging.getLogger(__name__)
 
 try:
     python_dependencies.check_requirements()
-except python_dependencies.MissingRequirementError as e:
-    message = "\n".join([
-        "Missing Requirement: %s" % (str(e),),
-        "To install run:",
-        "    pip install --upgrade --force \"%s\"" % (e.dependency,),
-        "",
-    ])
-    sys.stderr.writelines(message)
+except python_dependencies.DependencyException as e:
+    sys.stderr.writelines(e.message)
     sys.exit(1)
+
+
+def check_bind_error(e, address, bind_addresses):
+    """
+    This method inspects an exception that occurred while binding on 0.0.0.0.
+    If :: is specified in the bind addresses, a warning is shown and the
+    exception is swallowed; otherwise it is re-raised.
+
+    Binding on both 0.0.0.0 and :: causes an exception on Linux and macOS
+    because :: binds on both IPv4 and IPv6 (as per RFC 3493).
+    When binding on 0.0.0.0 after :: this can safely be ignored.
+
+    Args:
+        e (Exception): Exception that was caught.
+        address (str): Address on which binding was attempted.
+        bind_addresses (list): Addresses on which the service listens.
+    """
+    if address == '0.0.0.0' and '::' in bind_addresses:
+        logger.warning('Failed to listen on 0.0.0.0, continuing because listening on [::]')
+    else:
+        raise e
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index 18584226e9..32e8b8a3f5 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -15,18 +15,38 @@
 
 import gc
 import logging
+import signal
 import sys
+import traceback
 
 import psutil
 from daemonize import Daemonize
 
 from twisted.internet import error, reactor
+from twisted.protocols.tls import TLSMemoryBIOFactory
 
+import synapse
+from synapse.app import check_bind_error
+from synapse.crypto import context_factory
 from synapse.util import PreserveLoggingContext
 from synapse.util.rlimit import change_resource_limit
+from synapse.util.versionstring import get_version_string
 
 logger = logging.getLogger(__name__)
 
+_sighup_callbacks = []
+
+
+def register_sighup(func):
+    """
+    Register a function to be called when a SIGHUP occurs.
+
+    Args:
+        func (function): Function to be called when sent a SIGHUP signal.
+            Will be called with a single argument, the homeserver.
+    """
+    _sighup_callbacks.append(func)
+
 
 def start_worker_reactor(appname, config):
     """ Run the reactor in the main process
@@ -135,62 +155,154 @@ def listen_metrics(bind_addresses, port):
     from prometheus_client import start_http_server
 
     for host in bind_addresses:
-        reactor.callInThread(start_http_server, int(port),
-                             addr=host, registry=RegistryProxy)
-        logger.info("Metrics now reporting on %s:%d", host, port)
+        logger.info("Starting metrics listener on %s:%d", host, port)
+        start_http_server(port, addr=host, registry=RegistryProxy)
 
 
 def listen_tcp(bind_addresses, port, factory, reactor=reactor, backlog=50):
     """
     Create a TCP socket for a port and several addresses
+
+    Returns:
+        list[twisted.internet.tcp.Port]: listening for TCP connections
     """
+    r = []
     for address in bind_addresses:
         try:
-            reactor.listenTCP(
-                port,
-                factory,
-                backlog,
-                address
+            r.append(
+                reactor.listenTCP(
+                    port,
+                    factory,
+                    backlog,
+                    address
+                )
             )
         except error.CannotListenError as e:
             check_bind_error(e, address, bind_addresses)
 
+    return r
+
 
 def listen_ssl(
     bind_addresses, port, factory, context_factory, reactor=reactor, backlog=50
 ):
     """
-    Create an SSL socket for a port and several addresses
+    Create a TLS-over-TCP socket for a port and several addresses
+
+    Returns:
+        list[twisted.internet.tcp.Port]: listening for TLS connections
     """
+    r = []
     for address in bind_addresses:
         try:
-            reactor.listenSSL(
-                port,
-                factory,
-                context_factory,
-                backlog,
-                address
+            r.append(
+                reactor.listenSSL(
+                    port,
+                    factory,
+                    context_factory,
+                    backlog,
+                    address
+                )
             )
         except error.CannotListenError as e:
             check_bind_error(e, address, bind_addresses)
 
+    return r
 
-def check_bind_error(e, address, bind_addresses):
+
+def refresh_certificate(hs):
+    """
+    Refresh the TLS certificates that Synapse is using by re-reading them from
+    disk and updating the TLS context factories to use them.
     """
-    This method checks an exception occurred while binding on 0.0.0.0.
-    If :: is specified in the bind addresses a warning is shown.
-    The exception is still raised otherwise.
 
-    Binding on both 0.0.0.0 and :: causes an exception on Linux and macOS
-    because :: binds on both IPv4 and IPv6 (as per RFC 3493).
-    When binding on 0.0.0.0 after :: this can safely be ignored.
+    if not hs.config.has_tls_listener():
+        # attempt to reload the certs for the good of the tls_fingerprints
+        hs.config.read_certificate_from_disk(require_cert_and_key=False)
+        return
+
+    hs.config.read_certificate_from_disk(require_cert_and_key=True)
+    hs.tls_server_context_factory = context_factory.ServerContextFactory(hs.config)
+
+    if hs._listening_services:
+        logger.info("Updating context factories...")
+        for i in hs._listening_services:
+            # When you listenSSL, it doesn't make an SSL port but a TCP one with
+            # a TLS wrapping factory around the factory that actually handles
+            # requests. This factory attribute is public but missing from
+            # Twisted's documentation.
+            if isinstance(i.factory, TLSMemoryBIOFactory):
+                addr = i.getHost()
+                logger.info(
+                    "Replacing TLS context factory on [%s]:%i", addr.host, addr.port,
+                )
+                # We want to replace TLS factories with a new one, with the new
+                # TLS configuration. We do this by reaching in and pulling out
+                # the wrappedFactory, and then re-wrapping it.
+                i.factory = TLSMemoryBIOFactory(
+                    hs.tls_server_context_factory,
+                    False,
+                    i.factory.wrappedFactory
+                )
+        logger.info("Context factories updated.")
+
+
+def start(hs, listeners=None):
+    """
+    Start a Synapse server or worker.
 
     Args:
-        e (Exception): Exception that was caught.
-        address (str): Address on which binding was attempted.
-        bind_addresses (list): Addresses on which the service listens.
+        hs (synapse.server.HomeServer)
+        listeners (list[dict]): Listener configuration ('listeners' in homeserver.yaml)
     """
-    if address == '0.0.0.0' and '::' in bind_addresses:
-        logger.warn('Failed to listen on 0.0.0.0, continuing because listening on [::]')
-    else:
-        raise e
+    try:
+        # Set up the SIGHUP machinery.
+        if hasattr(signal, "SIGHUP"):
+            def handle_sighup(*args, **kwargs):
+                for i in _sighup_callbacks:
+                    i(hs)
+
+            signal.signal(signal.SIGHUP, handle_sighup)
+
+            register_sighup(refresh_certificate)
+
+        # Load the certificate from disk.
+        refresh_certificate(hs)
+
+        # It is now safe to start your Synapse.
+        hs.start_listening(listeners)
+        hs.get_datastore().start_profiling()
+
+        setup_sentry(hs)
+    except Exception:
+        traceback.print_exc(file=sys.stderr)
+        reactor = hs.get_reactor()
+        if reactor.running:
+            reactor.stop()
+        sys.exit(1)
+
+
+def setup_sentry(hs):
+    """Enable sentry integration, if enabled in configuration
+
+    Args:
+        hs (synapse.server.HomeServer)
+    """
+
+    if not hs.config.sentry_enabled:
+        return
+
+    import sentry_sdk
+    sentry_sdk.init(
+        dsn=hs.config.sentry_dsn,
+        release=get_version_string(synapse),
+    )
+
+    # We set some default tags that give some context to this instance
+    with sentry_sdk.configure_scope() as scope:
+        scope.set_tag("matrix_server_name", hs.config.server_name)
+
+        app = hs.config.worker_app if hs.config.worker_app else "synapse.app.homeserver"
+        name = hs.config.worker_name if hs.config.worker_name else "master"
+        scope.set_tag("worker_app", app)
+        scope.set_tag("worker_name", name)
diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py
index 8559e141af..33107f56d1 100644
--- a/synapse/app/appservice.py
+++ b/synapse/app/appservice.py
@@ -168,12 +168,7 @@ def start(config_options):
     )
 
     ps.setup()
-    ps.start_listening(config.worker_listeners)
-
-    def start():
-        ps.get_datastore().start_profiling()
-
-    reactor.callWhenRunning(start)
+    reactor.callWhenRunning(_base.start, ps, config.worker_listeners)
 
     _base.start_worker_reactor("synapse-appservice", config)
 
diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py
index 76aed8c60a..043b48f8f3 100644
--- a/synapse/app/client_reader.py
+++ b/synapse/app/client_reader.py
@@ -25,7 +25,6 @@ from synapse.app import _base
 from synapse.config._base import ConfigError
 from synapse.config.homeserver import HomeServerConfig
 from synapse.config.logger import setup_logging
-from synapse.crypto import context_factory
 from synapse.http.server import JsonResource
 from synapse.http.site import SynapseSite
 from synapse.metrics import RegistryProxy
@@ -41,6 +40,7 @@ from synapse.replication.slave.storage.registration import SlavedRegistrationSto
 from synapse.replication.slave.storage.room import RoomStore
 from synapse.replication.slave.storage.transactions import SlavedTransactionStore
 from synapse.replication.tcp.client import ReplicationClientHandler
+from synapse.rest.client.v1.login import LoginRestServlet
 from synapse.rest.client.v1.room import (
     JoinedRoomMemberListRestServlet,
     PublicRoomListRestServlet,
@@ -48,6 +48,7 @@ from synapse.rest.client.v1.room import (
     RoomMemberListRestServlet,
     RoomStateRestServlet,
 )
+from synapse.rest.client.v2_alpha.register import RegisterRestServlet
 from synapse.server import HomeServer
 from synapse.storage.engines import create_engine
 from synapse.util.httpresourcetree import create_resource_tree
@@ -93,6 +94,8 @@ class ClientReaderServer(HomeServer):
                     JoinedRoomMemberListRestServlet(self).register(resource)
                     RoomStateRestServlet(self).register(resource)
                     RoomEventContextServlet(self).register(resource)
+                    RegisterRestServlet(self).register(resource)
+                    LoginRestServlet(self).register(resource)
 
                     resources.update({
                         "/_matrix/client/r0": resource,
@@ -164,26 +167,16 @@ def start(config_options):
 
     database_engine = create_engine(config.database_config)
 
-    tls_server_context_factory = context_factory.ServerContextFactory(config)
-    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)
-
     ss = ClientReaderServer(
         config.server_name,
         db_config=config.database_config,
-        tls_server_context_factory=tls_server_context_factory,
-        tls_client_options_factory=tls_client_options_factory,
         config=config,
         version_string="Synapse/" + get_version_string(synapse),
         database_engine=database_engine,
     )
 
     ss.setup()
-    ss.start_listening(config.worker_listeners)
-
-    def start():
-        ss.get_datastore().start_profiling()
-
-    reactor.callWhenRunning(start)
+    reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
 
     _base.start_worker_reactor("synapse-client-reader", config)
 
diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py
index e4a68715aa..b8e5196152 100644
--- a/synapse/app/event_creator.py
+++ b/synapse/app/event_creator.py
@@ -25,7 +25,6 @@ from synapse.app import _base
 from synapse.config._base import ConfigError
 from synapse.config.homeserver import HomeServerConfig
 from synapse.config.logger import setup_logging
-from synapse.crypto import context_factory
 from synapse.http.server import JsonResource
 from synapse.http.site import SynapseSite
 from synapse.metrics import RegistryProxy
@@ -185,26 +184,16 @@ def start(config_options):
 
     database_engine = create_engine(config.database_config)
 
-    tls_server_context_factory = context_factory.ServerContextFactory(config)
-    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)
-
     ss = EventCreatorServer(
         config.server_name,
         db_config=config.database_config,
-        tls_server_context_factory=tls_server_context_factory,
-        tls_client_options_factory=tls_client_options_factory,
         config=config,
         version_string="Synapse/" + get_version_string(synapse),
         database_engine=database_engine,
     )
 
     ss.setup()
-    ss.start_listening(config.worker_listeners)
-
-    def start():
-        ss.get_datastore().start_profiling()
-
-    reactor.callWhenRunning(start)
+    reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
 
     _base.start_worker_reactor("synapse-event-creator", config)
 
diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py
index 228a297fb8..b116c17669 100644
--- a/synapse/app/federation_reader.py
+++ b/synapse/app/federation_reader.py
@@ -26,7 +26,6 @@ from synapse.app import _base
 from synapse.config._base import ConfigError
 from synapse.config.homeserver import HomeServerConfig
 from synapse.config.logger import setup_logging
-from synapse.crypto import context_factory
 from synapse.federation.transport.server import TransportLayerServer
 from synapse.http.site import SynapseSite
 from synapse.metrics import RegistryProxy
@@ -41,6 +40,7 @@ from synapse.replication.slave.storage.profile import SlavedProfileStore
 from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
 from synapse.replication.slave.storage.pushers import SlavedPusherStore
 from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
+from synapse.replication.slave.storage.registration import SlavedRegistrationStore
 from synapse.replication.slave.storage.room import RoomStore
 from synapse.replication.slave.storage.transactions import SlavedTransactionStore
 from synapse.replication.tcp.client import ReplicationClientHandler
@@ -63,6 +63,7 @@ class FederationReaderSlavedStore(
     SlavedReceiptsStore,
     SlavedEventStore,
     SlavedKeyStore,
+    SlavedRegistrationStore,
     RoomStore,
     DirectoryStore,
     SlavedTransactionStore,
@@ -87,6 +88,16 @@ class FederationReaderServer(HomeServer):
                     resources.update({
                         FEDERATION_PREFIX: TransportLayerServer(self),
                     })
+                if name == "openid" and "federation" not in res["names"]:
+                    # Only load the openid resource separately if the federation
+                    # resource is not specified, since the federation resource
+                    # already includes the openid endpoints.
+                    resources.update({
+                        FEDERATION_PREFIX: TransportLayerServer(
+                            self,
+                            servlet_groups=["openid"],
+                        ),
+                    })
 
         root_resource = create_resource_tree(resources, NoResource())
 
@@ -99,7 +110,8 @@ class FederationReaderServer(HomeServer):
                 listener_config,
                 root_resource,
                 self.version_string,
-            )
+            ),
+            reactor=self.get_reactor()
         )
 
         logger.info("Synapse federation reader now listening on port %d", port)
@@ -151,26 +163,16 @@ def start(config_options):
 
     database_engine = create_engine(config.database_config)
 
-    tls_server_context_factory = context_factory.ServerContextFactory(config)
-    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)
-
     ss = FederationReaderServer(
         config.server_name,
         db_config=config.database_config,
-        tls_server_context_factory=tls_server_context_factory,
-        tls_client_options_factory=tls_client_options_factory,
         config=config,
         version_string="Synapse/" + get_version_string(synapse),
         database_engine=database_engine,
     )
 
     ss.setup()
-    ss.start_listening(config.worker_listeners)
-
-    def start():
-        ss.get_datastore().start_profiling()
-
-    reactor.callWhenRunning(start)
+    reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
 
     _base.start_worker_reactor("synapse-federation-reader", config)
 
diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py
index e9a99d76e1..a461442fdc 100644
--- a/synapse/app/federation_sender.py
+++ b/synapse/app/federation_sender.py
@@ -25,7 +25,6 @@ from synapse.app import _base
 from synapse.config._base import ConfigError
 from synapse.config.homeserver import HomeServerConfig
 from synapse.config.logger import setup_logging
-from synapse.crypto import context_factory
 from synapse.federation import send_queue
 from synapse.http.site import SynapseSite
 from synapse.metrics import RegistryProxy
@@ -183,26 +182,17 @@ def start(config_options):
     # Force the pushers to start since they will be disabled in the main config
     config.send_federation = True
 
-    tls_server_context_factory = context_factory.ServerContextFactory(config)
-    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)
-
-    ps = FederationSenderServer(
+    ss = FederationSenderServer(
         config.server_name,
         db_config=config.database_config,
-        tls_server_context_factory=tls_server_context_factory,
-        tls_client_options_factory=tls_client_options_factory,
         config=config,
         version_string="Synapse/" + get_version_string(synapse),
         database_engine=database_engine,
     )
 
-    ps.setup()
-    ps.start_listening(config.worker_listeners)
-
-    def start():
-        ps.get_datastore().start_profiling()
+    ss.setup()
+    reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
 
-    reactor.callWhenRunning(start)
     _base.start_worker_reactor("synapse-federation-sender", config)
 
 
diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py
index f5c61dec5b..8479fee738 100644
--- a/synapse/app/frontend_proxy.py
+++ b/synapse/app/frontend_proxy.py
@@ -21,12 +21,11 @@ from twisted.web.resource import NoResource
 
 import synapse
 from synapse import events
-from synapse.api.errors import SynapseError
+from synapse.api.errors import HttpResponseException, SynapseError
 from synapse.app import _base
 from synapse.config._base import ConfigError
 from synapse.config.homeserver import HomeServerConfig
 from synapse.config.logger import setup_logging
-from synapse.crypto import context_factory
 from synapse.http.server import JsonResource
 from synapse.http.servlet import RestServlet, parse_json_object_from_request
 from synapse.http.site import SynapseSite
@@ -67,10 +66,15 @@ class PresenceStatusStubServlet(ClientV1RestServlet):
         headers = {
             "Authorization": auth_headers,
         }
-        result = yield self.http_client.get_json(
-            self.main_uri + request.uri.decode('ascii'),
-            headers=headers,
-        )
+
+        try:
+            result = yield self.http_client.get_json(
+                self.main_uri + request.uri.decode('ascii'),
+                headers=headers,
+            )
+        except HttpResponseException as e:
+            raise e.to_synapse_error()
+
         defer.returnValue((200, result))
 
     @defer.inlineCallbacks
@@ -241,26 +245,16 @@ def start(config_options):
 
     database_engine = create_engine(config.database_config)
 
-    tls_server_context_factory = context_factory.ServerContextFactory(config)
-    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)
-
     ss = FrontendProxyServer(
         config.server_name,
         db_config=config.database_config,
-        tls_server_context_factory=tls_server_context_factory,
-        tls_client_options_factory=tls_client_options_factory,
         config=config,
         version_string="Synapse/" + get_version_string(synapse),
         database_engine=database_engine,
     )
 
     ss.setup()
-    ss.start_listening(config.worker_listeners)
-
-    def start():
-        ss.get_datastore().start_profiling()
-
-    reactor.callWhenRunning(start)
+    reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
 
     _base.start_worker_reactor("synapse-frontend-proxy", config)
 
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 593e1e75db..e8b6cc3114 100755
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -1,6 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2019 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,6 +14,9 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
+from __future__ import print_function
+
 import gc
 import logging
 import os
@@ -25,6 +29,7 @@ from prometheus_client import Gauge
 
 from twisted.application import service
 from twisted.internet import defer, reactor
+from twisted.python.failure import Failure
 from twisted.web.resource import EncodingResourceWrapper, NoResource
 from twisted.web.server import GzipEncoderFactory
 from twisted.web.static import File
@@ -37,7 +42,6 @@ from synapse.api.urls import (
     FEDERATION_PREFIX,
     LEGACY_MEDIA_PREFIX,
     MEDIA_PREFIX,
-    SERVER_KEY_PREFIX,
     SERVER_KEY_V2_PREFIX,
     STATIC_PREFIX,
     WEB_CLIENT_PREFIX,
@@ -46,7 +50,6 @@ from synapse.app import _base
 from synapse.app._base import listen_ssl, listen_tcp, quit_with_error
 from synapse.config._base import ConfigError
 from synapse.config.homeserver import HomeServerConfig
-from synapse.crypto import context_factory
 from synapse.federation.transport.server import TransportLayerServer
 from synapse.http.additional_resource import AdditionalResource
 from synapse.http.server import RootRedirect
@@ -55,13 +58,13 @@ from synapse.metrics import RegistryProxy
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
 from synapse.module_api import ModuleApi
-from synapse.python_dependencies import CONDITIONAL_REQUIREMENTS, check_requirements
+from synapse.python_dependencies import check_requirements
 from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
 from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
 from synapse.rest import ClientRestResource
-from synapse.rest.key.v1.server_key_resource import LocalKey
 from synapse.rest.key.v2 import KeyApiV2Resource
 from synapse.rest.media.v0.content_repository import ContentRepoResource
+from synapse.rest.well_known import WellKnownResource
 from synapse.server import HomeServer
 from synapse.storage import DataStore, are_all_users_on_domain
 from synapse.storage.engines import IncorrectDatabaseSetup, create_engine
@@ -81,36 +84,6 @@ def gz_wrap(r):
     return EncodingResourceWrapper(r, [GzipEncoderFactory()])
 
 
-def build_resource_for_web_client(hs):
-    webclient_path = hs.get_config().web_client_location
-    if not webclient_path:
-        try:
-            import syweb
-        except ImportError:
-            quit_with_error(
-                "Could not find a webclient.\n\n"
-                "Please either install the matrix-angular-sdk or configure\n"
-                "the location of the source to serve via the configuration\n"
-                "option `web_client_location`\n\n"
-                "To install the `matrix-angular-sdk` via pip, run:\n\n"
-                "    pip install '%(dep)s'\n"
-                "\n"
-                "You can also disable hosting of the webclient via the\n"
-                "configuration option `web_client`\n"
-                % {"dep": CONDITIONAL_REQUIREMENTS["web_client"].keys()[0]}
-            )
-        syweb_path = os.path.dirname(syweb.__file__)
-        webclient_path = os.path.join(syweb_path, "webclient")
-    # GZip is disabled here due to
-    # https://twistedmatrix.com/trac/ticket/7678
-    # (It can stay enabled for the API resources: they call
-    # write() with the whole body and then finish() straight
-    # after and so do not trigger the bug.
-    # GzipFile was removed in commit 184ba09
-    # return GzipFile(webclient_path)  # TODO configurable?
-    return File(webclient_path)  # TODO configurable?
-
-
 class SynapseHomeServer(HomeServer):
     DATASTORE_CLASS = DataStore
 
@@ -120,12 +93,13 @@ class SynapseHomeServer(HomeServer):
         tls = listener_config.get("tls", False)
         site_tag = listener_config.get("tag", port)
 
-        if tls and config.no_tls:
-            return
-
         resources = {}
         for res in listener_config["resources"]:
             for name in res["names"]:
+                if name == "openid" and "federation" in res["names"]:
+                    # Skip loading the openid resource if federation is defined,
+                    # since the federation resource will include openid.
+                    continue
                 resources.update(self._configure_named_resource(
                     name, res.get("compress", False),
                 ))
@@ -139,15 +113,18 @@ class SynapseHomeServer(HomeServer):
             handler = handler_cls(config, module_api)
             resources[path] = AdditionalResource(self, handler.handle_request)
 
+        # try to find something useful to redirect '/' to
         if WEB_CLIENT_PREFIX in resources:
             root_resource = RootRedirect(WEB_CLIENT_PREFIX)
+        elif STATIC_PREFIX in resources:
+            root_resource = RootRedirect(STATIC_PREFIX)
         else:
             root_resource = NoResource()
 
         root_resource = create_resource_tree(resources, root_resource)
 
         if tls:
-            listen_ssl(
+            ports = listen_ssl(
                 bind_addresses,
                 port,
                 SynapseSite(
@@ -158,10 +135,12 @@ class SynapseHomeServer(HomeServer):
                     self.version_string,
                 ),
                 self.tls_server_context_factory,
+                reactor=self.get_reactor(),
             )
+            logger.info("Synapse now listening on TCP port %d (TLS)", port)
 
         else:
-            listen_tcp(
+            ports = listen_tcp(
                 bind_addresses,
                 port,
                 SynapseSite(
@@ -170,9 +149,12 @@ class SynapseHomeServer(HomeServer):
                     listener_config,
                     root_resource,
                     self.version_string,
-                )
+                ),
+                reactor=self.get_reactor(),
             )
-        logger.info("Synapse now listening on port %d", port)
+            logger.info("Synapse now listening on TCP port %d", port)
+
+        return ports
 
     def _configure_named_resource(self, name, compress=False):
         """Build a resource map for a named resource
@@ -197,8 +179,13 @@ class SynapseHomeServer(HomeServer):
                 "/_matrix/client/unstable": client_resource,
                 "/_matrix/client/v2_alpha": client_resource,
                 "/_matrix/client/versions": client_resource,
+                "/.well-known/matrix/client": WellKnownResource(self),
             })
 
+            if self.get_config().saml2_enabled:
+                from synapse.rest.saml2 import SAML2Resource
+                resources["/_matrix/saml2"] = SAML2Resource(self)
+
         if name == "consent":
             from synapse.rest.consent.consent_resource import ConsentResource
             consent_resource = ConsentResource(self)
@@ -213,6 +200,11 @@ class SynapseHomeServer(HomeServer):
                 FEDERATION_PREFIX: TransportLayerServer(self),
             })
 
+        if name == "openid":
+            resources.update({
+                FEDERATION_PREFIX: TransportLayerServer(self, servlet_groups=["openid"]),
+            })
+
         if name in ["static", "client"]:
             resources.update({
                 STATIC_PREFIX: File(
@@ -236,13 +228,19 @@ class SynapseHomeServer(HomeServer):
                 )
 
         if name in ["keys", "federation"]:
-            resources.update({
-                SERVER_KEY_PREFIX: LocalKey(self),
-                SERVER_KEY_V2_PREFIX: KeyApiV2Resource(self),
-            })
+            resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self)
 
         if name == "webclient":
-            resources[WEB_CLIENT_PREFIX] = build_resource_for_web_client(self)
+            webclient_path = self.get_config().web_client_location
+
+            if webclient_path is None:
+                logger.warning(
+                    "Not enabling webclient resource, as web_client_location is unset."
+                )
+            else:
+                # GZip is disabled here due to
+                # https://twistedmatrix.com/trac/ticket/7678
+                resources[WEB_CLIENT_PREFIX] = File(webclient_path)
 
         if name == "metrics" and self.get_config().enable_metrics:
             resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
@@ -252,12 +250,14 @@ class SynapseHomeServer(HomeServer):
 
         return resources
 
-    def start_listening(self):
+    def start_listening(self, listeners):
         config = self.get_config()
 
-        for listener in config.listeners:
+        for listener in listeners:
             if listener["type"] == "http":
-                self._listener_http(config, listener)
+                self._listening_services.extend(
+                    self._listener_http(config, listener)
+                )
             elif listener["type"] == "manhole":
                 listen_tcp(
                     listener["bind_addresses"],
@@ -269,14 +269,14 @@ class SynapseHomeServer(HomeServer):
                     )
                 )
             elif listener["type"] == "replication":
-                bind_addresses = listener["bind_addresses"]
-                for address in bind_addresses:
-                    factory = ReplicationStreamProtocolFactory(self)
-                    server_listener = reactor.listenTCP(
-                        listener["port"], factory, interface=address
-                    )
+                services = listen_tcp(
+                    listener["bind_addresses"],
+                    listener["port"],
+                    ReplicationStreamProtocolFactory(self),
+                )
+                for s in services:
                     reactor.addSystemEventTrigger(
-                        "before", "shutdown", server_listener.stopListening,
+                        "before", "shutdown", s.stopListening,
                     )
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
@@ -337,24 +337,19 @@ def setup(config_options):
         # generating config files and shouldn't try to continue.
         sys.exit(0)
 
-    synapse.config.logger.setup_logging(config, use_worker_options=False)
-
-    # check any extra requirements we have now we have a config
-    check_requirements(config)
+    synapse.config.logger.setup_logging(
+        config,
+        use_worker_options=False
+    )
 
     events.USE_FROZEN_DICTS = config.use_frozen_dicts
 
-    tls_server_context_factory = context_factory.ServerContextFactory(config)
-    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)
-
     database_engine = create_engine(config.database_config)
     config.database_config["args"]["cp_openfun"] = database_engine.on_new_connection
 
     hs = SynapseHomeServer(
         config.server_name,
         db_config=config.database_config,
-        tls_server_context_factory=tls_server_context_factory,
-        tls_client_options_factory=tls_client_options_factory,
         config=config,
         version_string="Synapse/" + get_version_string(synapse),
         database_engine=database_engine,
@@ -381,12 +376,79 @@ def setup(config_options):
     logger.info("Database prepared in %s.", config.database_config['name'])
 
     hs.setup()
-    hs.start_listening()
 
+    @defer.inlineCallbacks
+    def do_acme():
+        """
+        Reprovision an ACME certificate, if it's required.
+
+        Returns:
+            Deferred[bool]: Whether the cert has been updated.
+        """
+        acme = hs.get_acme_handler()
+
+        # Check how long the certificate is active for.
+        cert_days_remaining = hs.config.is_disk_cert_valid(
+            allow_self_signed=False
+        )
+
+        # We want to reprovision if cert_days_remaining is None (meaning no
+        # certificate exists), or if the number of days remaining is below
+        # our re-registration threshold.
+        provision = False
+
+        if (
+            cert_days_remaining is None or
+            cert_days_remaining < hs.config.acme_reprovision_threshold
+        ):
+            provision = True
+
+        if provision:
+            yield acme.provision_certificate()
+
+        defer.returnValue(provision)
+
+    @defer.inlineCallbacks
+    def reprovision_acme():
+        """
+        Provision a certificate from ACME, if required, and reload the TLS
+        certificate if it's renewed.
+        """
+        reprovisioned = yield do_acme()
+        if reprovisioned:
+            _base.refresh_certificate(hs)
+
+    @defer.inlineCallbacks
     def start():
-        hs.get_pusherpool().start()
-        hs.get_datastore().start_profiling()
-        hs.get_datastore().start_doing_background_updates()
+        try:
+            # Run the ACME provisioning code, if it's enabled.
+            if hs.config.acme_enabled:
+                acme = hs.get_acme_handler()
+                # Start up the webservices which we will respond to ACME
+                # challenges with, and then provision.
+                yield acme.start_listening()
+                yield do_acme()
+
+                # Check if it needs to be reprovisioned every day.
+                hs.get_clock().looping_call(
+                    reprovision_acme,
+                    24 * 60 * 60 * 1000
+                )
+
+            _base.start(hs, config.listeners)
+
+            hs.get_pusherpool().start()
+            hs.get_datastore().start_doing_background_updates()
+        except Exception:
+            # Print the exception and bail out.
+            print("Error during startup:", file=sys.stderr)
+
+            # this gives better tracebacks than traceback.print_exc()
+            Failure().printTraceback(file=sys.stderr)
+
+            if reactor.running:
+                reactor.stop()
+            sys.exit(1)
 
     reactor.callWhenRunning(start)
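Condensed, the daily reprovisioning decision made by do_acme boils down to the following check (threshold comes from acme_reprovision_threshold in the config):

    def needs_reprovision(cert_days_remaining, threshold):
        # None means there is no valid certificate on disk yet.
        return cert_days_remaining is None or cert_days_remaining < threshold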
 
@@ -394,7 +456,8 @@ def setup(config_options):
 
 
 class SynapseService(service.Service):
-    """A twisted Service class that will start synapse. Used to run synapse
+    """
+    A Twisted Service class that will start Synapse. Used to run Synapse
     via twistd and a .tac.
     """
     def __init__(self, config):
@@ -492,6 +555,9 @@ def run(hs):
                 stats["memory_rss"] += process.memory_info().rss
                 stats["cpu_average"] += int(process.cpu_percent(interval=None))
 
+        stats["database_engine"] = hs.get_datastore().database_engine_name
+        stats["database_server_version"] = hs.get_datastore().get_server_version()
+
         logger.info("Reporting stats to matrix.org: %s" % (stats,))
         try:
             yield hs.get_simple_http_client().put_json(
@@ -540,7 +606,7 @@ def run(hs):
         current_mau_count = 0
         reserved_count = 0
         store = hs.get_datastore()
-        if hs.config.limit_usage_by_mau:
+        if hs.config.limit_usage_by_mau or hs.config.mau_stats_only:
             current_mau_count = yield store.get_monthly_active_count()
             reserved_count = yield store.get_registered_reserved_users_count()
         current_mau_gauge.set(float(current_mau_count))
@@ -554,7 +620,7 @@ def run(hs):
         )
 
     start_generate_monthly_active_users()
-    if hs.config.limit_usage_by_mau:
+    if hs.config.limit_usage_by_mau or hs.config.mau_stats_only:
         clock.looping_call(start_generate_monthly_active_users, 5 * 60 * 1000)
     # End of monthly active user settings
 
diff --git a/synapse/app/media_repository.py b/synapse/app/media_repository.py
index acc0487adc..d4cc4e9443 100644
--- a/synapse/app/media_repository.py
+++ b/synapse/app/media_repository.py
@@ -26,7 +26,6 @@ from synapse.app import _base
 from synapse.config._base import ConfigError
 from synapse.config.homeserver import HomeServerConfig
 from synapse.config.logger import setup_logging
-from synapse.crypto import context_factory
 from synapse.http.site import SynapseSite
 from synapse.metrics import RegistryProxy
 from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
@@ -151,26 +150,16 @@ def start(config_options):
 
     database_engine = create_engine(config.database_config)
 
-    tls_server_context_factory = context_factory.ServerContextFactory(config)
-    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)
-
     ss = MediaRepositoryServer(
         config.server_name,
         db_config=config.database_config,
-        tls_server_context_factory=tls_server_context_factory,
-        tls_client_options_factory=tls_client_options_factory,
         config=config,
         version_string="Synapse/" + get_version_string(synapse),
         database_engine=database_engine,
     )
 
     ss.setup()
-    ss.start_listening(config.worker_listeners)
-
-    def start():
-        ss.get_datastore().start_profiling()
-
-    reactor.callWhenRunning(start)
+    reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
 
     _base.start_worker_reactor("synapse-media-repository", config)
 
diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py
index 83b0863f00..cbf0d67f51 100644
--- a/synapse/app/pusher.py
+++ b/synapse/app/pusher.py
@@ -224,11 +224,10 @@ def start(config_options):
     )
 
     ps.setup()
-    ps.start_listening(config.worker_listeners)
 
     def start():
+        _base.start(ps, config.worker_listeners)
         ps.get_pusherpool().start()
-        ps.get_datastore().start_profiling()
 
     reactor.callWhenRunning(start)
 
diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
index 3926c7f263..9163b56d86 100644
--- a/synapse/app/synchrotron.py
+++ b/synapse/app/synchrotron.py
@@ -226,7 +226,15 @@ class SynchrotronPresence(object):
 class SynchrotronTyping(object):
     def __init__(self, hs):
         self._latest_room_serial = 0
+        self._reset()
+
+    def _reset(self):
+        """
+        Reset the typing handler's data caches.
+        """
+        # map room IDs to serial numbers
         self._room_serials = {}
+        # map room IDs to sets of users currently typing
         self._room_typing = {}
 
     def stream_positions(self):
@@ -236,6 +244,12 @@ class SynchrotronTyping(object):
         return {"typing": self._latest_room_serial}
 
     def process_replication_rows(self, token, rows):
+        if self._latest_room_serial > token:
+            # The master has gone backwards. To prevent inconsistent data, just
+            # clear everything.
+            self._reset()
+
+        # Set the latest serial token to whatever the server gave us.
         self._latest_room_serial = token
 
         for row in rows:
@@ -431,12 +445,7 @@ def start(config_options):
     )
 
     ss.setup()
-    ss.start_listening(config.worker_listeners)
-
-    def start():
-        ss.get_datastore().start_profiling()
-
-    reactor.callWhenRunning(start)
+    reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
 
     _base.start_worker_reactor("synapse-synchrotron", config)
 
diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py
index 0a5f62b509..d1ab9512cd 100644
--- a/synapse/app/user_dir.py
+++ b/synapse/app/user_dir.py
@@ -26,7 +26,6 @@ from synapse.app import _base
 from synapse.config._base import ConfigError
 from synapse.config.homeserver import HomeServerConfig
 from synapse.config.logger import setup_logging
-from synapse.crypto import context_factory
 from synapse.http.server import JsonResource
 from synapse.http.site import SynapseSite
 from synapse.metrics import RegistryProxy
@@ -211,26 +210,16 @@ def start(config_options):
     # Force the pushers to start since they will be disabled in the main config
     config.update_user_directory = True
 
-    tls_server_context_factory = context_factory.ServerContextFactory(config)
-    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)
-
-    ps = UserDirectoryServer(
+    ss = UserDirectoryServer(
         config.server_name,
         db_config=config.database_config,
-        tls_server_context_factory=tls_server_context_factory,
-        tls_client_options_factory=tls_client_options_factory,
         config=config,
         version_string="Synapse/" + get_version_string(synapse),
         database_engine=database_engine,
     )
 
-    ps.setup()
-    ps.start_listening(config.worker_listeners)
-
-    def start():
-        ps.get_datastore().start_profiling()
-
-    reactor.callWhenRunning(start)
+    ss.setup()
+    reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
 
     _base.start_worker_reactor("synapse-user-dir", config)
 
diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py
index 2430814796..685f15c061 100644
--- a/synapse/appservice/scheduler.py
+++ b/synapse/appservice/scheduler.py
@@ -53,8 +53,8 @@ import logging
 from twisted.internet import defer
 
 from synapse.appservice import ApplicationServiceState
+from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.util.logcontext import run_in_background
-from synapse.util.metrics import Measure
 
 logger = logging.getLogger(__name__)
 
@@ -104,27 +104,34 @@ class _ServiceQueuer(object):
         self.clock = clock
 
     def enqueue(self, service, event):
-        # if this service isn't being sent something
         self.queued_events.setdefault(service.id, []).append(event)
-        run_in_background(self._send_request, service)
 
-    @defer.inlineCallbacks
-    def _send_request(self, service):
+        # start a sender for this appservice if we don't already have one
+
         if service.id in self.requests_in_flight:
             return
 
+        run_as_background_process(
+            "as-sender-%s" % (service.id, ),
+            self._send_request, service,
+        )
+
+    @defer.inlineCallbacks
+    def _send_request(self, service):
+        # sanity-check: we shouldn't get here if this service already has a sender
+        # running.
+        assert(service.id not in self.requests_in_flight)
+
         self.requests_in_flight.add(service.id)
         try:
             while True:
                 events = self.queued_events.pop(service.id, [])
                 if not events:
                     return
-
-                with Measure(self.clock, "servicequeuer.send"):
-                    try:
-                        yield self.txn_ctrl.send(service, events)
-                    except Exception:
-                        logger.exception("AS request failed")
+                try:
+                    yield self.txn_ctrl.send(service, events)
+                except Exception:
+                    logger.exception("AS request failed")
         finally:
             self.requests_in_flight.discard(service.id)
 
@@ -223,7 +230,12 @@ class _Recoverer(object):
         self.backoff_counter = 1
 
     def recover(self):
-        self.clock.call_later((2 ** self.backoff_counter), self.retry)
+        def _retry():
+            run_as_background_process(
+                "as-recoverer-%s" % (self.service.id,),
+                self.retry,
+            )
+        self.clock.call_later((2 ** self.backoff_counter), _retry)
 
     def _backoff(self):
         # cap the backoff to be around 8.5min => (2^9) = 512 secs
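
As the comment notes, the recoverer's retry delay grows as 2**backoff_counter and is capped so it never exceeds roughly 8.5 minutes (2**9 = 512 seconds). A short sketch of that schedule (illustrative, not the _Recoverer code itself):

    def backoff_secs(counter, max_exponent=9):
        # Exponential backoff capped at 2**9 = 512s (~8.5 minutes).
        return 2 ** min(counter, max_exponent)

    # 2, 4, 8, 16, ..., 512, 512, ...
    print([backoff_secs(n) for n in range(1, 12)])
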
diff --git a/synapse/config/__main__.py b/synapse/config/__main__.py
index 79fe9c3dac..fca35b008c 100644
--- a/synapse/config/__main__.py
+++ b/synapse/config/__main__.py
@@ -16,7 +16,7 @@ from synapse.config._base import ConfigError
 
 if __name__ == "__main__":
     import sys
-    from homeserver import HomeServerConfig
+    from synapse.config.homeserver import HomeServerConfig
 
     action = sys.argv[1]
 
diff --git a/synapse/config/_base.py b/synapse/config/_base.py
index 14dae65ea0..5aec43b702 100644
--- a/synapse/config/_base.py
+++ b/synapse/config/_base.py
@@ -135,10 +135,6 @@ class Config(object):
             return file_stream.read()
 
     @staticmethod
-    def default_path(name):
-        return os.path.abspath(os.path.join(os.path.curdir, name))
-
-    @staticmethod
     def read_config_file(file_path):
         with open(file_path) as file_stream:
             return yaml.load(file_stream)
@@ -151,8 +147,39 @@ class Config(object):
         return results
 
     def generate_config(
-        self, config_dir_path, server_name, is_generating_file, report_stats=None
+        self,
+        config_dir_path,
+        data_dir_path,
+        server_name,
+        generate_secrets=False,
+        report_stats=None,
     ):
+        """Build a default configuration file
+
+        This is used both when the user explicitly asks us to generate a config file
+        (eg with --generate-config), and before loading the config at runtime (to give
+        a base which the config files override)
+
+        Args:
+            config_dir_path (str): The path where the config files are kept. Used to
+                create filenames for things like the log config and the signing key.
+
+            data_dir_path (str): The path where the data files are kept. Used to create
+                filenames for things like the database and media store.
+
+            server_name (str): The server name. Used to initialise the server_name
+                config param, but also used in the names of some of the config files.
+
+            generate_secrets (bool): True if we should generate new secrets for things
+                like the macaroon_secret_key. If False, these parameters will be left
+                unset.
+
+            report_stats (bool|None): Initial setting for the report_stats setting.
+                If None, report_stats will be left unset.
+
+        Returns:
+            str: the yaml config file
+        """
         default_config = "# vim:ft=yaml\n"
 
         default_config += "\n\n".join(
@@ -160,15 +187,14 @@ class Config(object):
             for conf in self.invoke_all(
                 "default_config",
                 config_dir_path=config_dir_path,
+                data_dir_path=data_dir_path,
                 server_name=server_name,
-                is_generating_file=is_generating_file,
+                generate_secrets=generate_secrets,
                 report_stats=report_stats,
             )
         )
 
-        config = yaml.load(default_config)
-
-        return default_config, config
+        return default_config
 
     @classmethod
     def load_config(cls, description, argv):
@@ -231,7 +257,7 @@ class Config(object):
             "--keys-directory",
             metavar="DIRECTORY",
             help="Used with 'generate-*' options to specify where files such as"
-            " certs and signing keys should be stored in, unless explicitly"
+            " signing keys should be stored, unless explicitly"
             " specified in the config.",
         )
         config_parser.add_argument(
@@ -274,27 +300,24 @@ class Config(object):
                 if not cls.path_exists(config_dir_path):
                     os.makedirs(config_dir_path)
                 with open(config_path, "w") as config_file:
-                    config_str, config = obj.generate_config(
+                    config_str = obj.generate_config(
                         config_dir_path=config_dir_path,
+                        data_dir_path=os.getcwd(),
                         server_name=server_name,
                         report_stats=(config_args.report_stats == "yes"),
-                        is_generating_file=True,
+                        generate_secrets=True,
                     )
+                    config = yaml.load(config_str)
                     obj.invoke_all("generate_files", config)
                     config_file.write(config_str)
                 print(
                     (
                         "A config file has been generated in %r for server name"
-                        " %r with corresponding SSL keys and self-signed"
-                        " certificates. Please review this file and customise it"
+                        " %r. Please review this file and customise it"
                         " to your needs."
                     )
                     % (config_path, server_name)
                 )
-                print(
-                    "If this server name is incorrect, you will need to"
-                    " regenerate the SSL certificates"
-                )
                 return
             else:
                 print(
@@ -339,7 +362,7 @@ class Config(object):
         if not keys_directory:
             keys_directory = os.path.dirname(config_files[-1])
 
-        config_dir_path = os.path.abspath(keys_directory)
+        self.config_dir_path = os.path.abspath(keys_directory)
 
         specified_config = {}
         for config_file in config_files:
@@ -350,11 +373,13 @@ class Config(object):
             raise ConfigError(MISSING_SERVER_NAME)
 
         server_name = specified_config["server_name"]
-        _, config = self.generate_config(
-            config_dir_path=config_dir_path,
+        config_string = self.generate_config(
+            config_dir_path=self.config_dir_path,
+            data_dir_path=os.getcwd(),
             server_name=server_name,
-            is_generating_file=False,
+            generate_secrets=False,
         )
+        config = yaml.load(config_string)
         config.pop("log_config")
         config.update(specified_config)
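
With this change, generate_config returns only the YAML string; callers parse it themselves and, on the load path above, overlay the user's files on top of the generated defaults. A minimal sketch of that overlay (the keys are illustrative; the diff itself still uses yaml.load):

    import yaml

    default_yaml = "server_name: example.com\nreport_stats: false\n"
    config = yaml.safe_load(default_yaml)   # defaults, generated as YAML

    specified = {"report_stats": True}      # from the user's config files
    config.update(specified)

    assert config == {"server_name": "example.com", "report_stats": True}
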
 
diff --git a/synapse/config/api.py b/synapse/config/api.py
index 403d96ba76..e8a753f002 100644
--- a/synapse/config/api.py
+++ b/synapse/config/api.py
@@ -24,6 +24,7 @@ class ApiConfig(Config):
             EventTypes.JoinRules,
             EventTypes.CanonicalAlias,
             EventTypes.RoomAvatar,
+            EventTypes.RoomEncryption,
             EventTypes.Name,
         ])
 
@@ -32,9 +33,11 @@ class ApiConfig(Config):
         ## API Configuration ##
 
         # A list of event types that will be included in the room_invite_state
+        #
         room_invite_state_types:
             - "{JoinRules}"
             - "{CanonicalAlias}"
             - "{RoomAvatar}"
+            - "{RoomEncryption}"
             - "{Name}"
         """.format(**vars(EventTypes))
diff --git a/synapse/config/appservice.py b/synapse/config/appservice.py
index 3b161d708a..c260d59464 100644
--- a/synapse/config/appservice.py
+++ b/synapse/config/appservice.py
@@ -33,11 +33,18 @@ class AppServiceConfig(Config):
     def read_config(self, config):
         self.app_service_config_files = config.get("app_service_config_files", [])
         self.notify_appservices = config.get("notify_appservices", True)
+        self.track_appservice_user_ips = config.get("track_appservice_user_ips", False)
 
     def default_config(cls, **kwargs):
         return """\
         # A list of application service config files to use
+        #
         app_service_config_files: []
+
+        # Whether or not to track application service IP addresses. Implicitly
+        # enables MAU tracking for application service users.
+        #
+        track_appservice_user_ips: False
         """
 
 
diff --git a/synapse/config/captcha.py b/synapse/config/captcha.py
index 7ba0c2de6a..d25196be08 100644
--- a/synapse/config/captcha.py
+++ b/synapse/config/captcha.py
@@ -30,19 +30,22 @@ class CaptchaConfig(Config):
         # See docs/CAPTCHA_SETUP for full details of configuring this.
 
         # This Home Server's ReCAPTCHA public key.
+        #
         recaptcha_public_key: "YOUR_PUBLIC_KEY"
 
         # This Home Server's ReCAPTCHA private key.
+        #
         recaptcha_private_key: "YOUR_PRIVATE_KEY"
 
         # Enables ReCaptcha checks when registering, preventing signup
         # unless a captcha is answered. Requires a valid ReCaptcha
         # public/private key.
+        #
         enable_registration_captcha: False
 
         # A secret key used to bypass the captcha test entirely.
         #captcha_bypass_secret: "YOUR_SECRET_HERE"
 
         # The API endpoint to use for verifying m.login.recaptcha responses.
-        recaptcha_siteverify_api: "https://www.google.com/recaptcha/api/siteverify"
+        recaptcha_siteverify_api: "https://www.recaptcha.net/recaptcha/api/siteverify"
         """
diff --git a/synapse/config/cas.py b/synapse/config/cas.py
index 8109e5f95e..609c0815c8 100644
--- a/synapse/config/cas.py
+++ b/synapse/config/cas.py
@@ -38,6 +38,7 @@ class CasConfig(Config):
     def default_config(self, config_dir_path, server_name, **kwargs):
         return """
         # Enable CAS for registration and login.
+        #
         #cas_config:
         #   enabled: true
         #   server_url: "https://cas-server.com"
diff --git a/synapse/config/consent_config.py b/synapse/config/consent_config.py
index e22c731aad..abeb0180d3 100644
--- a/synapse/config/consent_config.py
+++ b/synapse/config/consent_config.py
@@ -13,6 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from os import path
+
+from synapse.config import ConfigError
+
 from ._base import Config
 
 DEFAULT_CONFIG = """\
@@ -42,18 +46,28 @@ DEFAULT_CONFIG = """\
 # until the user consents to the privacy policy. The value of the setting is
 # used as the text of the error.
 #
-# user_consent:
-#   template_dir: res/templates/privacy
-#   version: 1.0
-#   server_notice_content:
-#     msgtype: m.text
-#     body: >-
-#       To continue using this homeserver you must review and agree to the
-#       terms and conditions at %(consent_uri)s
-#   send_server_notice_to_guests: True
-#   block_events_error: >-
-#     To continue using this homeserver you must review and agree to the
-#     terms and conditions at %(consent_uri)s
+# 'require_at_registration', if enabled, will add a step to the registration
+# process, similar to how captcha works. Users will be required to accept the
+# policy before their account is created.
+#
+# 'policy_name' is the display name of the policy users will see when registering
+# for an account. Has no effect unless `require_at_registration` is enabled.
+# Defaults to "Privacy Policy".
+#
+#user_consent:
+#  template_dir: res/templates/privacy
+#  version: 1.0
+#  server_notice_content:
+#    msgtype: m.text
+#    body: >-
+#      To continue using this homeserver you must review and agree to the
+#      terms and conditions at %(consent_uri)s
+#  send_server_notice_to_guests: True
+#  block_events_error: >-
+#    To continue using this homeserver you must review and agree to the
+#    terms and conditions at %(consent_uri)s
+#  require_at_registration: False
+#  policy_name: Privacy Policy
 #
 """
 
@@ -67,13 +81,23 @@ class ConsentConfig(Config):
         self.user_consent_server_notice_content = None
         self.user_consent_server_notice_to_guests = False
         self.block_events_without_consent_error = None
+        self.user_consent_at_registration = False
+        self.user_consent_policy_name = "Privacy Policy"
 
     def read_config(self, config):
         consent_config = config.get("user_consent")
         if consent_config is None:
             return
         self.user_consent_version = str(consent_config["version"])
-        self.user_consent_template_dir = consent_config["template_dir"]
+        self.user_consent_template_dir = self.abspath(
+            consent_config["template_dir"]
+        )
+        if not path.isdir(self.user_consent_template_dir):
+            raise ConfigError(
+                "Could not find template directory '%s'" % (
+                    self.user_consent_template_dir,
+                ),
+            )
         self.user_consent_server_notice_content = consent_config.get(
             "server_notice_content",
         )
@@ -83,6 +107,12 @@ class ConsentConfig(Config):
         self.user_consent_server_notice_to_guests = bool(consent_config.get(
             "send_server_notice_to_guests", False,
         ))
+        self.user_consent_at_registration = bool(consent_config.get(
+            "require_at_registration", False,
+        ))
+        self.user_consent_policy_name = consent_config.get(
+            "policy_name", "Privacy Policy",
+        )
 
     def default_config(self, **kwargs):
         return DEFAULT_CONFIG
diff --git a/synapse/config/database.py b/synapse/config/database.py
index e915d9d09b..c8890147a6 100644
--- a/synapse/config/database.py
+++ b/synapse/config/database.py
@@ -12,6 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import os
 
 from ._base import Config
 
@@ -45,8 +46,8 @@ class DatabaseConfig(Config):
 
         self.set_databasepath(config.get("database_path"))
 
-    def default_config(self, **kwargs):
-        database_path = self.abspath("homeserver.db")
+    def default_config(self, data_dir_path, **kwargs):
+        database_path = os.path.join(data_dir_path, "homeserver.db")
         return """\
         # Database configuration
         database:
diff --git a/synapse/config/groups.py b/synapse/config/groups.py
index 997fa2881f..46933a904c 100644
--- a/synapse/config/groups.py
+++ b/synapse/config/groups.py
@@ -24,9 +24,11 @@ class GroupsConfig(Config):
     def default_config(self, **kwargs):
         return """\
         # Whether to allow non server admins to create groups on this server
+        #
         enable_group_creation: false
 
         # If enabled, non server admins can only create groups with local parts
         # starting with this prefix
-        # group_creation_prefix: "unofficial/"
+        #
+        #group_creation_prefix: "unofficial/"
         """
diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py
index 10dd40159f..727fdc54d8 100644
--- a/synapse/config/homeserver.py
+++ b/synapse/config/homeserver.py
@@ -32,7 +32,7 @@ from .ratelimiting import RatelimitConfig
 from .registration import RegistrationConfig
 from .repository import ContentRepositoryConfig
 from .room_directory import RoomDirectoryConfig
-from .saml2 import SAML2Config
+from .saml2_config import SAML2Config
 from .server import ServerConfig
 from .server_notices_config import ServerNoticesConfig
 from .spam_checker import SpamCheckerConfig
@@ -42,7 +42,7 @@ from .voip import VoipConfig
 from .workers import WorkerConfig
 
 
-class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,
+class HomeServerConfig(ServerConfig, TlsConfig, DatabaseConfig, LoggingConfig,
                        RatelimitConfig, ContentRepositoryConfig, CaptchaConfig,
                        VoipConfig, RegistrationConfig, MetricsConfig, ApiConfig,
                        AppServiceConfig, KeyConfig, SAML2Config, CasConfig,
@@ -53,10 +53,3 @@ class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,
                        ServerNoticesConfig, RoomDirectoryConfig,
                        ):
     pass
-
-
-if __name__ == '__main__':
-    import sys
-    sys.stdout.write(
-        HomeServerConfig().generate_config(sys.argv[1], sys.argv[2], True)[0]
-    )
diff --git a/synapse/config/jwt_config.py b/synapse/config/jwt_config.py
index 51e7f7e003..ecb4124096 100644
--- a/synapse/config/jwt_config.py
+++ b/synapse/config/jwt_config.py
@@ -46,8 +46,8 @@ class JWTConfig(Config):
         return """\
         # The JWT needs to contain a globally unique "sub" (subject) claim.
         #
-        # jwt_config:
-        #    enabled: true
-        #    secret: "a secret"
-        #    algorithm: "HS256"
+        #jwt_config:
+        #   enabled: true
+        #   secret: "a secret"
+        #   algorithm: "HS256"
         """
diff --git a/synapse/config/key.py b/synapse/config/key.py
index 279c47bb48..35f05fa974 100644
--- a/synapse/config/key.py
+++ b/synapse/config/key.py
@@ -40,7 +40,7 @@ class KeyConfig(Config):
     def read_config(self, config):
         self.signing_key = self.read_signing_key(config["signing_key_path"])
         self.old_signing_keys = self.read_old_signing_keys(
-            config["old_signing_keys"]
+            config.get("old_signing_keys", {})
         )
         self.key_refresh_interval = self.parse_duration(
             config["key_refresh_interval"]
@@ -56,9 +56,9 @@ class KeyConfig(Config):
         if not self.macaroon_secret_key:
             # Unfortunately, there are people out there that don't have this
             # set. Let's just be "nice" and derive one from their secret key.
-            logger.warn("Config is missing missing macaroon_secret_key")
-            seed = self.signing_key[0].seed
-            self.macaroon_secret_key = hashlib.sha256(seed)
+            logger.warn("Config is missing macaroon_secret_key")
+            seed = bytes(self.signing_key[0])
+            self.macaroon_secret_key = hashlib.sha256(seed).digest()
 
         self.expire_access_token = config.get("expire_access_token", False)
 
@@ -66,35 +66,46 @@ class KeyConfig(Config):
         # falsification of values
         self.form_secret = config.get("form_secret", None)
 
-    def default_config(self, config_dir_path, server_name, is_generating_file=False,
+    def default_config(self, config_dir_path, server_name, generate_secrets=False,
                        **kwargs):
         base_key_name = os.path.join(config_dir_path, server_name)
 
-        if is_generating_file:
-            macaroon_secret_key = random_string_with_symbols(50)
-            form_secret = '"%s"' % random_string_with_symbols(50)
+        if generate_secrets:
+            macaroon_secret_key = 'macaroon_secret_key: "%s"' % (
+                random_string_with_symbols(50),
+            )
+            form_secret = 'form_secret: "%s"' % random_string_with_symbols(50)
         else:
-            macaroon_secret_key = None
-            form_secret = 'null'
+            macaroon_secret_key = "# macaroon_secret_key: <PRIVATE STRING>"
+            form_secret = "# form_secret: <PRIVATE STRING>"
 
         return """\
-        macaroon_secret_key: "%(macaroon_secret_key)s"
+        # a secret which is used to sign access tokens. If none is specified,
+        # the registration_shared_secret is used, if one is given; otherwise,
+        # a secret key is derived from the signing key.
+        #
+        %(macaroon_secret_key)s
 
         # Used to enable access token expiration.
+        #
         expire_access_token: False
 
         # a secret which is used to calculate HMACs for form values, to stop
-        # falsification of values
-        form_secret: %(form_secret)s
+        # falsification of values. Must be specified for the User Consent
+        # forms to work.
+        #
+        %(form_secret)s
 
         ## Signing Keys ##
 
         # Path to the signing key to sign messages with
+        #
         signing_key_path: "%(base_key_name)s.signing.key"
 
         # The keys that the server used to sign messages with but won't use
         # to sign new messages. E.g. it has lost its private key
-        old_signing_keys: {}
+        #
+        #old_signing_keys:
         #  "ed25519:auto":
         #    # Base64 encoded public key
         #    key: "The public part of your old signing key."
@@ -105,9 +116,11 @@ class KeyConfig(Config):
         # Used to set the valid_until_ts in /key/v2 APIs.
         # Determines how quickly servers will query to check which keys
         # are still valid.
+        #
         key_refresh_interval: "1d" # 1 Day.
 
         # The trusted servers to download signing keys from.
+        #
         perspectives:
           servers:
             "matrix.org":
diff --git a/synapse/config/logger.py b/synapse/config/logger.py
index e9a936118d..f6940b65fd 100644
--- a/synapse/config/logger.py
+++ b/synapse/config/logger.py
@@ -15,7 +15,6 @@
 import logging
 import logging.config
 import os
-import signal
 import sys
 from string import Template
 
@@ -24,6 +23,7 @@ import yaml
 from twisted.logger import STDLibLogObserver, globalLogBeginner
 
 import synapse
+from synapse.app import _base as appbase
 from synapse.util.logcontext import LoggingContextFilter
 from synapse.util.versionstring import get_version_string
 
@@ -50,6 +50,7 @@ handlers:
         maxBytes: 104857600
         backupCount: 10
         filters: [context]
+        encoding: utf8
     console:
         class: logging.StreamHandler
         formatter: precise
@@ -79,11 +80,10 @@ class LoggingConfig(Config):
         self.log_file = self.abspath(config.get("log_file"))
 
     def default_config(self, config_dir_path, server_name, **kwargs):
-        log_config = self.abspath(
-            os.path.join(config_dir_path, server_name + ".log.config")
-        )
+        log_config = os.path.join(config_dir_path, server_name + ".log.config")
         return """
         # A yaml python logging config file
+        #
         log_config: "%(log_config)s"
         """ % locals()
 
@@ -137,6 +137,9 @@ def setup_logging(config, use_worker_options=False):
 
         use_worker_options (bool): True to use 'worker_log_config' and
             'worker_log_file' options instead of 'log_config' and 'log_file'.
+
+        register_sighup (func | None): Function to call to register a
+            sighup handler.
     """
     log_config = (config.worker_log_config if use_worker_options
                   else config.log_config)
@@ -179,7 +182,7 @@ def setup_logging(config, use_worker_options=False):
         else:
             handler = logging.StreamHandler()
 
-            def sighup(signum, stack):
+            def sighup(*args):
                 pass
 
         handler.setFormatter(formatter)
@@ -192,20 +195,14 @@ def setup_logging(config, use_worker_options=False):
             with open(log_config, 'r') as f:
                 logging.config.dictConfig(yaml.load(f))
 
-        def sighup(signum, stack):
+        def sighup(*args):
             # it might be better to use a file watcher or something for this.
             load_log_config()
             logging.info("Reloaded log config from %s due to SIGHUP", log_config)
 
         load_log_config()
 
-    # TODO(paul): obviously this is a terrible mechanism for
-    #   stealing SIGHUP, because it means no other part of synapse
-    #   can use it instead. If we want to catch SIGHUP anywhere
-    #   else as well, I'd suggest we find a nicer way to broadcast
-    #   it around.
-    if getattr(signal, "SIGHUP"):
-        signal.signal(signal.SIGHUP, sighup)
+    appbase.register_sighup(sighup)
 
     # make sure that the first thing we log is a thing we can grep backwards
     # for
@@ -246,3 +243,5 @@ def setup_logging(config, use_worker_options=False):
         [_log],
         redirectStandardIO=not config.no_redirect_stdio,
     )
+    if not config.no_redirect_stdio:
+        print("Redirected stdout/stderr to logs")
diff --git a/synapse/config/metrics.py b/synapse/config/metrics.py
index 61155c99d0..ed0498c634 100644
--- a/synapse/config/metrics.py
+++ b/synapse/config/metrics.py
@@ -13,7 +13,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import Config
+from ._base import Config, ConfigError
+
+MISSING_SENTRY = (
+    """Missing sentry-sdk library. This is required to enable sentry
+    integration.
+    """
+)
 
 
 class MetricsConfig(Config):
@@ -23,11 +29,43 @@ class MetricsConfig(Config):
         self.metrics_port = config.get("metrics_port")
         self.metrics_bind_host = config.get("metrics_bind_host", "127.0.0.1")
 
+        self.sentry_enabled = "sentry" in config
+        if self.sentry_enabled:
+            try:
+                import sentry_sdk  # noqa F401
+            except ImportError:
+                raise ConfigError(MISSING_SENTRY)
+
+            self.sentry_dsn = config["sentry"].get("dsn")
+            if not self.sentry_dsn:
+                raise ConfigError(
+                    "sentry.dsn field is required when sentry integration is enabled",
+                )
+
     def default_config(self, report_stats=None, **kwargs):
-        suffix = "" if report_stats is None else "report_stats: %(report_stats)s\n"
-        return ("""\
+        res = """\
         ## Metrics ##
 
         # Enable collection and rendering of performance metrics
+        #
         enable_metrics: False
-        """ + suffix) % locals()
+
+        # Enable sentry integration
+        # NOTE: While attempts are made to ensure that the logs don't contain
+        # any sensitive information, this cannot be guaranteed. By enabling
+        # this option the sentry server may therefore receive sensitive
+        # information, and it in turn may then disseminate sensitive information
+        # through insecure notification channels if so configured.
+        #
+        #sentry:
+        #    dsn: "..."
+
+        # Whether or not to report anonymized homeserver usage statistics.
+        """
+
+        if report_stats is None:
+            res += "# report_stats: true|false\n"
+        else:
+            res += "report_stats: %s\n" % ('true' if report_stats else 'false')
+
+        return res
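
Sentry integration above is switched on simply by the presence of a sentry section, with two hard failures: the sentry-sdk package must be importable, and a dsn must be given. A condensed sketch of that validation (RuntimeError standing in for ConfigError):

    def read_sentry_config(config):
        if "sentry" not in config:
            return None  # integration disabled

        try:
            import sentry_sdk  # noqa: F401
        except ImportError:
            raise RuntimeError(
                "sentry-sdk is required for sentry integration"
            )

        dsn = config["sentry"].get("dsn")
        if not dsn:
            raise RuntimeError(
                "sentry.dsn is required when sentry is enabled"
            )
        return dsn

    assert read_sentry_config({}) is None
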
diff --git a/synapse/config/password.py b/synapse/config/password.py
index a4bd171399..2a52b9db54 100644
--- a/synapse/config/password.py
+++ b/synapse/config/password.py
@@ -28,6 +28,7 @@ class PasswordConfig(Config):
     def default_config(self, config_dir_path, server_name, **kwargs):
         return """
         # Enable password for login.
+        #
         password_config:
            enabled: true
            # Uncomment and change to a secret random string for extra security.
diff --git a/synapse/config/password_auth_providers.py b/synapse/config/password_auth_providers.py
index f4066abc28..f0a6be0679 100644
--- a/synapse/config/password_auth_providers.py
+++ b/synapse/config/password_auth_providers.py
@@ -52,18 +52,18 @@ class PasswordAuthProviderConfig(Config):
 
     def default_config(self, **kwargs):
         return """\
-        # password_providers:
-        #     - module: "ldap_auth_provider.LdapAuthProvider"
-        #       config:
-        #         enabled: true
-        #         uri: "ldap://ldap.example.com:389"
-        #         start_tls: true
-        #         base: "ou=users,dc=example,dc=com"
-        #         attributes:
-        #            uid: "cn"
-        #            mail: "email"
-        #            name: "givenName"
-        #         #bind_dn:
-        #         #bind_password:
-        #         #filter: "(objectClass=posixAccount)"
+        #password_providers:
+        #    - module: "ldap_auth_provider.LdapAuthProvider"
+        #      config:
+        #        enabled: true
+        #        uri: "ldap://ldap.example.com:389"
+        #        start_tls: true
+        #        base: "ou=users,dc=example,dc=com"
+        #        attributes:
+        #           uid: "cn"
+        #           mail: "email"
+        #           name: "givenName"
+        #        #bind_dn:
+        #        #bind_password:
+        #        #filter: "(objectClass=posixAccount)"
         """
diff --git a/synapse/config/push.py b/synapse/config/push.py
index b7e0d46afa..62c0060c9c 100644
--- a/synapse/config/push.py
+++ b/synapse/config/push.py
@@ -51,11 +51,11 @@ class PushConfig(Config):
         # notification request includes the content of the event (other details
         # like the sender are still included). For `event_id_only` push, it
         # has no effect.
-
+        #
         # For modern android devices the notification content will still appear
         # because it is loaded by the app. iPhone, however, will send a
         # notification saying only that a message arrived and who it came from.
         #
         #push:
-        #   include_content: true
+        #  include_content: true
         """
diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py
index 83b22dc199..54b71e6841 100644
--- a/synapse/config/ratelimiting.py
+++ b/synapse/config/ratelimiting.py
@@ -32,27 +32,34 @@ class RatelimitConfig(Config):
         ## Ratelimiting ##
 
         # Number of messages a client can send per second
+        #
         rc_messages_per_second: 0.2
 
         # Number of messages a client can send before being throttled
+        #
         rc_message_burst_count: 10.0
 
         # The federation window size in milliseconds
+        #
         federation_rc_window_size: 1000
 
         # The number of federation requests from a single server in a window
         # before the server will delay processing the request.
+        #
         federation_rc_sleep_limit: 10
 
         # The duration in milliseconds to delay processing events from
         # remote servers by if they go over the sleep limit.
+        #
         federation_rc_sleep_delay: 500
 
         # The maximum number of concurrent federation requests allowed
         # from a single server
+        #
         federation_rc_reject_limit: 50
 
         # The number of federation requests to concurrently process from a
         # single server
+        #
         federation_rc_concurrent: 3
         """
diff --git a/synapse/config/registration.py b/synapse/config/registration.py
index efc4c35f1d..4c12b0ea47 100644
--- a/synapse/config/registration.py
+++ b/synapse/config/registration.py
@@ -45,6 +45,7 @@ class RegistrationConfig(Config):
 
         self.bcrypt_rounds = config.get("bcrypt_rounds", 12)
         self.trusted_third_party_id_servers = config["trusted_third_party_id_servers"]
+        self.default_identity_server = config.get("default_identity_server")
         self.allow_guest_access = config.get("allow_guest_access", False)
 
         self.invite_3pid_guest = (
@@ -67,9 +68,17 @@ class RegistrationConfig(Config):
         self.shadow_server = config.get("shadow_server", None)
         self.rewrite_identity_server_urls = config.get("rewrite_identity_server_urls", {})
 
+        self.disable_msisdn_registration = (
+            config.get("disable_msisdn_registration", False)
+        )
 
-    def default_config(self, **kwargs):
-        registration_shared_secret = random_string_with_symbols(50)
+    def default_config(self, generate_secrets=False, **kwargs):
+        if generate_secrets:
+            registration_shared_secret = 'registration_shared_secret: "%s"' % (
+                random_string_with_symbols(50),
+            )
+        else:
+            registration_shared_secret = '# registration_shared_secret: <PRIVATE STRING>'
 
         return """\
         ## Registration ##
@@ -79,9 +88,14 @@ class RegistrationConfig(Config):
 
         # The user must provide all of the below types of 3PID when registering.
         #
-        # registrations_require_3pid:
-        #     - email
-        #     - msisdn
+        #registrations_require_3pid:
+        #  - email
+        #  - msisdn
+
+        # Explicitly disable asking for MSISDNs from the registration
+        # flow (overrides registrations_require_3pid if MSISDNs are set as required)
+        #
+        #disable_msisdn_registration: True
 
         # Derive the user's matrix ID from a type of 3PID used when registering.
         # This overrides any matrix ID the user proposes when calling /register
@@ -118,26 +132,40 @@ class RegistrationConfig(Config):
 
         # If set, allows registration by anyone who also has the shared
         # secret, even if registration is otherwise disabled.
-        registration_shared_secret: "%(registration_shared_secret)s"
+        #
+        %(registration_shared_secret)s
 
         # Set the number of bcrypt rounds used to generate password hash.
         # Larger numbers increase the work factor needed to generate the hash.
         # The default number is 12 (which equates to 2^12 rounds).
         # N.B. that increasing this will exponentially increase the time required
         # to register or login - e.g. 24 => 2^24 rounds which will take >20 mins.
+        #
         bcrypt_rounds: 12
 
         # Allows users to register as guests without a password/email/etc, and
         # participate in rooms hosted on this server which have been made
         # accessible to anonymous users.
+        #
         allow_guest_access: False
 
+        # The identity server which we suggest that clients should use when users log
+        # in on this server.
+        #
+        # (By default, no suggestion is made, so it is left up to the client.
+        # This setting is ignored unless public_baseurl is also set.)
+        #
+        #default_identity_server: https://matrix.org
+
         # The list of identity servers trusted to verify third party
         # identifiers by this server.
+        #
+        # Also defines the ID server which will be called when an account is
+        # deactivated (one will be picked arbitrarily).
+        #
         trusted_third_party_id_servers:
-            - matrix.org
-            - vector.im
-            - riot.im
+          - matrix.org
+          - vector.im
 
         # If enabled, user IDs, display names and avatar URLs will be replicated
         # to this server whenever they change.
@@ -163,14 +191,16 @@ class RegistrationConfig(Config):
 
         # Users who register on this homeserver will automatically be joined
         # to these rooms
+        #
         #auto_join_rooms:
-        #    - "#example:example.com"
+        #  - "#example:example.com"
 
         # Where auto_join_rooms are specified, setting this flag ensures that
         # the rooms exist by creating them when the first user on the
         # homeserver registers.
         # Setting to false means that if the rooms are not manually created,
         # users cannot be auto-joined since they do not exist.
+        #
         autocreate_auto_join_rooms: true
         """ % locals()
 
diff --git a/synapse/config/repository.py b/synapse/config/repository.py
index 06c62ab62c..97db2a5b7a 100644
--- a/synapse/config/repository.py
+++ b/synapse/config/repository.py
@@ -12,7 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
+import os
 from collections import namedtuple
 
 from synapse.util.module_loader import load_module
@@ -175,34 +175,39 @@ class ContentRepositoryConfig(Config):
                 "url_preview_url_blacklist", ()
             )
 
-    def default_config(self, **kwargs):
-        media_store = self.default_path("media_store")
-        uploads_path = self.default_path("uploads")
+    def default_config(self, data_dir_path, **kwargs):
+        media_store = os.path.join(data_dir_path, "media_store")
+        uploads_path = os.path.join(data_dir_path, "uploads")
         return r"""
         # Directory where uploaded images and attachments are stored.
+        #
         media_store_path: "%(media_store)s"
 
         # Media storage providers allow media to be stored in different
         # locations.
-        # media_storage_providers:
-        # - module: file_system
-        #   # Whether to write new local files.
-        #   store_local: false
-        #   # Whether to write new remote media
-        #   store_remote: false
-        #   # Whether to block upload requests waiting for write to this
-        #   # provider to complete
-        #   store_synchronous: false
-        #   config:
-        #     directory: /mnt/some/other/directory
+        #
+        #media_storage_providers:
+        #  - module: file_system
+        #    # Whether to write new local files.
+        #    store_local: false
+        #    # Whether to write new remote media
+        #    store_remote: false
+        #    # Whether to block upload requests waiting for write to this
+        #    # provider to complete
+        #    store_synchronous: false
+        #    config:
+        #       directory: /mnt/some/other/directory
 
         # Directory where in-progress uploads are stored.
+        #
         uploads_path: "%(uploads_path)s"
 
         # The largest allowed upload size in bytes
+        #
         max_upload_size: "10M"
 
         # Maximum number of pixels that will be thumbnailed
+        #
         max_image_pixels: "32M"
 
         # Whether to generate new thumbnails on the fly to precisely match
@@ -210,9 +215,11 @@ class ContentRepositoryConfig(Config):
         # a new resolution is requested by the client the server will
         # generate a new thumbnail. If false the server will pick a thumbnail
         # from a precalculated list.
+        #
         dynamic_thumbnails: false
 
-        # List of thumbnail to precalculate when an image is uploaded.
+        # List of thumbnails to precalculate when an image is uploaded.
+        #
         thumbnail_sizes:
         - width: 32
           height: 32
@@ -233,6 +240,7 @@ class ContentRepositoryConfig(Config):
         # Is the preview URL API enabled?  If enabled, you *must* specify
         # an explicit url_preview_ip_range_blacklist of IPs that the spider is
         # denied from accessing.
+        #
         url_preview_enabled: False
 
         # List of IP address CIDR ranges that the URL preview spider is denied
@@ -243,16 +251,16 @@ class ContentRepositoryConfig(Config):
         # synapse to issue arbitrary GET requests to your internal services,
         # causing serious security issues.
         #
-        # url_preview_ip_range_blacklist:
-        # - '127.0.0.0/8'
-        # - '10.0.0.0/8'
-        # - '172.16.0.0/12'
-        # - '192.168.0.0/16'
-        # - '100.64.0.0/10'
-        # - '169.254.0.0/16'
-        # - '::1/128'
-        # - 'fe80::/64'
-        # - 'fc00::/7'
+        #url_preview_ip_range_blacklist:
+        #  - '127.0.0.0/8'
+        #  - '10.0.0.0/8'
+        #  - '172.16.0.0/12'
+        #  - '192.168.0.0/16'
+        #  - '100.64.0.0/10'
+        #  - '169.254.0.0/16'
+        #  - '::1/128'
+        #  - 'fe80::/64'
+        #  - 'fc00::/7'
         #
         # List of IP address CIDR ranges that the URL preview spider is allowed
         # to access even if they are specified in url_preview_ip_range_blacklist.
@@ -260,8 +268,8 @@ class ContentRepositoryConfig(Config):
         # target IP ranges - e.g. for enabling URL previews for a specific private
         # website only visible in your network.
         #
-        # url_preview_ip_range_whitelist:
-        # - '192.168.1.1'
+        #url_preview_ip_range_whitelist:
+        #   - '192.168.1.1'
 
         # Optional list of URL matches that the URL preview spider is
         # denied from accessing.  You should use url_preview_ip_range_blacklist
@@ -279,26 +287,25 @@ class ContentRepositoryConfig(Config):
         # specified component matches for a given list item succeed, the URL is
         # blacklisted.
         #
-        # url_preview_url_blacklist:
-        # # blacklist any URL with a username in its URI
-        # - username: '*'
+        #url_preview_url_blacklist:
+        #  # blacklist any URL with a username in its URI
+        #  - username: '*'
         #
-        # # blacklist all *.google.com URLs
-        # - netloc: 'google.com'
-        # - netloc: '*.google.com'
+        #  # blacklist all *.google.com URLs
+        #  - netloc: 'google.com'
+        #  - netloc: '*.google.com'
         #
-        # # blacklist all plain HTTP URLs
-        # - scheme: 'http'
+        #  # blacklist all plain HTTP URLs
+        #  - scheme: 'http'
         #
-        # # blacklist http(s)://www.acme.com/foo
-        # - netloc: 'www.acme.com'
-        #   path: '/foo'
+        #  # blacklist http(s)://www.acme.com/foo
+        #  - netloc: 'www.acme.com'
+        #    path: '/foo'
         #
-        # # blacklist any URL with a literal IPv4 address
-        # - netloc: '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$'
+        #  # blacklist any URL with a literal IPv4 address
+        #  - netloc: '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$'
 
         # The largest allowed URL preview spidering size in bytes
         max_spider_size: "10M"
 
-
         """ % locals()
diff --git a/synapse/config/room_directory.py b/synapse/config/room_directory.py
index 9da13ab11b..9b897abe3c 100644
--- a/synapse/config/room_directory.py
+++ b/synapse/config/room_directory.py
@@ -20,12 +20,37 @@ from ._base import Config, ConfigError
 
 class RoomDirectoryConfig(Config):
     def read_config(self, config):
-        alias_creation_rules = config["alias_creation_rules"]
+        alias_creation_rules = config.get("alias_creation_rules")
 
-        self._alias_creation_rules = [
-            _AliasRule(rule)
-            for rule in alias_creation_rules
-        ]
+        if alias_creation_rules is not None:
+            self._alias_creation_rules = [
+                _RoomDirectoryRule("alias_creation_rules", rule)
+                for rule in alias_creation_rules
+            ]
+        else:
+            self._alias_creation_rules = [
+                _RoomDirectoryRule(
+                    "alias_creation_rules", {
+                        "action": "allow",
+                    }
+                )
+            ]
+
+        room_list_publication_rules = config.get("room_list_publication_rules")
+
+        if room_list_publication_rules is not None:
+            self._room_list_publication_rules = [
+                _RoomDirectoryRule("room_list_publication_rules", rule)
+                for rule in room_list_publication_rules
+            ]
+        else:
+            self._room_list_publication_rules = [
+                _RoomDirectoryRule(
+                    "room_list_publication_rules", {
+                        "action": "allow",
+                    }
+                )
+            ]
 
     def default_config(self, config_dir_path, server_name, **kwargs):
         return """
@@ -33,60 +58,138 @@ class RoomDirectoryConfig(Config):
         # on this server.
         #
         # The format of this option is a list of rules that contain globs that
-        # match against user_id and the new alias (fully qualified with server
-        # name). The action in the first rule that matches is taken, which can
-        # currently either be "allow" or "deny".
+        # match against user_id, room_id and the new alias (fully qualified with
+        # server name). The action in the first rule that matches is taken,
+        # which can currently either be "allow" or "deny".
+        #
+        # Missing user_id/room_id/alias fields default to "*".
+        #
+        # If no rules match, the request is denied. An empty list means no one
+        # can create aliases.
+        #
+        # Options for the rules include:
+        #
+        #   user_id: Matches against the creator of the alias
+        #   alias: Matches against the alias being created
+        #   room_id: Matches against the room ID the alias is being pointed at
+        #   action: Whether to "allow" or "deny" the request if the rule matches
+        #
+        # The default is:
+        #
+        #alias_creation_rules:
+        #  - user_id: "*"
+        #    alias: "*"
+        #    room_id: "*"
+        #    action: allow
+
+        # The `room_list_publication_rules` option controls who can publish and
+        # which rooms can be published in the public room list.
+        #
+        # The format of this option is the same as that for
+        # `alias_creation_rules`.
         #
-        # If no rules match the request is denied.
-        alias_creation_rules:
-            - user_id: "*"
-              alias: "*"
-              action: allow
+        # If the room has one or more aliases associated with it, only one of
+        # the aliases needs to match the alias rule. If there are no aliases
+        # then only rules with `alias: *` match.
+        #
+        # If no rules match, the request is denied. An empty list means no one
+        # can publish rooms.
+        #
+        # Options for the rules include:
+        #
+        #   user_id: Matches against the creator of the alias
+        #   room_id: Matches against the room ID being published
+        #   alias: Matches against any current local or canonical aliases
+        #            associated with the room
+        #   action: Whether to "allow" or "deny" the request if the rule matches
+        #
+        # The default is:
+        #
+        #room_list_publication_rules:
+        #  - user_id: "*"
+        #    alias: "*"
+        #    room_id: "*"
+        #    action: allow
         """
 
-    def is_alias_creation_allowed(self, user_id, alias):
+    def is_alias_creation_allowed(self, user_id, room_id, alias):
         """Checks if the given user is allowed to create the given alias
 
         Args:
             user_id (str)
+            room_id (str)
             alias (str)
 
         Returns:
             boolean: True if user is allowed to create the alias
         """
         for rule in self._alias_creation_rules:
-            if rule.matches(user_id, alias):
+            if rule.matches(user_id, room_id, [alias]):
+                return rule.action == "allow"
+
+        return False
+
+    def is_publishing_room_allowed(self, user_id, room_id, aliases):
+        """Checks if the given user is allowed to publish the room
+
+        Args:
+            user_id (str)
+            room_id (str)
+            aliases (list[str]): any local aliases associated with the room
+
+        Returns:
+            boolean: True if user can publish room
+        """
+        for rule in self._room_list_publication_rules:
+            if rule.matches(user_id, room_id, aliases):
                 return rule.action == "allow"
 
         return False
 
 
-class _AliasRule(object):
-    def __init__(self, rule):
+class _RoomDirectoryRule(object):
+    """Helper class to test whether a room directory action is allowed, like
+    creating an alias or publishing a room.
+    """
+
+    def __init__(self, option_name, rule):
+        """
+        Args:
+            option_name (str): Name of the config option this rule belongs to
+            rule (dict): The rule as specified in the config
+        """
+
         action = rule["action"]
-        user_id = rule["user_id"]
-        alias = rule["alias"]
+        user_id = rule.get("user_id", "*")
+        room_id = rule.get("room_id", "*")
+        alias = rule.get("alias", "*")
 
         if action in ("allow", "deny"):
             self.action = action
         else:
             raise ConfigError(
-                "alias_creation_rules rules can only have action of 'allow'"
-                " or 'deny'"
+                "%s rules can only have action of 'allow'"
+                " or 'deny'" % (option_name,)
             )
 
+        self._alias_matches_all = alias == "*"
+
         try:
             self._user_id_regex = glob_to_regex(user_id)
             self._alias_regex = glob_to_regex(alias)
+            self._room_id_regex = glob_to_regex(room_id)
         except Exception as e:
             raise ConfigError("Failed to parse glob into regex: %s", e)
 
-    def matches(self, user_id, alias):
-        """Tests if this rule matches the given user_id and alias.
+    def matches(self, user_id, room_id, aliases):
+        """Tests if this rule matches the given user_id, room_id and aliases.
 
         Args:
             user_id (str)
-            alias (str)
+            room_id (str)
+            aliases (list[str]): The aliases associated with the room. Will be
+                a single element when testing alias creation, and can be empty
+                when testing room publishing.
 
         Returns:
             boolean
@@ -96,7 +199,22 @@ class _AliasRule(object):
         if not self._user_id_regex.match(user_id):
             return False
 
-        if not self._alias_regex.match(alias):
+        if not self._room_id_regex.match(room_id):
             return False
 
-        return True
+        # We only have alias checks left, so we can short circuit if the alias
+        # rule matches everything.
+        if self._alias_matches_all:
+            return True
+
+        # If we are not given any aliases then this rule only matches if the
+        # alias glob matches all aliases, which we checked above.
+        if not aliases:
+            return False
+
+        # Otherwise, we just need one alias to match
+        for alias in aliases:
+            if self._alias_regex.match(alias):
+                return True
+
+        return False
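
To make the matching semantics concrete: every field is a glob, and a rule
fires only when user_id, room_id and at least one alias all match. A minimal
sketch, with fnmatch.translate standing in for synapse's glob_to_regex (an
assumption; values are illustrative):

    import fnmatch
    import re

    def glob_to_regex(glob):
        # stand-in: compile a shell-style glob into a full-string regex
        return re.compile(fnmatch.translate(glob))

    rule = {"user_id": "@admin:*", "room_id": "*", "alias": "#public-*"}
    aliases = ["#public-chat:example.com"]

    matches = (
        bool(glob_to_regex(rule["user_id"]).match("@admin:example.com"))
        and bool(glob_to_regex(rule["room_id"]).match("!abc:example.com"))
        and any(glob_to_regex(rule["alias"]).match(a) for a in aliases)
    )
    print(matches)  # True -> the rule's action ("allow" or "deny") applies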
diff --git a/synapse/config/saml2.py b/synapse/config/saml2.py
deleted file mode 100644
index 8d7f443021..0000000000
--- a/synapse/config/saml2.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2015 Ericsson
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from ._base import Config
-
-
-class SAML2Config(Config):
-    """SAML2 Configuration
-    Synapse uses pysaml2 libraries for providing SAML2 support
-
-    config_path:      Path to the sp_conf.py configuration file
-    idp_redirect_url: Identity provider URL which will redirect
-                      the user back to /login/saml2 with proper info.
-
-    sp_conf.py file is something like:
-    https://github.com/rohe/pysaml2/blob/master/example/sp-repoze/sp_conf.py.example
-
-    More information: https://pythonhosted.org/pysaml2/howto/config.html
-    """
-
-    def read_config(self, config):
-        saml2_config = config.get("saml2_config", None)
-        if saml2_config:
-            self.saml2_enabled = saml2_config.get("enabled", True)
-            self.saml2_config_path = saml2_config["config_path"]
-            self.saml2_idp_redirect_url = saml2_config["idp_redirect_url"]
-        else:
-            self.saml2_enabled = False
-            self.saml2_config_path = None
-            self.saml2_idp_redirect_url = None
-
-    def default_config(self, config_dir_path, server_name, **kwargs):
-        return """
-        # Enable SAML2 for registration and login. Uses pysaml2
-        # config_path:      Path to the sp_conf.py configuration file
-        # idp_redirect_url: Identity provider URL which will redirect
-        #                   the user back to /login/saml2 with proper info.
-        # See pysaml2 docs for format of config.
-        #saml2_config:
-        #   enabled: true
-        #   config_path: "%s/sp_conf.py"
-        #   idp_redirect_url: "http://%s/idp"
-        """ % (config_dir_path, server_name)
diff --git a/synapse/config/saml2_config.py b/synapse/config/saml2_config.py
new file mode 100644
index 0000000000..aff0a1f00c
--- /dev/null
+++ b/synapse/config/saml2_config.py
@@ -0,0 +1,109 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config, ConfigError
+
+
+class SAML2Config(Config):
+    def read_config(self, config):
+        self.saml2_enabled = False
+
+        saml2_config = config.get("saml2_config")
+
+        if not saml2_config or not saml2_config.get("enabled", True):
+            return
+
+        self.saml2_enabled = True
+
+        import saml2.config
+        self.saml2_sp_config = saml2.config.SPConfig()
+        self.saml2_sp_config.load(self._default_saml_config_dict())
+        self.saml2_sp_config.load(saml2_config.get("sp_config", {}))
+
+        config_path = saml2_config.get("config_path", None)
+        if config_path is not None:
+            self.saml2_sp_config.load_file(config_path)
+
+    def _default_saml_config_dict(self):
+        import saml2
+
+        public_baseurl = self.public_baseurl
+        if public_baseurl is None:
+            raise ConfigError(
+                "saml2_config requires a public_baseurl to be set"
+            )
+
+        metadata_url = public_baseurl + "_matrix/saml2/metadata.xml"
+        response_url = public_baseurl + "_matrix/saml2/authn_response"
+        return {
+            "entityid": metadata_url,
+
+            "service": {
+                "sp": {
+                    "endpoints": {
+                        "assertion_consumer_service": [
+                            (response_url, saml2.BINDING_HTTP_POST),
+                        ],
+                    },
+                    "required_attributes": ["uid"],
+                    "optional_attributes": ["mail", "surname", "givenname"],
+                },
+            }
+        }
+
+    def default_config(self, config_dir_path, server_name, **kwargs):
+        return """
+        # Enable SAML2 for registration and login. Uses pysaml2.
+        #
+        # `sp_config` is the configuration for the pysaml2 Service Provider.
+        # See pysaml2 docs for format of config.
+        #
+        # Default values will be used for the 'entityid' and 'service' settings,
+        # so it is not normally necessary to specify them unless you need to
+        # override them.
+        #
+        #saml2_config:
+        #  sp_config:
+        #    # point this to the IdP's metadata. You can use either a local file or
+        #    # (preferably) a URL.
+        #    metadata:
+        #      #local: ["saml2/idp.xml"]
+        #      remote:
+        #        - url: https://our_idp/metadata.xml
+        #
+        #    # The rest of sp_config is just used to generate our metadata xml, and you
+        #    # may well not need it, depending on your setup. Alternatively you
+        #    # may need a whole lot more detail - see the pysaml2 docs!
+        #
+        #    description: ["My awesome SP", "en"]
+        #    name: ["Test SP", "en"]
+        #
+        #    organization:
+        #      name: Example com
+        #      display_name:
+        #        - ["Example co", "en"]
+        #      url: "http://example.com"
+        #
+        #    contact_person:
+        #      - given_name: Bob
+        #        sur_name: "the Sysadmin"
+        #        email_address": ["admin@example.com"]
+        #        contact_type": technical
+        #
+        #  # Instead of putting the config inline as above, you can specify a
+        #  # separate pysaml2 configuration file:
+        #  #
+        #  config_path: "%(config_dir_path)s/sp_conf.py"
+        """ % {"config_dir_path": config_dir_path}
diff --git a/synapse/config/server.py b/synapse/config/server.py
index c1c7c0105e..4200f10da3 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
-# Copyright 2017 New Vector Ltd
+# Copyright 2017-2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,13 +15,23 @@
 # limitations under the License.
 
 import logging
+import os.path
 
 from synapse.http.endpoint import parse_and_validate_server_name
+from synapse.python_dependencies import DependencyException, check_requirements
 
 from ._base import Config, ConfigError
 
 logger = logging.Logger(__name__)
 
+# by default, we attempt to listen on both '::' *and* '0.0.0.0' because some OSes
+# (Windows, macOS, other BSD/Linux where net.ipv6.bindv6only is set) will only listen
+# on IPv6 when '::' is set.
+#
+# We later check for errors when binding to 0.0.0.0 and ignore them if '::' is
+# also in the list.
+DEFAULT_BIND_ADDRESSES = ['::', '0.0.0.0']
+
 
 class ServerConfig(Config):
 
@@ -34,7 +44,6 @@ class ServerConfig(Config):
             raise ConfigError(str(e))
 
         self.pid_file = self.abspath(config.get("pid_file"))
-        self.web_client = config["web_client"]
         self.web_client_location = config.get("web_client_location", None)
         self.soft_file_limit = config["soft_file_limit"]
         self.daemonize = config.get("daemonize")
@@ -62,6 +71,11 @@ class ServerConfig(Config):
         # master, potentially causing inconsistency.
         self.enable_media_repo = config.get("enable_media_repo", True)
 
+        # whether to enable search. If disabled, new entries will not be inserted
+        # into the search tables and they will not be indexed. Users will receive
+        # errors when attempting to search for messages.
+        self.enable_search = config.get("enable_search", True)
+
         self.filter_timeline_limit = config.get("filter_timeline_limit", -1)
 
         # Whether we should block invites sent to users on this server
@@ -77,6 +91,7 @@ class ServerConfig(Config):
             self.max_mau_value = config.get(
                 "max_mau_value", 0,
             )
+        self.mau_stats_only = config.get("mau_stats_only", False)
 
         self.mau_limits_reserved_threepids = config.get(
             "mau_limit_reserved_threepids", []
@@ -111,27 +126,53 @@ class ServerConfig(Config):
                 self.public_baseurl += '/'
         self.start_pushers = config.get("start_pushers", True)
 
-        self.listeners = config.get("listeners", [])
+        self.listeners = []
+        for listener in config.get("listeners", []):
+            if not isinstance(listener.get("port", None), int):
+                raise ConfigError(
+                    "Listener configuration is lacking a valid 'port' option"
+                )
+
+            if listener.setdefault("tls", False):
+                # no_tls is not really supported any more, but let's grandfather it in
+                # here.
+                if config.get("no_tls", False):
+                    logger.info(
+                        "Ignoring TLS-enabled listener on port %i due to no_tls",
+                        listener["port"],
+                    )
+                    continue
 
-        for listener in self.listeners:
             bind_address = listener.pop("bind_address", None)
             bind_addresses = listener.setdefault("bind_addresses", [])
 
+            # if bind_address was specified, add it to the list of addresses
             if bind_address:
                 bind_addresses.append(bind_address)
-            elif not bind_addresses:
-                bind_addresses.append('')
+
+            # if we still have an empty list of addresses, use the default list
+            if not bind_addresses:
+                if listener['type'] == 'metrics':
+                    # the metrics listener doesn't support IPv6
+                    bind_addresses.append('0.0.0.0')
+                else:
+                    bind_addresses.extend(DEFAULT_BIND_ADDRESSES)
+
+            self.listeners.append(listener)
+
+        if not self.web_client_location:
+            _warn_if_webclient_configured(self.listeners)
 
         self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None))
 
         bind_port = config.get("bind_port")
         if bind_port:
+            if config.get("no_tls", False):
+                raise ConfigError("no_tls is incompatible with bind_port")
+
             self.listeners = []
             bind_host = config.get("bind_host", "")
             gzip_responses = config.get("gzip_responses", True)
 
-            names = ["client", "webclient"] if self.web_client else ["client"]
-
             self.listeners.append({
                 "port": bind_port,
                 "bind_addresses": [bind_host],
@@ -139,7 +180,7 @@ class ServerConfig(Config):
                 "type": "http",
                 "resources": [
                     {
-                        "names": names,
+                        "names": ["client"],
                         "compress": gzip_responses,
                     },
                     {
@@ -158,7 +199,7 @@ class ServerConfig(Config):
                     "type": "http",
                     "resources": [
                         {
-                            "names": names,
+                            "names": ["client"],
                             "compress": gzip_responses,
                         },
                         {
@@ -174,6 +215,7 @@ class ServerConfig(Config):
                 "port": manhole,
                 "bind_addresses": ["127.0.0.1"],
                 "type": "manhole",
+                "tls": False,
             })
 
         metrics_port = config.get("metrics_port")
@@ -197,7 +239,12 @@ class ServerConfig(Config):
                 ]
             })
 
-    def default_config(self, server_name, **kwargs):
+        _check_resource_config(self.listeners)
+
+    def has_tls_listener(self):
+        return any(l["tls"] for l in self.listeners)
+
+    def default_config(self, server_name, data_dir_path, **kwargs):
         _, bind_port = parse_and_validate_server_name(server_name)
         if bind_port is not None:
             unsecure_port = bind_port - 400
@@ -205,7 +252,7 @@ class ServerConfig(Config):
             bind_port = 8448
             unsecure_port = 8008
 
-        pid_file = self.abspath("homeserver.pid")
+        pid_file = os.path.join(data_dir_path, "homeserver.pid")
         return """\
         ## Server ##
 
@@ -239,19 +286,20 @@ class ServerConfig(Config):
         #
         # This setting requires the affinity package to be installed!
         #
-        # cpu_affinity: 0xFFFFFFFF
-
-        # Whether to serve a web client from the HTTP/HTTPS root resource.
-        web_client: True
+        #cpu_affinity: 0xFFFFFFFF
 
-        # The root directory to server for the above web client.
-        # If left undefined, synapse will serve the matrix-angular-sdk web client.
-        # Make sure matrix-angular-sdk is installed with pip if web_client is True
-        # and web_client_location is undefined
-        # web_client_location: "/path/to/web/root"
+        # The path to the web client which will be served at /_matrix/client/
+        # if 'webclient' is configured under the 'listeners' configuration.
+        #
+        #web_client_location: "/path/to/web/root"
 
-        # The public-facing base URL for the client API (not including _matrix/...)
-        # public_baseurl: https://example.com:8448/
+        # The public-facing base URL that clients use to access this HS
+        # (not including _matrix/...). This is the same URL a user would
+        # enter into the 'custom HS URL' field on their client. If you
+        # use synapse with a reverse proxy, this should be the URL to reach
+        # synapse via the proxy.
+        #
+        #public_baseurl: https://example.com/
 
         # Set the soft limit on the number of file descriptors synapse can use
         # Zero is used to indicate synapse should set the soft limit to the
@@ -262,15 +310,25 @@ class ServerConfig(Config):
         use_presence: true
 
         # The GC threshold parameters to pass to `gc.set_threshold`, if defined
-        # gc_thresholds: [700, 10, 10]
+        #
+        #gc_thresholds: [700, 10, 10]
 
         # Set the limit on the returned events in the timeline in the get
         # and sync operations. The default value is -1, meaning no upper limit.
-        # filter_timeline_limit: 5000
+        #
+        #filter_timeline_limit: 5000
 
         # Whether room invites to users on this server should be blocked
         # (except those sent by local server admins). The default is False.
-        # block_non_admin_invites: True
+        #
+        #block_non_admin_invites: True
+
+        # Room searching
+        #
+        # If disabled, new messages will not be indexed for searching and users
+        # will receive errors when searching for messages. Defaults to enabled.
+        #
+        #enable_search: false
 
         # Restrict federation to the following whitelist of domains.
         # N.B. we recommend also firewalling your federation listener to limit
@@ -278,107 +336,145 @@ class ServerConfig(Config):
         # purely on this application-layer restriction.  If not specified, the
         # default is to whitelist everything.
         #
-        # federation_domain_whitelist:
+        #federation_domain_whitelist:
         #  - lon.example.com
         #  - nyc.example.com
         #  - syd.example.com
 
         # List of ports that Synapse should listen on, their purpose and their
         # configuration.
+        #
+        # Options for each listener include:
+        #
+        #   port: the TCP port to bind to
+        #
+        #   bind_addresses: a list of local addresses to listen on. The default is
+        #       'all local interfaces'.
+        #
+        #   type: the type of listener. Normally 'http', but other valid options are:
+        #       'manhole' (see docs/manhole.md),
+        #       'metrics' (see docs/metrics-howto.rst),
+        #       'replication' (see docs/workers.rst).
+        #
+        #   tls: set to true to enable TLS for this listener. Will use the TLS
+        #       key/cert specified in tls_private_key_path / tls_certificate_path.
+        #
+        #   x_forwarded: Only valid for an 'http' listener. Set to true to use the
+        #       X-Forwarded-For header as the client IP. Useful when Synapse is
+        #       behind a reverse-proxy.
+        #
+        #   resources: Only valid for an 'http' listener. A list of resources to host
+        #       on this port. Options for each resource are:
+        #
+        #       names: a list of names of HTTP resources. See below for a list of
+        #           valid resource names.
+        #
+        #       compress: set to true to enable HTTP compression for this resource.
+        #
+        #   additional_resources: Only valid for an 'http' listener. A map of
+        #        additional endpoints which should be loaded via dynamic modules.
+        #
+        # Valid resource names are:
+        #
+        #   client: the client-server API (/_matrix/client). Also implies 'media' and
+        #       'static'.
+        #
+        #   consent: user consent forms (/_matrix/consent). See
+        #       docs/consent_tracking.md.
+        #
+        #   federation: the server-server API (/_matrix/federation). Also implies
+        #       'media', 'keys', 'openid'.
+        #
+        #   keys: the key discovery API (/_matrix/keys).
+        #
+        #   media: the media API (/_matrix/media).
+        #
+        #   metrics: the metrics interface. See docs/metrics-howto.rst.
+        #
+        #   openid: OpenID authentication.
+        #
+        #   replication: the HTTP replication API (/_synapse/replication). See
+        #       docs/workers.rst.
+        #
+        #   static: static resources under synapse/static (/_matrix/static). (Mostly
+        #       useful for 'fallback authentication'.)
+        #
+        #   webclient: A web client. Requires web_client_location to be set.
+        #
         listeners:
-          # Main HTTPS listener
-          # For when matrix traffic is sent directly to synapse.
-          -
-            # The port to listen for HTTPS requests on.
-            port: %(bind_port)s
-
-            # Local addresses to listen on.
-            # On Linux and Mac OS, `::` will listen on all IPv4 and IPv6
-            # addresses by default. For most other OSes, this will only listen
-            # on IPv6.
-            bind_addresses:
-              - '::'
-              - '0.0.0.0'
-
-            # This is a 'http' listener, allows us to specify 'resources'.
-            type: http
-
-            tls: true
-
-            # Use the X-Forwarded-For (XFF) header as the client IP and not the
-            # actual client IP.
-            x_forwarded: false
-
-            # List of HTTP resources to serve on this listener.
-            resources:
-              -
-                # List of resources to host on this listener.
-                names:
-                  - client     # The client-server APIs, both v1 and v2
-                  - webclient  # The bundled webclient.
-
-                # Should synapse compress HTTP responses to clients that support it?
-                # This should be disabled if running synapse behind a load balancer
-                # that can do automatic compression.
-                compress: true
-
-              - names: [federation]  # Federation APIs
-                compress: false
-
-            # optional list of additional endpoints which can be loaded via
-            # dynamic modules
-            # additional_resources:
-            #   "/_matrix/my/custom/endpoint":
-            #     module: my_module.CustomRequestHandler
-            #     config: {}
-
-          # Unsecure HTTP listener,
-          # For when matrix traffic passes through loadbalancer that unwraps TLS.
+          # TLS-enabled listener: for when matrix traffic is sent directly to synapse.
+          #
+          # Disabled by default. To enable it, uncomment the following. (Note that you
+          # will also need to give Synapse a TLS key and certificate: see the TLS section
+          # below.)
+          #
+          #- port: %(bind_port)s
+          #  type: http
+          #  tls: true
+          #  resources:
+          #    - names: [client, federation]
+
+          # Unsecure HTTP listener: for when matrix traffic passes through a reverse proxy
+          # that unwraps TLS.
+          #
+          # If you plan to use a reverse proxy, please see
+          # https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.rst.
+          #
           - port: %(unsecure_port)s
             tls: false
-            bind_addresses: ['::', '0.0.0.0']
+            bind_addresses: ['::1', '127.0.0.1']
             type: http
-
-            x_forwarded: false
+            x_forwarded: true
 
             resources:
-              - names: [client, webclient]
-                compress: true
-              - names: [federation]
+              - names: [client, federation]
                 compress: false
 
+            # example additional_resources:
+            #
+            #additional_resources:
+            #  "/_matrix/my/custom/endpoint":
+            #    module: my_module.CustomRequestHandler
+            #    config: {}
+
           # Turn on the twisted ssh manhole service on localhost on the given
           # port.
-          # - port: 9000
-          #   bind_addresses: ['::1', '127.0.0.1']
-          #   type: manhole
+          #
+          #- port: 9000
+          #  bind_addresses: ['::1', '127.0.0.1']
+          #  type: manhole
 
 
-          # Homeserver blocking
-          #
-          # How to reach the server admin, used in ResourceLimitError
-          # admin_contact: 'mailto:admin@server.com'
-          #
-          # Global block config
-          #
-          # hs_disabled: False
-          # hs_disabled_message: 'Human readable reason for why the HS is blocked'
-          # hs_disabled_limit_type: 'error code(str), to help clients decode reason'
-          #
-          # Monthly Active User Blocking
-          #
-          # Enables monthly active user checking
-          # limit_usage_by_mau: False
-          # max_mau_value: 50
-          # mau_trial_days: 2
-          #
-          # Sometimes the server admin will want to ensure certain accounts are
-          # never blocked by mau checking. These accounts are specified here.
-          #
-          # mau_limit_reserved_threepids:
-          # - medium: 'email'
-          #   address: 'reserved_user@example.com'
+        ## Homeserver blocking ##
 
+        # How to reach the server admin, used in ResourceLimitError
+        #
+        #admin_contact: 'mailto:admin@server.com'
+
+        # Global blocking
+        #
+        #hs_disabled: False
+        #hs_disabled_message: 'Human readable reason for why the HS is blocked'
+        #hs_disabled_limit_type: 'error code(str), to help clients decode reason'
+
+        # Monthly Active User Blocking
+        #
+        #limit_usage_by_mau: False
+        #max_mau_value: 50
+        #mau_trial_days: 2
+
+        # If enabled, the metrics for the number of monthly active users will
+        # be populated; however, no one will be limited. If limit_usage_by_mau
+        # is true, this is implied to be true.
+        #
+        #mau_stats_only: False
+
+        # Sometimes the server admin will want to ensure certain accounts are
+        # never blocked by mau checking. These accounts are specified here.
+        #
+        #mau_limit_reserved_threepids:
+        #  - medium: 'email'
+        #    address: 'reserved_user@example.com'
         """ % locals()
 
     def read_arguments(self, args):
@@ -404,19 +500,18 @@ class ServerConfig(Config):
                                   " service on the given port.")
 
 
-def is_threepid_reserved(config, threepid):
+def is_threepid_reserved(reserved_threepids, threepid):
     """Check the threepid against the reserved threepid config
     Args:
-        config(ServerConfig) - to access server config attributes
+        reserved_threepids([dict]) - list of reserved threepids
         threepid(dict) - The threepid to test for
 
     Returns:
         boolean: whether the threepid under test is a reserved user
     """
 
-    for tp in config.mau_limits_reserved_threepids:
-        if (threepid['medium'] == tp['medium']
-                and threepid['address'] == tp['address']):
+    for tp in reserved_threepids:
+        if (threepid['medium'] == tp['medium'] and threepid['address'] == tp['address']):
             return True
     return False
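
Example use of the refactored helper, which now takes the reserved list
directly instead of the whole config object (addresses are illustrative):

    from synapse.config.server import is_threepid_reserved

    reserved = [{'medium': 'email', 'address': 'reserved_user@example.com'}]
    print(is_threepid_reserved(
        reserved, {'medium': 'email', 'address': 'reserved_user@example.com'}
    ))  # True
    print(is_threepid_reserved(
        reserved, {'medium': 'email', 'address': 'other@example.com'}
    ))  # False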
 
@@ -436,3 +531,53 @@ def read_gc_thresholds(thresholds):
         raise ConfigError(
             "Value of `gc_threshold` must be a list of three integers if set"
         )
+
+
+NO_MORE_WEB_CLIENT_WARNING = """
+Synapse no longer includes a web client. To enable a web client, configure
+web_client_location. To remove this warning, remove 'webclient' from the 'listeners'
+configuration.
+"""
+
+
+def _warn_if_webclient_configured(listeners):
+    for listener in listeners:
+        for res in listener.get("resources", []):
+            for name in res.get("names", []):
+                if name == 'webclient':
+                    logger.warning(NO_MORE_WEB_CLIENT_WARNING)
+                    return
+
+
+KNOWN_RESOURCES = (
+    'client',
+    'consent',
+    'federation',
+    'keys',
+    'media',
+    'metrics',
+    'openid',
+    'replication',
+    'static',
+    'webclient',
+)
+
+
+def _check_resource_config(listeners):
+    resource_names = set(
+        res_name
+        for listener in listeners
+        for res in listener.get("resources", [])
+        for res_name in res.get("names", [])
+    )
+
+    for resource in resource_names:
+        if resource not in KNOWN_RESOURCES:
+            raise ConfigError(
+                "Unknown listener resource '%s'" % (resource, )
+            )
+        if resource == "consent":
+            try:
+                check_requirements('resources.consent')
+            except DependencyException as e:
+                raise ConfigError(e.message)
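
A short sketch of how the validation above behaves: unknown resource names
are rejected outright (the listener dict mirrors the sample config, and the
misspelt name is deliberate):

    from synapse.config._base import ConfigError
    from synapse.config.server import _check_resource_config

    listeners = [{
        "port": 8008, "type": "http",
        "resources": [{"names": ["client", "webclietn"]}],
    }]
    try:
        _check_resource_config(listeners)
    except ConfigError as e:
        print(e)  # Unknown listener resource 'webclietn'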
diff --git a/synapse/config/server_notices_config.py b/synapse/config/server_notices_config.py
index 3c39850ac6..529dc0a617 100644
--- a/synapse/config/server_notices_config.py
+++ b/synapse/config/server_notices_config.py
@@ -30,11 +30,11 @@ DEFAULT_CONFIG = """\
 # It's also possible to override the room name, the display name of the
 # "notices" user, and the avatar for the user.
 #
-# server_notices:
-#   system_mxid_localpart: notices
-#   system_mxid_display_name: "Server Notices"
-#   system_mxid_avatar_url: "mxc://server.com/oumMVlgDnLYFaPVkExemNVVZ"
-#   room_name: "Server Notices"
+#server_notices:
+#  system_mxid_localpart: notices
+#  system_mxid_display_name: "Server Notices"
+#  system_mxid_avatar_url: "mxc://server.com/oumMVlgDnLYFaPVkExemNVVZ"
+#  room_name: "Server Notices"
 """
 
 
diff --git a/synapse/config/spam_checker.py b/synapse/config/spam_checker.py
index 3fec42bdb0..1502e9faba 100644
--- a/synapse/config/spam_checker.py
+++ b/synapse/config/spam_checker.py
@@ -28,8 +28,8 @@ class SpamCheckerConfig(Config):
 
     def default_config(self, **kwargs):
         return """\
-        # spam_checker:
-        #     module: "my_custom_project.SuperSpamChecker"
-        #     config:
-        #         example_option: 'things'
+        #spam_checker:
+        #  module: "my_custom_project.SuperSpamChecker"
+        #  config:
+        #    example_option: 'things'
         """
diff --git a/synapse/config/tls.py b/synapse/config/tls.py
index fef1ea99cb..40045de7ac 100644
--- a/synapse/config/tls.py
+++ b/synapse/config/tls.py
@@ -13,51 +13,62 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import logging
 import os
-import subprocess
+import warnings
+from datetime import datetime
 from hashlib import sha256
 
+import six
+
 from unpaddedbase64 import encode_base64
 
 from OpenSSL import crypto
 
-from ._base import Config
+from synapse.config._base import Config, ConfigError
 
-GENERATE_DH_PARAMS = False
+logger = logging.getLogger(__name__)
 
 
 class TlsConfig(Config):
     def read_config(self, config):
-        self.tls_certificate = self.read_tls_certificate(
-            config.get("tls_certificate_path")
-        )
-        self.tls_certificate_file = config.get("tls_certificate_path")
 
-        self.no_tls = config.get("no_tls", False)
+        acme_config = config.get("acme", None)
+        if acme_config is None:
+            acme_config = {}
 
-        if self.no_tls:
-            self.tls_private_key = None
-        else:
-            self.tls_private_key = self.read_tls_private_key(
-                config.get("tls_private_key_path")
-            )
+        self.acme_enabled = acme_config.get("enabled", False)
 
-        self.tls_dh_params_path = self.check_file(
-            config.get("tls_dh_params_path"), "tls_dh_params"
-        )
+        # hyperlink complains on py2 if this is not a Unicode string
+        self.acme_url = six.text_type(acme_config.get(
+            "url", u"https://acme-v01.api.letsencrypt.org/directory"
+        ))
+        self.acme_port = acme_config.get("port", 80)
+        self.acme_bind_addresses = acme_config.get("bind_addresses", ['::', '0.0.0.0'])
+        self.acme_reprovision_threshold = acme_config.get("reprovision_threshold", 30)
+        self.acme_domain = acme_config.get("domain", config.get("server_name"))
 
-        self.tls_fingerprints = config["tls_fingerprints"]
+        self.tls_certificate_file = self.abspath(config.get("tls_certificate_path"))
+        self.tls_private_key_file = self.abspath(config.get("tls_private_key_path"))
 
-        # Check that our own certificate is included in the list of fingerprints
-        # and include it if it is not.
-        x509_certificate_bytes = crypto.dump_certificate(
-            crypto.FILETYPE_ASN1,
-            self.tls_certificate
-        )
-        sha256_fingerprint = encode_base64(sha256(x509_certificate_bytes).digest())
-        sha256_fingerprints = set(f["sha256"] for f in self.tls_fingerprints)
-        if sha256_fingerprint not in sha256_fingerprints:
-            self.tls_fingerprints.append({u"sha256": sha256_fingerprint})
+        if self.has_tls_listener():
+            if not self.tls_certificate_file:
+                raise ConfigError(
+                    "tls_certificate_path must be specified if TLS-enabled listeners are "
+                    "configured."
+                )
+            if not self.tls_private_key_file:
+                raise ConfigError(
+                    "tls_private_key_path must be specified if TLS-enabled listeners are "
+                    "configured."
+                )
+
+        self._original_tls_fingerprints = config.get("tls_fingerprints", [])
+
+        if self._original_tls_fingerprints is None:
+            self._original_tls_fingerprints = []
+
+        self.tls_fingerprints = list(self._original_tls_fingerprints)
 
         # This config option applies to non-federation HTTP clients
         # (e.g. for talking to recaptcha, identity servers, and such)
@@ -67,29 +78,176 @@ class TlsConfig(Config):
             "use_insecure_ssl_client_just_for_testing_do_not_use"
         )
 
+        self.tls_certificate = None
+        self.tls_private_key = None
+
+    def is_disk_cert_valid(self, allow_self_signed=True):
+        """
+        Is the certificate we have on disk valid, and if so, for how long?
+
+        Args:
+            allow_self_signed (bool): Should we allow the certificate we
+                read to be self signed?
+
+        Returns:
+            int: Days remaining of certificate validity.
+            None: No certificate exists.
+        """
+        if not os.path.exists(self.tls_certificate_file):
+            return None
+
+        try:
+            with open(self.tls_certificate_file, 'rb') as f:
+                cert_pem = f.read()
+        except Exception:
+            logger.exception("Failed to read existing certificate off disk!")
+            raise
+
+        try:
+            tls_certificate = crypto.load_certificate(crypto.FILETYPE_PEM, cert_pem)
+        except Exception:
+            logger.exception("Failed to parse existing certificate off disk!")
+            raise
+
+        if not allow_self_signed:
+            if tls_certificate.get_subject() == tls_certificate.get_issuer():
+                raise ValueError(
+                    "TLS Certificate is self signed, and this is not permitted"
+                )
+
+        # YYYYMMDDhhmmssZ -- in UTC
+        expires_on = datetime.strptime(
+            tls_certificate.get_notAfter().decode('ascii'), "%Y%m%d%H%M%SZ"
+        )
+        now = datetime.utcnow()
+        days_remaining = (expires_on - now).days
+        return days_remaining
+
+    def read_certificate_from_disk(self, require_cert_and_key):
+        """
+        Read the certificates and private key from disk.
+
+        Args:
+            require_cert_and_key (bool): set to True to throw an error if the certificate
+                and key file are not given
+        """
+        if require_cert_and_key:
+            self.tls_private_key = self.read_tls_private_key()
+            self.tls_certificate = self.read_tls_certificate()
+        elif self.tls_certificate_file:
+            # we only need the certificate for the tls_fingerprints. Reload it if we
+            # can, but it's not a fatal error if we can't.
+            try:
+                self.tls_certificate = self.read_tls_certificate()
+            except Exception as e:
+                logger.info(
+                    "Unable to read TLS certificate (%s). Ignoring as no "
+                    "tls listeners enabled.", e,
+                )
+
+        self.tls_fingerprints = list(self._original_tls_fingerprints)
+
+        if self.tls_certificate:
+            # Check that our own certificate is included in the list of fingerprints
+            # and include it if it is not.
+            x509_certificate_bytes = crypto.dump_certificate(
+                crypto.FILETYPE_ASN1, self.tls_certificate
+            )
+            sha256_fingerprint = encode_base64(sha256(x509_certificate_bytes).digest())
+            sha256_fingerprints = set(f["sha256"] for f in self.tls_fingerprints)
+            if sha256_fingerprint not in sha256_fingerprints:
+                self.tls_fingerprints.append({u"sha256": sha256_fingerprint})
+
     def default_config(self, config_dir_path, server_name, **kwargs):
         base_key_name = os.path.join(config_dir_path, server_name)
 
         tls_certificate_path = base_key_name + ".tls.crt"
         tls_private_key_path = base_key_name + ".tls.key"
-        tls_dh_params_path = base_key_name + ".tls.dh"
 
-        return """\
-        # PEM encoded X509 certificate for TLS.
-        # You can replace the self-signed certificate that synapse
-        # autogenerates on launch with your own SSL certificate + key pair
-        # if you like.  Any required intermediary certificates can be
-        # appended after the primary certificate in hierarchical order.
-        tls_certificate_path: "%(tls_certificate_path)s"
+        # this is to avoid the max line length. Sorrynotsorry
+        proxypassline = (
+            'ProxyPass /.well-known/acme-challenge '
+            'http://localhost:8009/.well-known/acme-challenge'
+        )
+
+        return (
+            """\
+        ## TLS ##
+
+        # PEM-encoded X509 certificate for TLS.
+        # This certificate, as of Synapse 1.0, will need to be a valid and verifiable
+        # certificate, signed by a recognised Certificate Authority.
+        #
+        # See 'ACME support' below to enable auto-provisioning this certificate via
+        # Let's Encrypt.
+        #
+        #tls_certificate_path: "%(tls_certificate_path)s"
+
+        # PEM-encoded private key for TLS
+        #
+        #tls_private_key_path: "%(tls_private_key_path)s"
 
-        # PEM encoded private key for TLS
-        tls_private_key_path: "%(tls_private_key_path)s"
+        # ACME support: This will configure Synapse to request a valid TLS certificate
+        # for your configured `server_name` via Let's Encrypt.
+        #
+        # Note that provisioning a certificate in this way requires port 80 to be
+        # routed to Synapse so that it can complete the http-01 ACME challenge.
+        # By default, if you enable ACME support, Synapse will attempt to listen on
+        # port 80 for incoming http-01 challenges - however, this will likely fail
+        # with 'Permission denied' or a similar error.
+        #
+        # There are a couple of potential solutions to this:
+        #
+        #  * If you already have an Apache, Nginx, or similar listening on port 80,
+        #    you can configure Synapse to use an alternate port, and have your web
+        #    server forward the requests. For example, assuming you set 'port: 8009'
+        #    below, on Apache, you would write:
+        #
+        #    %(proxypassline)s
+        #
+        #  * Alternatively, you can use something like `authbind` to give Synapse
+        #    permission to listen on port 80.
+        #
+        acme:
+            # ACME support is disabled by default. Uncomment the following line
+            # (and tls_certificate_path and tls_private_key_path above) to enable it.
+            #
+            #enabled: true
 
-        # PEM dh parameters for ephemeral keys
-        tls_dh_params_path: "%(tls_dh_params_path)s"
+            # Endpoint to use to request certificates. If you only want to test,
+            # use Let's Encrypt's staging url:
+            #     https://acme-staging.api.letsencrypt.org/directory
+            #
+            #url: https://acme-v01.api.letsencrypt.org/directory
 
-        # Don't bind to the https port
-        no_tls: False
+            # Port number to listen on for the HTTP-01 challenge. Change this if
+            # you are forwarding connections through Apache/Nginx/etc.
+            #
+            #port: 80
+
+            # Local addresses to listen on for incoming connections.
+            # Again, you may want to change this if you are forwarding connections
+            # through Apache/Nginx/etc.
+            #
+            #bind_addresses: ['::', '0.0.0.0']
+
+            # How many days remaining on a certificate before it is renewed.
+            #
+            #reprovision_threshold: 30
+
+            # The domain that the certificate should be for. Normally this
+            # should be the same as your Matrix domain (i.e., 'server_name'), but,
+            # by putting a file at 'https://<server_name>/.well-known/matrix/server',
+            # you can delegate incoming traffic to another server. If you do that,
+            # you should give the target of the delegation here.
+            #
+            # For example: if your 'server_name' is 'example.com', but
+            # 'https://example.com/.well-known/matrix/server' delegates to
+            # 'matrix.example.com', you should put 'matrix.example.com' here.
+            #
+            # If not set, defaults to your 'server_name'.
+            #
+            #domain: matrix.example.com
 
         # List of allowed TLS fingerprints for this server to publish along
         # with the signing keys for this server. Other matrix servers that
@@ -116,80 +274,44 @@ class TlsConfig(Config):
         #   openssl x509 -outform DER | openssl sha256 -binary | base64 | tr -d '='
         # or by checking matrix.org/federationtester/api/report?server_name=$host
         #
-        tls_fingerprints: []
-        # tls_fingerprints: [{"sha256": "<base64_encoded_sha256_fingerprint>"}]
-        """ % locals()
+        #tls_fingerprints: [{"sha256": "<base64_encoded_sha256_fingerprint>"}]
 
-    def read_tls_certificate(self, cert_path):
-        cert_pem = self.read_file(cert_path, "tls_certificate")
-        return crypto.load_certificate(crypto.FILETYPE_PEM, cert_pem)
+        """
+            % locals()
+        )
 
-    def read_tls_private_key(self, private_key_path):
-        private_key_pem = self.read_file(private_key_path, "tls_private_key")
-        return crypto.load_privatekey(crypto.FILETYPE_PEM, private_key_pem)
+    def read_tls_certificate(self):
+        """Reads the TLS certificate from the configured file, and returns it
 
-    def generate_files(self, config):
-        tls_certificate_path = config["tls_certificate_path"]
-        tls_private_key_path = config["tls_private_key_path"]
-        tls_dh_params_path = config["tls_dh_params_path"]
-
-        if not self.path_exists(tls_private_key_path):
-            with open(tls_private_key_path, "wb") as private_key_file:
-                tls_private_key = crypto.PKey()
-                tls_private_key.generate_key(crypto.TYPE_RSA, 2048)
-                private_key_pem = crypto.dump_privatekey(
-                    crypto.FILETYPE_PEM, tls_private_key
-                )
-                private_key_file.write(private_key_pem)
-        else:
-            with open(tls_private_key_path) as private_key_file:
-                private_key_pem = private_key_file.read()
-                tls_private_key = crypto.load_privatekey(
-                    crypto.FILETYPE_PEM, private_key_pem
+        Also checks if it is self-signed, and warns if so
+
+        Returns:
+            OpenSSL.crypto.X509: the certificate
+        """
+        cert_path = self.tls_certificate_file
+        logger.info("Loading TLS certificate from %s", cert_path)
+        cert_pem = self.read_file(cert_path, "tls_certificate_path")
+        cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_pem)
+
+        # Check if it is self-signed, and issue a warning if so.
+        if cert.get_issuer() == cert.get_subject():
+            warnings.warn(
+                (
+                    "Self-signed TLS certificates will not be accepted by Synapse 1.0. "
+                    "Please either provide a valid certificate, or use Synapse's ACME "
+                    "support to provision one."
                 )
+            )
 
-        if not self.path_exists(tls_certificate_path):
-            with open(tls_certificate_path, "wb") as certificate_file:
-                cert = crypto.X509()
-                subject = cert.get_subject()
-                subject.CN = config["server_name"]
-
-                cert.set_serial_number(1000)
-                cert.gmtime_adj_notBefore(0)
-                cert.gmtime_adj_notAfter(10 * 365 * 24 * 60 * 60)
-                cert.set_issuer(cert.get_subject())
-                cert.set_pubkey(tls_private_key)
-
-                cert.sign(tls_private_key, 'sha256')
-
-                cert_pem = crypto.dump_certificate(crypto.FILETYPE_PEM, cert)
-
-                certificate_file.write(cert_pem)
-
-        if not self.path_exists(tls_dh_params_path):
-            if GENERATE_DH_PARAMS:
-                subprocess.check_call([
-                    "openssl", "dhparam",
-                    "-outform", "PEM",
-                    "-out", tls_dh_params_path,
-                    "2048"
-                ])
-            else:
-                with open(tls_dh_params_path, "w") as dh_params_file:
-                    dh_params_file.write(
-                        "2048-bit DH parameters taken from rfc3526\n"
-                        "-----BEGIN DH PARAMETERS-----\n"
-                        "MIIBCAKCAQEA///////////JD9qiIWjC"
-                        "NMTGYouA3BzRKQJOCIpnzHQCC76mOxOb\n"
-                        "IlFKCHmONATd75UZs806QxswKwpt8l8U"
-                        "N0/hNW1tUcJF5IW1dmJefsb0TELppjft\n"
-                        "awv/XLb0Brft7jhr+1qJn6WunyQRfEsf"
-                        "5kkoZlHs5Fs9wgB8uKFjvwWY2kg2HFXT\n"
-                        "mmkWP6j9JM9fg2VdI9yjrZYcYvNWIIVS"
-                        "u57VKQdwlpZtZww1Tkq8mATxdGwIyhgh\n"
-                        "fDKQXkYuNs474553LBgOhgObJ4Oi7Aei"
-                        "j7XFXfBvTFLJ3ivL9pVYFxg5lUl86pVq\n"
-                        "5RXSJhiY+gUQFXKOWoqsqmj/////////"
-                        "/wIBAg==\n"
-                        "-----END DH PARAMETERS-----\n"
-                    )
+        return cert
+
+    def read_tls_private_key(self):
+        """Reads the TLS private key from the configured file, and returns it
+
+        Returns:
+            OpenSSL.crypto.PKey: the private key
+        """
+        private_key_path = self.tls_private_key_file
+        logger.info("Loading TLS key from %s", private_key_path)
+        private_key_pem = self.read_file(private_key_path, "tls_private_key_path")
+        return crypto.load_privatekey(crypto.FILETYPE_PEM, private_key_pem)
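
The expiry check in is_disk_cert_valid boils down to parsing the
certificate's notAfter timestamp and counting whole days. A standalone
sketch with an illustrative timestamp:

    from datetime import datetime

    not_after = b"20301231235959Z"  # YYYYMMDDhhmmssZ, in UTC
    expires_on = datetime.strptime(not_after.decode('ascii'), "%Y%m%d%H%M%SZ")
    days_remaining = (expires_on - datetime.utcnow()).days
    # e.g. compare against the ACME reprovision_threshold (default 30)
    print(days_remaining > 30)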
diff --git a/synapse/config/user_directory.py b/synapse/config/user_directory.py
index eba99976ac..08352f6c87 100644
--- a/synapse/config/user_directory.py
+++ b/synapse/config/user_directory.py
@@ -50,4 +50,5 @@ class UserDirectoryConfig(Config):
         #   of synapse performing the search itself.
         #   This is an experimental API.
         #   defer_to_id_server: https://id.example.com
+        #  search_all_users: false
         """
diff --git a/synapse/config/voip.py b/synapse/config/voip.py
index d07bd24ffd..257f7c86e7 100644
--- a/synapse/config/voip.py
+++ b/synapse/config/voip.py
@@ -27,20 +27,24 @@ class VoipConfig(Config):
 
     def default_config(self, **kwargs):
         return """\
-        ## Turn ##
+        ## TURN ##
 
         # The public URIs of the TURN server to give to clients
+        #
         #turn_uris: []
 
         # The shared secret used to compute passwords for the TURN server
+        #
         #turn_shared_secret: "YOUR_SHARED_SECRET"
 
         # The Username and password if the TURN server needs them and
         # does not use a token
+        #
         #turn_username: "TURNSERVER_USERNAME"
         #turn_password: "TURNSERVER_PASSWORD"
 
         # How long generated TURN credentials last
+        #
         turn_user_lifetime: "1h"
 
         # Whether guests should be allowed to use the TURN server.
@@ -48,5 +52,6 @@ class VoipConfig(Config):
         # However, it does introduce a slight security risk as it allows users to
         # connect to arbitrary endpoints without having first signed up for a
         # valid account (e.g. by passing a CAPTCHA).
+        #
         turn_allow_guests: True
         """
diff --git a/synapse/crypto/context_factory.py b/synapse/crypto/context_factory.py
index 02b76dfcfb..49cbc7098f 100644
--- a/synapse/crypto/context_factory.py
+++ b/synapse/crypto/context_factory.py
@@ -1,4 +1,5 @@
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2019 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -11,12 +12,14 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
 import logging
 
 from zope.interface import implementer
 
 from OpenSSL import SSL, crypto
 from twisted.internet._sslverify import _defaultCurveName
+from twisted.internet.abstract import isIPAddress, isIPv6Address
 from twisted.internet.interfaces import IOpenSSLClientConnectionCreator
 from twisted.internet.ssl import CertificateOptions, ContextFactory
 from twisted.python.failure import Failure
@@ -42,12 +45,12 @@ class ServerContextFactory(ContextFactory):
             logger.exception("Failed to enable elliptic curve for TLS")
         context.set_options(SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3)
         context.use_certificate_chain_file(config.tls_certificate_file)
+        context.use_privatekey(config.tls_private_key)
 
-        if not config.no_tls:
-            context.use_privatekey(config.tls_private_key)
-
-        context.load_tmp_dh(config.tls_dh_params_path)
-        context.set_cipher_list("!ADH:HIGH+kEDH:!AECDH:HIGH+kEECDH")
+        # https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
+        context.set_cipher_list(
+            "ECDH+AESGCM:ECDH+CHACHA20:ECDH+AES256:ECDH+AES128:!aNULL:!SHA1"
+        )
 
     def getContext(self):
         return self._context
@@ -96,11 +99,15 @@ class ClientTLSOptions(object):
 
     def __init__(self, hostname, ctx):
         self._ctx = ctx
-        self._hostname = hostname
-        self._hostnameBytes = _idnaBytes(hostname)
-        ctx.set_info_callback(
-            _tolerateErrors(self._identityVerifyingInfoCallback)
-        )
+
+        if isIPAddress(hostname) or isIPv6Address(hostname):
+            self._hostnameBytes = hostname.encode('ascii')
+            self._sendSNI = False
+        else:
+            self._hostnameBytes = _idnaBytes(hostname)
+            self._sendSNI = True
+
+        ctx.set_info_callback(_tolerateErrors(self._identityVerifyingInfoCallback))
 
     def clientConnectionForTLS(self, tlsProtocol):
         context = self._ctx
@@ -109,7 +116,9 @@ class ClientTLSOptions(object):
         return connection
 
     def _identityVerifyingInfoCallback(self, connection, where, ret):
-        if where & SSL.SSL_CB_HANDSHAKE_START:
+        # Literal IPv4 and IPv6 addresses are not permitted
+        # as host names according to the RFCs
+        if where & SSL.SSL_CB_HANDSHAKE_START and self._sendSNI:
             connection.set_tlsext_host_name(self._hostnameBytes)
 
 
@@ -119,10 +128,8 @@ class ClientTLSOptionsFactory(object):
 
     def __init__(self, config):
         # We don't use config options yet
-        pass
+        self._options = CertificateOptions(verify=False)
 
     def get_options(self, host):
-        return ClientTLSOptions(
-            host,
-            CertificateOptions(verify=False).getContext()
-        )
+        # Use _makeContext so that we get a fresh OpenSSL CTX each time.
+        return ClientTLSOptions(host, self._options._makeContext())
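
The SNI decision above can be exercised directly with twisted's address
helpers: literal IPv4/IPv6 addresses never get an SNI extension, DNS names
do.

    from twisted.internet.abstract import isIPAddress, isIPv6Address

    for hostname in ["matrix.org", "192.168.1.1", "::1"]:
        send_sni = not (isIPAddress(hostname) or isIPv6Address(hostname))
        print(hostname, "-> send SNI:", send_sni)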
diff --git a/synapse/crypto/event_signing.py b/synapse/crypto/event_signing.py
index 8774b28967..1dfa727fcf 100644
--- a/synapse/crypto/event_signing.py
+++ b/synapse/crypto/event_signing.py
@@ -23,14 +23,14 @@ from signedjson.sign import sign_json
 from unpaddedbase64 import decode_base64, encode_base64
 
 from synapse.api.errors import Codes, SynapseError
-from synapse.events.utils import prune_event
+from synapse.events.utils import prune_event, prune_event_dict
 
 logger = logging.getLogger(__name__)
 
 
 def check_event_content_hash(event, hash_algorithm=hashlib.sha256):
     """Check whether the hash for this PDU matches the contents"""
-    name, expected_hash = compute_content_hash(event, hash_algorithm)
+    name, expected_hash = compute_content_hash(event.get_pdu_json(), hash_algorithm)
     logger.debug("Expecting hash: %s", encode_base64(expected_hash))
 
     # some malformed events lack a 'hashes'. Protect against it being missing
@@ -59,35 +59,70 @@ def check_event_content_hash(event, hash_algorithm=hashlib.sha256):
     return message_hash_bytes == expected_hash
 
 
-def compute_content_hash(event, hash_algorithm):
-    event_json = event.get_pdu_json()
-    event_json.pop("age_ts", None)
-    event_json.pop("unsigned", None)
-    event_json.pop("signatures", None)
-    event_json.pop("hashes", None)
-    event_json.pop("outlier", None)
-    event_json.pop("destinations", None)
+def compute_content_hash(event_dict, hash_algorithm):
+    """Compute the content hash of an event, which is the hash of the
+    unredacted event.
 
-    event_json_bytes = encode_canonical_json(event_json)
+    Args:
+        event_dict (dict): The unredacted event as a dict
+        hash_algorithm: A hasher from `hashlib`, e.g. hashlib.sha256, to use
+            to hash the event
+
+    Returns:
+        tuple[str, bytes]: A tuple of the name of hash and the hash as raw
+        bytes.
+    """
+    event_dict = dict(event_dict)
+    event_dict.pop("age_ts", None)
+    event_dict.pop("unsigned", None)
+    event_dict.pop("signatures", None)
+    event_dict.pop("hashes", None)
+    event_dict.pop("outlier", None)
+    event_dict.pop("destinations", None)
+
+    event_json_bytes = encode_canonical_json(event_dict)
 
     hashed = hash_algorithm(event_json_bytes)
     return (hashed.name, hashed.digest())
 
 
 def compute_event_reference_hash(event, hash_algorithm=hashlib.sha256):
+    """Computes the event reference hash. This is the hash of the redacted
+    event.
+
+    Args:
+        event (FrozenEvent)
+        hash_algorithm: A hasher from `hashlib`, e.g. hashlib.sha256, to use
+            to hash the event
+
+    Returns:
+        tuple[str, bytes]: A tuple of the name of hash and the hash as raw
+        bytes.
+    """
     tmp_event = prune_event(event)
-    event_json = tmp_event.get_pdu_json()
-    event_json.pop("signatures", None)
-    event_json.pop("age_ts", None)
-    event_json.pop("unsigned", None)
-    event_json_bytes = encode_canonical_json(event_json)
+    event_dict = tmp_event.get_pdu_json()
+    event_dict.pop("signatures", None)
+    event_dict.pop("age_ts", None)
+    event_dict.pop("unsigned", None)
+    event_json_bytes = encode_canonical_json(event_dict)
     hashed = hash_algorithm(event_json_bytes)
     return (hashed.name, hashed.digest())
 
 
-def compute_event_signature(event, signature_name, signing_key):
-    tmp_event = prune_event(event)
-    redact_json = tmp_event.get_pdu_json()
+def compute_event_signature(event_dict, signature_name, signing_key):
+    """Compute the signature of the event for the given name and key.
+
+    Args:
+        event_dict (dict): The event as a dict
+        signature_name (str): The name of the entity signing the event
+            (typically the server's hostname).
+        signing_key (syutil.crypto.SigningKey): The key to sign with
+
+    Returns:
+        dict[str, dict[str, str]]: A dictionary in the same format as an
+        event's signatures field.
+    """
+    redact_json = prune_event_dict(event_dict)
     redact_json.pop("age_ts", None)
     redact_json.pop("unsigned", None)
     logger.debug("Signing event: %s", encode_canonical_json(redact_json))
@@ -96,25 +131,25 @@ def compute_event_signature(event, signature_name, signing_key):
     return redact_json["signatures"]
 
 
-def add_hashes_and_signatures(event, signature_name, signing_key,
+def add_hashes_and_signatures(event_dict, signature_name, signing_key,
                               hash_algorithm=hashlib.sha256):
-    # if hasattr(event, "old_state_events"):
-    #     state_json_bytes = encode_canonical_json(
-    #         [e.event_id for e in event.old_state_events.values()]
-    #     )
-    #     hashed = hash_algorithm(state_json_bytes)
-    #     event.state_hash = {
-    #         hashed.name: encode_base64(hashed.digest())
-    #     }
-
-    name, digest = compute_content_hash(event, hash_algorithm=hash_algorithm)
-
-    if not hasattr(event, "hashes"):
-        event.hashes = {}
-    event.hashes[name] = encode_base64(digest)
-
-    event.signatures = compute_event_signature(
-        event,
+    """Add content hash and sign the event
+
+    Args:
+        event_dict (dict): The event to add hashes to and sign
+        signature_name (str): The name of the entity signing the event
+            (typically the server's hostname).
+        signing_key (syutil.crypto.SigningKey): The key to sign with
+        hash_algorithm: A hasher from `hashlib`, e.g. hashlib.sha256, to use
+            to hash the event
+    """
+
+    name, digest = compute_content_hash(event_dict, hash_algorithm=hash_algorithm)
+
+    event_dict.setdefault("hashes", {})[name] = encode_base64(digest)
+
+    event_dict["signatures"] = compute_event_signature(
+        event_dict,
         signature_name=signature_name,
         signing_key=signing_key,
     )
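The content hash above reduces to a short, standalone computation. The following is a minimal sketch, not Synapse's implementation: it substitutes json.dumps with sorted keys and no whitespace for canonicaljson's encode_canonical_json, which is close but not guaranteed byte-identical in every edge case.

    import hashlib
    import json


    def sketch_compute_content_hash(event_dict, hash_algorithm=hashlib.sha256):
        def canonical_json(value):
            # Approximation of canonical JSON: sorted keys, no whitespace, UTF-8.
            return json.dumps(
                value, sort_keys=True, separators=(",", ":"), ensure_ascii=False
            ).encode("utf-8")

        # Strip the transient/derived fields, mirroring the hunk above.
        event_dict = dict(event_dict)
        for key in ("age_ts", "unsigned", "signatures", "hashes",
                    "outlier", "destinations"):
            event_dict.pop(key, None)

        hashed = hash_algorithm(canonical_json(event_dict))
        return hashed.name, hashed.digest()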
diff --git a/synapse/crypto/keyclient.py b/synapse/crypto/keyclient.py
deleted file mode 100644
index 080c81f14b..0000000000
--- a/synapse/crypto/keyclient.py
+++ /dev/null
@@ -1,147 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2014-2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-
-from canonicaljson import json
-
-from twisted.internet import defer, reactor
-from twisted.internet.error import ConnectError
-from twisted.internet.protocol import Factory
-from twisted.names.error import DomainError
-from twisted.web.http import HTTPClient
-
-from synapse.http.endpoint import matrix_federation_endpoint
-from synapse.util import logcontext
-
-logger = logging.getLogger(__name__)
-
-KEY_API_V1 = b"/_matrix/key/v1/"
-
-
-@defer.inlineCallbacks
-def fetch_server_key(server_name, tls_client_options_factory, path=KEY_API_V1):
-    """Fetch the keys for a remote server."""
-
-    factory = SynapseKeyClientFactory()
-    factory.path = path
-    factory.host = server_name
-    endpoint = matrix_federation_endpoint(
-        reactor, server_name, tls_client_options_factory, timeout=30
-    )
-
-    for i in range(5):
-        try:
-            with logcontext.PreserveLoggingContext():
-                protocol = yield endpoint.connect(factory)
-                server_response, server_certificate = yield protocol.remote_key
-                defer.returnValue((server_response, server_certificate))
-        except SynapseKeyClientError as e:
-            logger.warn("Error getting key for %r: %s", server_name, e)
-            if e.status.startswith(b"4"):
-                # Don't retry for 4xx responses.
-                raise IOError("Cannot get key for %r" % server_name)
-        except (ConnectError, DomainError) as e:
-            logger.warn("Error getting key for %r: %s", server_name, e)
-        except Exception:
-            logger.exception("Error getting key for %r", server_name)
-    raise IOError("Cannot get key for %r" % server_name)
-
-
-class SynapseKeyClientError(Exception):
-    """The key wasn't retrieved from the remote server."""
-    status = None
-    pass
-
-
-class SynapseKeyClientProtocol(HTTPClient):
-    """Low level HTTPS client which retrieves an application/json response from
-    the server and extracts the X.509 certificate for the remote peer from the
-    SSL connection."""
-
-    timeout = 30
-
-    def __init__(self):
-        self.remote_key = defer.Deferred()
-        self.host = None
-        self._peer = None
-
-    def connectionMade(self):
-        self._peer = self.transport.getPeer()
-        logger.debug("Connected to %s", self._peer)
-
-        if not isinstance(self.path, bytes):
-            self.path = self.path.encode('ascii')
-
-        if not isinstance(self.host, bytes):
-            self.host = self.host.encode('ascii')
-
-        self.sendCommand(b"GET", self.path)
-        if self.host:
-            self.sendHeader(b"Host", self.host)
-        self.endHeaders()
-        self.timer = reactor.callLater(
-            self.timeout,
-            self.on_timeout
-        )
-
-    def errback(self, error):
-        if not self.remote_key.called:
-            self.remote_key.errback(error)
-
-    def callback(self, result):
-        if not self.remote_key.called:
-            self.remote_key.callback(result)
-
-    def handleStatus(self, version, status, message):
-        if status != b"200":
-            # logger.info("Non-200 response from %s: %s %s",
-            #            self.transport.getHost(), status, message)
-            error = SynapseKeyClientError(
-                "Non-200 response %r from %r" % (status, self.host)
-            )
-            error.status = status
-            self.errback(error)
-            self.transport.abortConnection()
-
-    def handleResponse(self, response_body_bytes):
-        try:
-            json_response = json.loads(response_body_bytes)
-        except ValueError:
-            # logger.info("Invalid JSON response from %s",
-            #            self.transport.getHost())
-            self.transport.abortConnection()
-            return
-
-        certificate = self.transport.getPeerCertificate()
-        self.callback((json_response, certificate))
-        self.transport.abortConnection()
-        self.timer.cancel()
-
-    def on_timeout(self):
-        logger.debug(
-            "Timeout waiting for response from %s: %s",
-            self.host, self._peer,
-        )
-        self.errback(IOError("Timeout waiting for response"))
-        self.transport.abortConnection()
-
-
-class SynapseKeyClientFactory(Factory):
-    def protocol(self):
-        protocol = SynapseKeyClientProtocol()
-        protocol.path = self.path
-        protocol.host = self.host
-        return protocol
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py
index d89f94c219..7474fd515f 100644
--- a/synapse/crypto/keyring.py
+++ b/synapse/crypto/keyring.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
-# Copyright 2017 New Vector Ltd.
+# Copyright 2017, 2018 New Vector Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,10 +14,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import hashlib
 import logging
 from collections import namedtuple
 
+from six import raise_from
 from six.moves import urllib
 
 from signedjson.key import (
@@ -32,13 +32,16 @@ from signedjson.sign import (
     signature_ids,
     verify_signed_json,
 )
-from unpaddedbase64 import decode_base64, encode_base64
+from unpaddedbase64 import decode_base64
 
-from OpenSSL import crypto
 from twisted.internet import defer
 
-from synapse.api.errors import Codes, SynapseError
-from synapse.crypto.keyclient import fetch_server_key
+from synapse.api.errors import (
+    Codes,
+    HttpResponseException,
+    RequestSendFailed,
+    SynapseError,
+)
 from synapse.util import logcontext, unwrapFirstError
 from synapse.util.logcontext import (
     LoggingContext,
@@ -47,6 +50,7 @@ from synapse.util.logcontext import (
     run_in_background,
 )
 from synapse.util.metrics import Measure
+from synapse.util.retryutils import NotRetryingDestination
 
 logger = logging.getLogger(__name__)
 
@@ -370,13 +374,18 @@ class Keyring(object):
                     server_name_and_key_ids, perspective_name, perspective_keys
                 )
                 defer.returnValue(result)
+            except KeyLookupError as e:
+                logger.warning(
+                    "Key lookup failed from %r: %s", perspective_name, e,
+                )
             except Exception as e:
                 logger.exception(
                     "Unable to get key from %r: %s %s",
                     perspective_name,
                     type(e).__name__, str(e),
                 )
-                defer.returnValue({})
+
+            defer.returnValue({})
 
         results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
             [
@@ -395,32 +404,13 @@ class Keyring(object):
 
     @defer.inlineCallbacks
     def get_keys_from_server(self, server_name_and_key_ids):
-        @defer.inlineCallbacks
-        def get_key(server_name, key_ids):
-            keys = None
-            try:
-                keys = yield self.get_server_verify_key_v2_direct(
-                    server_name, key_ids
-                )
-            except Exception as e:
-                logger.info(
-                    "Unable to get key %r for %r directly: %s %s",
-                    key_ids, server_name,
-                    type(e).__name__, str(e),
-                )
-
-            if not keys:
-                keys = yield self.get_server_verify_key_v1_direct(
-                    server_name, key_ids
-                )
-
-                keys = {server_name: keys}
-
-            defer.returnValue(keys)
-
         results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
             [
-                run_in_background(get_key, server_name, key_ids)
+                run_in_background(
+                    self.get_server_verify_key_v2_direct,
+                    server_name,
+                    key_ids,
+                )
                 for server_name, key_ids in server_name_and_key_ids
             ],
             consumeErrors=True,
@@ -443,21 +433,30 @@ class Keyring(object):
         # TODO(mark): Set the minimum_valid_until_ts to that needed by
         # the events being validated or the current time if validating
         # an incoming request.
-        query_response = yield self.client.post_json(
-            destination=perspective_name,
-            path="/_matrix/key/v2/query",
-            data={
-                u"server_keys": {
-                    server_name: {
-                        key_id: {
-                            u"minimum_valid_until_ts": 0
-                        } for key_id in key_ids
+        try:
+            query_response = yield self.client.post_json(
+                destination=perspective_name,
+                path="/_matrix/key/v2/query",
+                data={
+                    u"server_keys": {
+                        server_name: {
+                            key_id: {
+                                u"minimum_valid_until_ts": 0
+                            } for key_id in key_ids
+                        }
+                        for server_name, key_ids in server_names_and_key_ids
                     }
-                    for server_name, key_ids in server_names_and_key_ids
-                }
-            },
-            long_retries=True,
-        )
+                },
+                long_retries=True,
+            )
+        except (NotRetryingDestination, RequestSendFailed) as e:
+            raise_from(
+                KeyLookupError("Failed to connect to remote server"), e,
+            )
+        except HttpResponseException as e:
+            raise_from(
+                KeyLookupError("Remote server returned an error"), e,
+            )
 
         keys = {}
 
@@ -524,34 +523,25 @@ class Keyring(object):
             if requested_key_id in keys:
                 continue
 
-            (response, tls_certificate) = yield fetch_server_key(
-                server_name, self.hs.tls_client_options_factory,
-                path=("/_matrix/key/v2/server/%s" % (
-                    urllib.parse.quote(requested_key_id),
-                )).encode("ascii"),
-            )
+            try:
+                response = yield self.client.get_json(
+                    destination=server_name,
+                    path="/_matrix/key/v2/server/" + urllib.parse.quote(requested_key_id),
+                    ignore_backoff=True,
+                )
+            except (NotRetryingDestination, RequestSendFailed) as e:
+                raise_from(
+                    KeyLookupError("Failed to connect to remote server"), e,
+                )
+            except HttpResponseException as e:
+                raise_from(
+                    KeyLookupError("Remote server returned an error"), e,
+                )
 
             if (u"signatures" not in response
                     or server_name not in response[u"signatures"]):
                 raise KeyLookupError("Key response not signed by remote server")
 
-            if "tls_fingerprints" not in response:
-                raise KeyLookupError("Key response missing TLS fingerprints")
-
-            certificate_bytes = crypto.dump_certificate(
-                crypto.FILETYPE_ASN1, tls_certificate
-            )
-            sha256_fingerprint = hashlib.sha256(certificate_bytes).digest()
-            sha256_fingerprint_b64 = encode_base64(sha256_fingerprint)
-
-            response_sha256_fingerprints = set()
-            for fingerprint in response[u"tls_fingerprints"]:
-                if u"sha256" in fingerprint:
-                    response_sha256_fingerprints.add(fingerprint[u"sha256"])
-
-            if sha256_fingerprint_b64 not in response_sha256_fingerprints:
-                raise KeyLookupError("TLS certificate not allowed by fingerprints")
-
             response_keys = yield self.process_v2_response(
                 from_server=server_name,
                 requested_ids=[requested_key_id],
@@ -657,78 +647,6 @@ class Keyring(object):
 
         defer.returnValue(results)
 
-    @defer.inlineCallbacks
-    def get_server_verify_key_v1_direct(self, server_name, key_ids):
-        """Finds a verification key for the server with one of the key ids.
-        Args:
-            server_name (str): The name of the server to fetch a key for.
-            keys_ids (list of str): The key_ids to check for.
-        """
-
-        # Try to fetch the key from the remote server.
-
-        (response, tls_certificate) = yield fetch_server_key(
-            server_name, self.hs.tls_client_options_factory
-        )
-
-        # Check the response.
-
-        x509_certificate_bytes = crypto.dump_certificate(
-            crypto.FILETYPE_ASN1, tls_certificate
-        )
-
-        if ("signatures" not in response
-                or server_name not in response["signatures"]):
-            raise KeyLookupError("Key response not signed by remote server")
-
-        if "tls_certificate" not in response:
-            raise KeyLookupError("Key response missing TLS certificate")
-
-        tls_certificate_b64 = response["tls_certificate"]
-
-        if encode_base64(x509_certificate_bytes) != tls_certificate_b64:
-            raise KeyLookupError("TLS certificate doesn't match")
-
-        # Cache the result in the datastore.
-
-        time_now_ms = self.clock.time_msec()
-
-        verify_keys = {}
-        for key_id, key_base64 in response["verify_keys"].items():
-            if is_signing_algorithm_supported(key_id):
-                key_bytes = decode_base64(key_base64)
-                verify_key = decode_verify_key_bytes(key_id, key_bytes)
-                verify_key.time_added = time_now_ms
-                verify_keys[key_id] = verify_key
-
-        for key_id in response["signatures"][server_name]:
-            if key_id not in response["verify_keys"]:
-                raise KeyLookupError(
-                    "Key response must include verification keys for all"
-                    " signatures"
-                )
-            if key_id in verify_keys:
-                verify_signed_json(
-                    response,
-                    server_name,
-                    verify_keys[key_id]
-                )
-
-        yield self.store.store_server_certificate(
-            server_name,
-            server_name,
-            time_now_ms,
-            tls_certificate,
-        )
-
-        yield self.store_keys(
-            server_name=server_name,
-            from_server=server_name,
-            verify_keys=verify_keys,
-        )
-
-        defer.returnValue(verify_keys)
-
     def store_keys(self, server_name, from_server, verify_keys):
         """Store a collection of verify keys for a given server
         Args:
@@ -768,7 +686,7 @@ def _handle_key_deferred(verify_request):
     try:
         with PreserveLoggingContext():
             _, key_id, verify_key = yield verify_request.deferred
-    except IOError as e:
+    except (IOError, RequestSendFailed) as e:
         logger.warn(
             "Got IOError when downloading keys for %s: %s %s",
             server_name, type(e).__name__, str(e),
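Most of the new keyring code above is error handling around a simple request body for /_matrix/key/v2/query. A sketch of just the payload construction; the server name in the usage comment is hypothetical, and the hard-coded minimum_valid_until_ts of 0 matches the TODO in the hunk.

    def build_key_query_body(server_names_and_key_ids):
        # server_names_and_key_ids: iterable of (server_name, [key_id, ...]).
        return {
            "server_keys": {
                server_name: {
                    key_id: {"minimum_valid_until_ts": 0}
                    for key_id in key_ids
                }
                for server_name, key_ids in server_names_and_key_ids
            }
        }

    # build_key_query_body([("example.org", ["ed25519:auto"])]) returns
    # {"server_keys": {"example.org": {"ed25519:auto": {"minimum_valid_until_ts": 0}}}}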
diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index d4d4474847..8f9e330da5 100644
--- a/synapse/event_auth.py
+++ b/synapse/event_auth.py
@@ -20,17 +20,25 @@ from signedjson.key import decode_verify_key_bytes
 from signedjson.sign import SignatureVerifyException, verify_signed_json
 from unpaddedbase64 import decode_base64
 
-from synapse.api.constants import KNOWN_ROOM_VERSIONS, EventTypes, JoinRules, Membership
+from synapse.api.constants import (
+    KNOWN_ROOM_VERSIONS,
+    EventFormatVersions,
+    EventTypes,
+    JoinRules,
+    Membership,
+    RoomVersions,
+)
 from synapse.api.errors import AuthError, EventSizeError, SynapseError
 from synapse.types import UserID, get_domain_from_id
 
 logger = logging.getLogger(__name__)
 
 
-def check(event, auth_events, do_sig_check=True, do_size_check=True):
+def check(room_version, event, auth_events, do_sig_check=True, do_size_check=True):
     """ Checks if this event is correctly authed.
 
     Args:
+        room_version (str): the version of the room
         event: the event being checked.
         auth_events (dict: event-key -> event): the existing room state.
 
@@ -48,7 +56,6 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True):
 
     if do_sig_check:
         sender_domain = get_domain_from_id(event.sender)
-        event_id_domain = get_domain_from_id(event.event_id)
 
         is_invite_via_3pid = (
             event.type == EventTypes.Member
@@ -65,9 +72,13 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True):
             if not is_invite_via_3pid:
                 raise AuthError(403, "Event not signed by sender's server")
 
-        # Check the event_id's domain has signed the event
-        if not event.signatures.get(event_id_domain):
-            raise AuthError(403, "Event not signed by sending server")
+        if event.format_version in (EventFormatVersions.V1,):
+            # Only older room versions have event IDs to check.
+            event_id_domain = get_domain_from_id(event.event_id)
+
+            # Check the origin domain has signed the event
+            if not event.signatures.get(event_id_domain):
+                raise AuthError(403, "Event not signed by sending server")
 
     if auth_events is None:
         # Oh, we don't know what the state of the room was, so we
@@ -167,7 +178,7 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True):
         _check_power_levels(event, auth_events)
 
     if event.type == EventTypes.Redaction:
-        check_redaction(event, auth_events)
+        check_redaction(room_version, event, auth_events)
 
     logger.debug("Allowing! %s", event)
 
@@ -200,11 +211,11 @@ def _is_membership_change_allowed(event, auth_events):
     membership = event.content["membership"]
 
     # Check if this is the room creator joining:
-    if len(event.prev_events) == 1 and Membership.JOIN == membership:
+    if len(event.prev_event_ids()) == 1 and Membership.JOIN == membership:
         # Get room creation event:
         key = (EventTypes.Create, "", )
         create = auth_events.get(key)
-        if create and event.prev_events[0][0] == create.event_id:
+        if create and event.prev_event_ids()[0] == create.event_id:
             if create.content["creator"] == event.state_key:
                 return
 
@@ -421,7 +432,7 @@ def _can_send_event(event, auth_events):
     return True
 
 
-def check_redaction(event, auth_events):
+def check_redaction(room_version, event, auth_events):
     """Check whether the event sender is allowed to redact the target event.
 
     Returns:
@@ -441,10 +452,16 @@ def check_redaction(event, auth_events):
     if user_level >= redact_level:
         return False
 
-    redacter_domain = get_domain_from_id(event.event_id)
-    redactee_domain = get_domain_from_id(event.redacts)
-    if redacter_domain == redactee_domain:
+    if room_version in (RoomVersions.V1, RoomVersions.V2,):
+        redacter_domain = get_domain_from_id(event.event_id)
+        redactee_domain = get_domain_from_id(event.redacts)
+        if redacter_domain == redactee_domain:
+            return True
+    elif room_version == RoomVersions.V3:
+        event.internal_metadata.recheck_redaction = True
         return True
+    else:
+        raise RuntimeError("Unrecognized room version %r" % (room_version,))
 
     raise AuthError(
         403,
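The room-version gating in check_redaction can be summarised in isolation. A minimal sketch, assuming the plain strings "1", "2" and "3" as stand-ins for the RoomVersions constants and a naive event ID parser:

    def sketch_redaction_domain_check(room_version, redacter_event_id, redactee_event_id):
        def domain(event_id):
            # v1/v2 event IDs look like "$localpart:domain".
            return event_id.split(":", 1)[1]

        if room_version in ("1", "2"):
            # v1/v2: event IDs carry a domain, so the check can happen up front.
            return domain(redacter_event_id) == domain(redactee_event_id)
        if room_version == "3":
            # v3: event IDs are hashes with no domain; accept now and flag the
            # event for a recheck when it is later fetched from the database.
            return True
        raise RuntimeError("Unrecognized room version %r" % (room_version,))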
diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py
index 12f1eb0a3e..20c1ab4203 100644
--- a/synapse/events/__init__.py
+++ b/synapse/events/__init__.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2019 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,6 +19,9 @@ from distutils.util import strtobool
 
 import six
 
+from unpaddedbase64 import encode_base64
+
+from synapse.api.constants import KNOWN_ROOM_VERSIONS, EventFormatVersions, RoomVersions
 from synapse.util.caches import intern_dict
 from synapse.util.frozenutils import freeze
 
@@ -41,8 +45,13 @@ class _EventInternalMetadata(object):
     def is_outlier(self):
         return getattr(self, "outlier", False)
 
-    def is_invite_from_remote(self):
-        return getattr(self, "invite_from_remote", False)
+    def is_out_of_band_membership(self):
+        """Whether this is an out of band membership, like an invite or an invite
+        rejection. This is needed as those events are marked as outliers, but
+        they still need to be processed as if they're new events (e.g. updating
+        invite state in the database, relaying to clients, etc).
+        """
+        return getattr(self, "out_of_band_membership", False)
 
     def get_send_on_behalf_of(self):
         """Whether this server should send the event on behalf of another server.
@@ -53,6 +62,21 @@ class _EventInternalMetadata(object):
         """
         return getattr(self, "send_on_behalf_of", None)
 
+    def need_to_check_redaction(self):
+        """Whether the redaction event needs to be rechecked when fetching
+        from the database.
+
+        Starting in room v3, redaction events are accepted up front, and later
+        checked to see if the redacter's and redactee's domains match.
+
+        If the sender of the redaction event is allowed to redact any event
+        due to auth rules, then this will always return false.
+
+        Returns:
+            bool
+        """
+        return getattr(self, "recheck_redaction", False)
+
 
 def _event_dict_property(key):
     # We want to be able to use hasattr with the event dict properties.
@@ -159,8 +183,28 @@ class EventBase(object):
     def keys(self):
         return six.iterkeys(self._event_dict)
 
+    def prev_event_ids(self):
+        """Returns the list of prev event IDs. The order matches the order
+        specified in the event, though there is no meaning to it.
+
+        Returns:
+            list[str]: The list of event IDs of this event's prev_events
+        """
+        return [e for e, _ in self.prev_events]
+
+    def auth_event_ids(self):
+        """Returns the list of auth event IDs. The order matches the order
+        specified in the event, though there is no meaning to it.
+
+        Returns:
+            list[str]: The list of event IDs of this event's auth_events
+        """
+        return [e for e, _ in self.auth_events]
+
 
 class FrozenEvent(EventBase):
+    format_version = EventFormatVersions.V1  # All events of this type are V1
+
     def __init__(self, event_dict, internal_metadata_dict={}, rejected_reason=None):
         event_dict = dict(event_dict)
 
@@ -195,22 +239,136 @@ class FrozenEvent(EventBase):
             rejected_reason=rejected_reason,
         )
 
-    @staticmethod
-    def from_event(event):
-        e = FrozenEvent(
-            event.get_pdu_json()
+    def __str__(self):
+        return self.__repr__()
+
+    def __repr__(self):
+        return "<FrozenEvent event_id='%s', type='%s', state_key='%s'>" % (
+            self.get("event_id", None),
+            self.get("type", None),
+            self.get("state_key", None),
+        )
+
+
+class FrozenEventV2(EventBase):
+    format_version = EventFormatVersions.V2  # All events of this type are V2
+
+    def __init__(self, event_dict, internal_metadata_dict={}, rejected_reason=None):
+        event_dict = dict(event_dict)
+
+        # Signatures is a dict of dicts, and this is faster than doing a
+        # copy.deepcopy
+        signatures = {
+            name: {sig_id: sig for sig_id, sig in sigs.items()}
+            for name, sigs in event_dict.pop("signatures", {}).items()
+        }
+
+        assert "event_id" not in event_dict
+
+        unsigned = dict(event_dict.pop("unsigned", {}))
+
+        # We intern these strings because they turn up a lot (especially when
+        # caching).
+        event_dict = intern_dict(event_dict)
+
+        if USE_FROZEN_DICTS:
+            frozen_dict = freeze(event_dict)
+        else:
+            frozen_dict = event_dict
+
+        self._event_id = None
+        self.type = event_dict["type"]
+        if "state_key" in event_dict:
+            self.state_key = event_dict["state_key"]
+
+        super(FrozenEventV2, self).__init__(
+            frozen_dict,
+            signatures=signatures,
+            unsigned=unsigned,
+            internal_metadata_dict=internal_metadata_dict,
+            rejected_reason=rejected_reason,
         )
 
-        e.internal_metadata = event.internal_metadata
+    @property
+    def event_id(self):
+        # We have to import this here as otherwise we get an import loop which
+        # is hard to break.
+        from synapse.crypto.event_signing import compute_event_reference_hash
+
+        if self._event_id:
+            return self._event_id
+        self._event_id = "$" + encode_base64(compute_event_reference_hash(self)[1])
+        return self._event_id
+
+    def prev_event_ids(self):
+        """Returns the list of prev event IDs. The order matches the order
+        specified in the event, though there is no meaning to it.
+
+        Returns:
+            list[str]: The list of event IDs of this event's prev_events
+        """
+        return self.prev_events
+
+    def auth_event_ids(self):
+        """Returns the list of auth event IDs. The order matches the order
+        specified in the event, though there is no meaning to it.
 
-        return e
+        Returns:
+            list[str]: The list of event IDs of this event's auth_events
+        """
+        return self.auth_events
 
     def __str__(self):
         return self.__repr__()
 
     def __repr__(self):
-        return "<FrozenEvent event_id='%s', type='%s', state_key='%s'>" % (
-            self.get("event_id", None),
+        return "<FrozenEventV2 event_id='%s', type='%s', state_key='%s'>" % (
+            self.event_id,
             self.get("type", None),
             self.get("state_key", None),
         )
+
+
+def room_version_to_event_format(room_version):
+    """Converts a room version string to the event format
+
+    Args:
+        room_version (str)
+
+    Returns:
+        int
+    """
+    if room_version not in KNOWN_ROOM_VERSIONS:
+        # We should have already checked the version, so this should not happen
+        raise RuntimeError("Unrecognized room version %s" % (room_version,))
+
+    if room_version in (
+        RoomVersions.V1, RoomVersions.V2, RoomVersions.STATE_V2_TEST,
+    ):
+        return EventFormatVersions.V1
+    elif room_version in (RoomVersions.V3,):
+        return EventFormatVersions.V2
+    else:
+        raise RuntimeError("Unrecognized room version %s" % (room_version,))
+
+
+def event_type_from_format_version(format_version):
+    """Returns the python type to use to construct an Event object for the
+    given event format version.
+
+    Args:
+        format_version (int): The event format version
+
+    Returns:
+        type: A type that can be initialized as per the initializer of
+        `FrozenEvent`
+    """
+
+    if format_version == EventFormatVersions.V1:
+        return FrozenEvent
+    elif format_version == EventFormatVersions.V2:
+        return FrozenEventV2
+    else:
+        raise Exception(
+            "No event format %r" % (format_version,)
+        )
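The FrozenEventV2.event_id property captures the key change for room v3: the event ID is derived from the event's reference hash rather than minted by the origin server. A sketch of the derivation, assuming the reference hash bytes have already been computed; unpaddedbase64's encode_base64 is approximated with the standard library.

    import base64


    def sketch_event_id_from_reference_hash(reference_hash_bytes):
        # Unpadded base64 is standard base64 with the trailing "=" stripped.
        b64 = base64.b64encode(reference_hash_bytes).decode("ascii").rstrip("=")
        return "$" + b64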
diff --git a/synapse/events/builder.py b/synapse/events/builder.py
index e662eaef10..06e01be918 100644
--- a/synapse/events/builder.py
+++ b/synapse/events/builder.py
@@ -13,63 +13,270 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import copy
+import attr
 
+from twisted.internet import defer
+
+from synapse.api.constants import (
+    KNOWN_EVENT_FORMAT_VERSIONS,
+    KNOWN_ROOM_VERSIONS,
+    MAX_DEPTH,
+    EventFormatVersions,
+)
+from synapse.crypto.event_signing import add_hashes_and_signatures
 from synapse.types import EventID
 from synapse.util.stringutils import random_string
 
-from . import EventBase, FrozenEvent, _event_dict_property
+from . import (
+    _EventInternalMetadata,
+    event_type_from_format_version,
+    room_version_to_event_format,
+)
+
+
+@attr.s(slots=True, cmp=False, frozen=True)
+class EventBuilder(object):
+    """A format independent event builder used to build up the event content
+    before signing the event.
+
+    (Note that while objects of this class are frozen, the
+    content/unsigned/internal_metadata fields are still mutable)
+
+    Attributes:
+        format_version (int): Event format version
+        room_id (str)
+        type (str)
+        sender (str)
+        content (dict)
+        unsigned (dict)
+        internal_metadata (_EventInternalMetadata)
+
+        _state (StateHandler)
+        _auth (synapse.api.Auth)
+        _store (DataStore)
+        _clock (Clock)
+        _hostname (str): The hostname of the server creating the event
+        _signing_key: The signing key to use to sign the event as the server
+    """
+
+    _state = attr.ib()
+    _auth = attr.ib()
+    _store = attr.ib()
+    _clock = attr.ib()
+    _hostname = attr.ib()
+    _signing_key = attr.ib()
+
+    format_version = attr.ib()
+
+    room_id = attr.ib()
+    type = attr.ib()
+    sender = attr.ib()
+
+    content = attr.ib(default=attr.Factory(dict))
+    unsigned = attr.ib(default=attr.Factory(dict))
+
+    # These only exist on a subset of events, so they raise AttributeError if
+    # someone tries to get them when they don't exist.
+    _state_key = attr.ib(default=None)
+    _redacts = attr.ib(default=None)
 
+    internal_metadata = attr.ib(default=attr.Factory(lambda: _EventInternalMetadata({})))
 
-class EventBuilder(EventBase):
-    def __init__(self, key_values={}, internal_metadata_dict={}):
-        signatures = copy.deepcopy(key_values.pop("signatures", {}))
-        unsigned = copy.deepcopy(key_values.pop("unsigned", {}))
+    @property
+    def state_key(self):
+        if self._state_key is not None:
+            return self._state_key
 
-        super(EventBuilder, self).__init__(
-            key_values,
-            signatures=signatures,
-            unsigned=unsigned,
-            internal_metadata_dict=internal_metadata_dict,
+        raise AttributeError("state_key")
+
+    def is_state(self):
+        return self._state_key is not None
+
+    @defer.inlineCallbacks
+    def build(self, prev_event_ids):
+        """Transform into a fully signed and hashed event
+
+        Args:
+            prev_event_ids (list[str]): The event IDs to use as the prev events
+
+        Returns:
+            Deferred[FrozenEvent]
+        """
+
+        state_ids = yield self._state.get_current_state_ids(
+            self.room_id, prev_event_ids,
+        )
+        auth_ids = yield self._auth.compute_auth_events(
+            self, state_ids,
         )
 
-    event_id = _event_dict_property("event_id")
-    state_key = _event_dict_property("state_key")
-    type = _event_dict_property("type")
+        if self.format_version == EventFormatVersions.V1:
+            auth_events = yield self._store.add_event_hashes(auth_ids)
+            prev_events = yield self._store.add_event_hashes(prev_event_ids)
+        else:
+            auth_events = auth_ids
+            prev_events = prev_event_ids
+
+        old_depth = yield self._store.get_max_depth_of(
+            prev_event_ids,
+        )
+        depth = old_depth + 1
+
+        # we cap depth of generated events, to ensure that they are not
+        # rejected by other servers (and so that they can be persisted in
+        # the db)
+        depth = min(depth, MAX_DEPTH)
+
+        event_dict = {
+            "auth_events": auth_events,
+            "prev_events": prev_events,
+            "type": self.type,
+            "room_id": self.room_id,
+            "sender": self.sender,
+            "content": self.content,
+            "unsigned": self.unsigned,
+            "depth": depth,
+            "prev_state": [],
+        }
+
+        if self.is_state():
+            event_dict["state_key"] = self._state_key
 
-    def build(self):
-        return FrozenEvent.from_event(self)
+        if self._redacts is not None:
+            event_dict["redacts"] = self._redacts
+
+        defer.returnValue(
+            create_local_event_from_event_dict(
+                clock=self._clock,
+                hostname=self._hostname,
+                signing_key=self._signing_key,
+                format_version=self.format_version,
+                event_dict=event_dict,
+                internal_metadata_dict=self.internal_metadata.get_dict(),
+            )
+        )
 
 
 class EventBuilderFactory(object):
-    def __init__(self, clock, hostname):
-        self.clock = clock
-        self.hostname = hostname
+    def __init__(self, hs):
+        self.clock = hs.get_clock()
+        self.hostname = hs.hostname
+        self.signing_key = hs.config.signing_key[0]
+
+        self.store = hs.get_datastore()
+        self.state = hs.get_state_handler()
+        self.auth = hs.get_auth()
+
+    def new(self, room_version, key_values):
+        """Generate an event builder appropriate for the given room version
+
+        Args:
+            room_version (str): Version of the room that we're creating an
+                event builder for
+            key_values (dict): Fields used as the basis of the new event
+
+        Returns:
+            EventBuilder
+        """
+
+        # Check that we know about this room version
+        if room_version not in KNOWN_ROOM_VERSIONS:
+            raise Exception(
+                "No event format defined for version %r" % (room_version,)
+            )
+
+        return EventBuilder(
+            store=self.store,
+            state=self.state,
+            auth=self.auth,
+            clock=self.clock,
+            hostname=self.hostname,
+            signing_key=self.signing_key,
+            format_version=room_version_to_event_format(room_version),
+            type=key_values["type"],
+            state_key=key_values.get("state_key"),
+            room_id=key_values["room_id"],
+            sender=key_values["sender"],
+            content=key_values.get("content", {}),
+            unsigned=key_values.get("unsigned", {}),
+            redacts=key_values.get("redacts", None),
+        )
+
+
+def create_local_event_from_event_dict(clock, hostname, signing_key,
+                                       format_version, event_dict,
+                                       internal_metadata_dict=None):
+    """Takes a fully formed event dict, ensuring that fields like `origin`
+    and `origin_server_ts` have correct values for a locally produced event,
+    then signs and hashes it.
+
+    Args:
+        clock (Clock)
+        hostname (str)
+        signing_key
+        format_version (int)
+        event_dict (dict)
+        internal_metadata_dict (dict|None)
+
+    Returns:
+        FrozenEvent
+    """
+
+    # Check that we know about this event format version
+    if format_version not in KNOWN_EVENT_FORMAT_VERSIONS:
+        raise Exception(
+            "No event format defined for version %r" % (format_version,)
+        )
+
+    if internal_metadata_dict is None:
+        internal_metadata_dict = {}
+
+    time_now = int(clock.time_msec())
+
+    if format_version == EventFormatVersions.V1:
+        event_dict["event_id"] = _create_event_id(clock, hostname)
+
+    event_dict["origin"] = hostname
+    event_dict["origin_server_ts"] = time_now
+
+    event_dict.setdefault("unsigned", {})
+    age = event_dict["unsigned"].pop("age", 0)
+    event_dict["unsigned"].setdefault("age_ts", time_now - age)
+
+    event_dict.setdefault("signatures", {})
+
+    add_hashes_and_signatures(
+        event_dict,
+        hostname,
+        signing_key,
+    )
+    return event_type_from_format_version(format_version)(
+        event_dict, internal_metadata_dict=internal_metadata_dict,
+    )
 
-        self.event_id_count = 0
 
-    def create_event_id(self):
-        i = str(self.event_id_count)
-        self.event_id_count += 1
+# A counter used when generating new event IDs
+_event_id_counter = 0
 
-        local_part = str(int(self.clock.time())) + i + random_string(5)
 
-        e_id = EventID(local_part, self.hostname)
+def _create_event_id(clock, hostname):
+    """Create a new event ID
 
-        return e_id.to_string()
+    Args:
+        clock (Clock)
+        hostname (str): The server name for the event ID
 
-    def new(self, key_values={}):
-        key_values["event_id"] = self.create_event_id()
+    Returns:
+        str
+    """
 
-        time_now = int(self.clock.time_msec())
+    global _event_id_counter
 
-        key_values.setdefault("origin", self.hostname)
-        key_values.setdefault("origin_server_ts", time_now)
+    i = str(_event_id_counter)
+    _event_id_counter += 1
 
-        key_values.setdefault("unsigned", {})
-        age = key_values["unsigned"].pop("age", 0)
-        key_values["unsigned"].setdefault("age_ts", time_now - age)
+    local_part = str(int(clock.time())) + i + random_string(5)
 
-        key_values["signatures"] = {}
+    e_id = EventID(local_part, hostname)
 
-        return EventBuilder(key_values=key_values,)
+    return e_id.to_string()
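_create_event_id keeps the old v1 scheme: a timestamp, a process-local counter and a short random suffix, rendered as "$localpart:hostname". A rough sketch, with random_string approximated via the standard library:

    import random
    import string
    import time

    _counter = 0


    def sketch_create_event_id(hostname):
        global _counter
        i = str(_counter)
        _counter += 1
        rand = "".join(random.choice(string.ascii_letters) for _ in range(5))
        local_part = str(int(time.time())) + i + rand
        # EventID.to_string() renders v1 event IDs as "$localpart:domain".
        return "$%s:%s" % (local_part, hostname)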
diff --git a/synapse/events/utils.py b/synapse/events/utils.py
index 652941ca0d..07fccdd8f9 100644
--- a/synapse/events/utils.py
+++ b/synapse/events/utils.py
@@ -38,8 +38,31 @@ def prune_event(event):
     This is used when we "redact" an event. We want to remove all fields that
     the user has specified, but we do want to keep necessary information like
     type, state_key etc.
+
+    Args:
+        event (FrozenEvent)
+
+    Returns:
+        FrozenEvent
+    """
+    pruned_event_dict = prune_event_dict(event.get_dict())
+
+    from . import event_type_from_format_version
+    return event_type_from_format_version(event.format_version)(
+        pruned_event_dict, event.internal_metadata.get_dict()
+    )
+
+
+def prune_event_dict(event_dict):
+    """Redacts the event_dict in the same way as `prune_event`, except it
+    operates on dicts rather than event objects
+
+    Args:
+        event_dict (dict)
+
+    Returns:
+        dict: A copy of the pruned event dict
     """
-    event_type = event.type
 
     allowed_keys = [
         "event_id",
@@ -59,13 +82,13 @@ def prune_event(event):
         "membership",
     ]
 
-    event_dict = event.get_dict()
+    event_type = event_dict["type"]
 
     new_content = {}
 
     def add_fields(*fields):
         for field in fields:
-            if field in event.content:
+            if field in event_dict["content"]:
                 new_content[field] = event_dict["content"][field]
 
     if event_type == EventTypes.Member:
@@ -98,17 +121,17 @@ def prune_event(event):
 
     allowed_fields["content"] = new_content
 
-    allowed_fields["unsigned"] = {}
+    unsigned = {}
+    allowed_fields["unsigned"] = unsigned
 
-    if "age_ts" in event.unsigned:
-        allowed_fields["unsigned"]["age_ts"] = event.unsigned["age_ts"]
-    if "replaces_state" in event.unsigned:
-        allowed_fields["unsigned"]["replaces_state"] = event.unsigned["replaces_state"]
+    event_unsigned = event_dict.get("unsigned", {})
 
-    return type(event)(
-        allowed_fields,
-        internal_metadata_dict=event.internal_metadata.get_dict()
-    )
+    if "age_ts" in event_unsigned:
+        unsigned["age_ts"] = event_unsigned["age_ts"]
+    if "replaces_state" in event_unsigned:
+        unsigned["replaces_state"] = event_unsigned["replaces_state"]
+
+    return allowed_fields
 
 
 def _copy_field(src, dst, field):
@@ -244,6 +267,7 @@ def serialize_event(e, time_now_ms, as_client_event=True,
     Returns:
         dict
     """
+
     # FIXME(erikj): To handle the case of presence events and the like
     if not isinstance(e, EventBase):
         return e
@@ -253,6 +277,8 @@ def serialize_event(e, time_now_ms, as_client_event=True,
     # Should this strip out None's?
     d = {k: v for k, v in e.get_dict().items()}
 
+    d["event_id"] = e.event_id
+
     if "age_ts" in d["unsigned"]:
         d["unsigned"]["age"] = time_now_ms - d["unsigned"]["age_ts"]
         del d["unsigned"]["age_ts"]
diff --git a/synapse/events/validator.py b/synapse/events/validator.py
index cf184748a1..a072674b02 100644
--- a/synapse/events/validator.py
+++ b/synapse/events/validator.py
@@ -15,23 +15,29 @@
 
 from six import string_types
 
-from synapse.api.constants import EventTypes, Membership
+from synapse.api.constants import EventFormatVersions, EventTypes, Membership
 from synapse.api.errors import SynapseError
 from synapse.types import EventID, RoomID, UserID
 
 
 class EventValidator(object):
+    def validate_new(self, event):
+        """Validates the event has roughly the right format
 
-    def validate(self, event):
-        EventID.from_string(event.event_id)
-        RoomID.from_string(event.room_id)
+        Args:
+            event (FrozenEvent)
+        """
+        self.validate_builder(event)
+
+        if event.format_version == EventFormatVersions.V1:
+            EventID.from_string(event.event_id)
 
         required = [
-            # "auth_events",
+            "auth_events",
             "content",
-            # "hashes",
+            "hashes",
             "origin",
-            # "prev_events",
+            "prev_events",
             "sender",
             "type",
         ]
@@ -41,8 +47,25 @@ class EventValidator(object):
                 raise SynapseError(400, "Event does not have key %s" % (k,))
 
         # Check that the following keys have string values
-        strings = [
+        event_strings = [
             "origin",
+        ]
+
+        for s in event_strings:
+            if not isinstance(getattr(event, s), string_types):
+                raise SynapseError(400, "'%s' not a string type" % (s,))
+
+    def validate_builder(self, event):
+        """Validates that the builder/event has roughly the right format. Only
+        checks values that we expect a proto event to have, rather than all the
+        fields an event would have
+
+        Args:
+            event (EventBuilder|FrozenEvent)
+        """
+
+        strings = [
+            "room_id",
             "sender",
             "type",
         ]
@@ -54,22 +77,7 @@ class EventValidator(object):
             if not isinstance(getattr(event, s), string_types):
                 raise SynapseError(400, "Not '%s' a string type" % (s,))
 
-        if event.type == EventTypes.Member:
-            if "membership" not in event.content:
-                raise SynapseError(400, "Content has not membership key")
-
-            if event.content["membership"] not in Membership.LIST:
-                raise SynapseError(400, "Invalid membership key")
-
-        # Check that the following keys have dictionary values
-        # TODO
-
-        # Check that the following keys have the correct format for DAGs
-        # TODO
-
-    def validate_new(self, event):
-        self.validate(event)
-
+        RoomID.from_string(event.room_id)
         UserID.from_string(event.sender)
 
         if event.type == EventTypes.Message:
@@ -86,9 +94,16 @@ class EventValidator(object):
         elif event.type == EventTypes.Name:
             self._ensure_strings(event.content, ["name"])
 
+        elif event.type == EventTypes.Member:
+            if "membership" not in event.content:
+                raise SynapseError(400, "Content has not membership key")
+
+            if event.content["membership"] not in Membership.LIST:
+                raise SynapseError(400, "Invalid membership key")
+
     def _ensure_strings(self, d, keys):
         for s in keys:
             if s not in d:
                 raise SynapseError(400, "'%s' not in content" % (s,))
             if not isinstance(d[s], string_types):
-                raise SynapseError(400, "Not '%s' a string type" % (s,))
+                raise SynapseError(400, "'%s' not a string type" % (s,))
diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py
index b7ad729c63..a7a2ec4523 100644
--- a/synapse/federation/federation_base.py
+++ b/synapse/federation/federation_base.py
@@ -20,10 +20,10 @@ import six
 from twisted.internet import defer
 from twisted.internet.defer import DeferredList
 
-from synapse.api.constants import MAX_DEPTH, EventTypes, Membership
+from synapse.api.constants import MAX_DEPTH, EventTypes, Membership, RoomVersions
 from synapse.api.errors import Codes, SynapseError
 from synapse.crypto.event_signing import check_event_content_hash
-from synapse.events import FrozenEvent
+from synapse.events import event_type_from_format_version
 from synapse.events.utils import prune_event
 from synapse.http.servlet import assert_params_in_dict
 from synapse.types import get_domain_from_id
@@ -43,8 +43,8 @@ class FederationBase(object):
         self._clock = hs.get_clock()
 
     @defer.inlineCallbacks
-    def _check_sigs_and_hash_and_fetch(self, origin, pdus, outlier=False,
-                                       include_none=False):
+    def _check_sigs_and_hash_and_fetch(self, origin, pdus, room_version,
+                                       outlier=False, include_none=False):
         """Takes a list of PDUs and checks the signatures and hashs of each
         one. If a PDU fails its signature check then we check if we have it in
         the database and if not then request if from the originating server of
@@ -56,13 +56,17 @@ class FederationBase(object):
         a new list.
 
         Args:
+            origin (str)
             pdus (list)
-            outlier (bool)
+            room_version (str)
+            outlier (bool): Whether the events are outliers or not
+            include_none (bool): Whether to include None in the returned list
+                for events that have failed their checks
 
         Returns:
             Deferred : A list of PDUs that have valid signatures and hashes.
         """
-        deferreds = self._check_sigs_and_hashes(pdus)
+        deferreds = self._check_sigs_and_hashes(room_version, pdus)
 
         @defer.inlineCallbacks
         def handle_check_result(pdu, deferred):
@@ -84,6 +88,7 @@ class FederationBase(object):
                     res = yield self.get_pdu(
                         destinations=[pdu.origin],
                         event_id=pdu.event_id,
+                        room_version=room_version,
                         outlier=outlier,
                         timeout=10000,
                     )
@@ -116,16 +121,17 @@ class FederationBase(object):
         else:
             defer.returnValue([p for p in valid_pdus if p])
 
-    def _check_sigs_and_hash(self, pdu):
+    def _check_sigs_and_hash(self, room_version, pdu):
         return logcontext.make_deferred_yieldable(
-            self._check_sigs_and_hashes([pdu])[0],
+            self._check_sigs_and_hashes(room_version, [pdu])[0],
         )
 
-    def _check_sigs_and_hashes(self, pdus):
+    def _check_sigs_and_hashes(self, room_version, pdus):
         """Checks that each of the received events is correctly signed by the
         sending server.
 
         Args:
+            room_version (str): The room version of the PDUs
             pdus (list[FrozenEvent]): the events to be checked
 
         Returns:
@@ -136,7 +142,7 @@ class FederationBase(object):
               * throws a SynapseError if the signature check failed.
             The deferreds run their callbacks in the sentinel logcontext.
         """
-        deferreds = _check_sigs_on_pdus(self.keyring, pdus)
+        deferreds = _check_sigs_on_pdus(self.keyring, room_version, pdus)
 
         ctx = logcontext.LoggingContext.current_context()
 
@@ -198,16 +204,17 @@ class FederationBase(object):
 
 
 class PduToCheckSig(namedtuple("PduToCheckSig", [
-    "pdu", "redacted_pdu_json", "event_id_domain", "sender_domain", "deferreds",
+    "pdu", "redacted_pdu_json", "sender_domain", "deferreds",
 ])):
     pass
 
 
-def _check_sigs_on_pdus(keyring, pdus):
+def _check_sigs_on_pdus(keyring, room_version, pdus):
     """Check that the given events are correctly signed
 
     Args:
         keyring (synapse.crypto.Keyring): keyring object to do the checks
+        room_version (str): the room version of the PDUs
         pdus (Collection[EventBase]): the events to be checked
 
     Returns:
@@ -220,9 +227,7 @@ def _check_sigs_on_pdus(keyring, pdus):
 
     # we want to check that the event is signed by:
     #
-    # (a) the server which created the event_id
-    #
-    # (b) the sender's server.
+    # (a) the sender's server
     #
     #     - except in the case of invites created from a 3pid invite, which are exempt
     #     from this check, because the sender has to match that of the original 3pid
@@ -236,34 +241,26 @@ def _check_sigs_on_pdus(keyring, pdus):
     #     and signatures are *supposed* to be valid whether or not an event has been
     #     redacted. But this isn't the worst of the ways that 3pid invites are broken.
     #
+    # (b) for V1 and V2 rooms, the server which created the event_id
+    #
     # let's start by getting the domain for each pdu, and flattening the event back
     # to JSON.
+
     pdus_to_check = [
         PduToCheckSig(
             pdu=p,
             redacted_pdu_json=prune_event(p).get_pdu_json(),
-            event_id_domain=get_domain_from_id(p.event_id),
             sender_domain=get_domain_from_id(p.sender),
             deferreds=[],
         )
         for p in pdus
     ]
 
-    # first make sure that the event is signed by the event_id's domain
-    deferreds = keyring.verify_json_objects_for_server([
-        (p.event_id_domain, p.redacted_pdu_json)
-        for p in pdus_to_check
-    ])
-
-    for p, d in zip(pdus_to_check, deferreds):
-        p.deferreds.append(d)
-
-    # now let's look for events where the sender's domain is different to the
-    # event id's domain (normally only the case for joins/leaves), and add additional
-    # checks.
+    # First we check that the event is signed by the sender's domain
+    # (except if it's a 3pid invite, in which case it may be sent by any server)
     pdus_to_check_sender = [
         p for p in pdus_to_check
-        if p.sender_domain != p.event_id_domain and not _is_invite_via_3pid(p.pdu)
+        if not _is_invite_via_3pid(p.pdu)
     ]
 
     more_deferreds = keyring.verify_json_objects_for_server([
@@ -274,19 +271,43 @@ def _check_sigs_on_pdus(keyring, pdus):
     for p, d in zip(pdus_to_check_sender, more_deferreds):
         p.deferreds.append(d)
 
+    # now let's look for events where the sender's domain is different to the
+    # event id's domain (normally only the case for joins/leaves), and add additional
+    # checks. Only do this if the room version has a concept of event ID domain
+    if room_version in (
+        RoomVersions.V1, RoomVersions.V2, RoomVersions.STATE_V2_TEST,
+    ):
+        pdus_to_check_event_id = [
+            p for p in pdus_to_check
+            if p.sender_domain != get_domain_from_id(p.pdu.event_id)
+        ]
+
+        more_deferreds = keyring.verify_json_objects_for_server([
+            (get_domain_from_id(p.pdu.event_id), p.redacted_pdu_json)
+            for p in pdus_to_check_event_id
+        ])
+
+        for p, d in zip(pdus_to_check_event_id, more_deferreds):
+            p.deferreds.append(d)
+    elif room_version in (RoomVersions.V3,):
+        pass  # No further checks needed, as event IDs are hashes here
+    else:
+        raise RuntimeError("Unrecognized room version %s" % (room_version,))
+
     # replace lists of deferreds with single Deferreds
     return [_flatten_deferred_list(p.deferreds) for p in pdus_to_check]
 
 
 def _flatten_deferred_list(deferreds):
-    """Given a list of one or more deferreds, either return the single deferred, or
-    combine into a DeferredList.
+    """Given a list of deferreds, either return the single deferred,
+    combine into a DeferredList, or return an already resolved deferred.
     """
     if len(deferreds) > 1:
         return DeferredList(deferreds, fireOnOneErrback=True, consumeErrors=True)
-    else:
-        assert len(deferreds) == 1
+    elif len(deferreds) == 1:
         return deferreds[0]
+    else:
+        return defer.succeed(None)
 
 
 def _is_invite_via_3pid(event):
@@ -297,11 +318,12 @@ def _is_invite_via_3pid(event):
     )
 
 
-def event_from_pdu_json(pdu_json, outlier=False):
+def event_from_pdu_json(pdu_json, event_format_version, outlier=False):
     """Construct a FrozenEvent from an event json received over federation
 
     Args:
         pdu_json (object): pdu as received over federation
+        event_format_version (int): The event format version
         outlier (bool): True to mark this event as an outlier
 
     Returns:
@@ -313,7 +335,7 @@ def event_from_pdu_json(pdu_json, outlier=False):
     """
     # we could probably enforce a bunch of other fields here (room_id, sender,
     # origin, etc etc)
-    assert_params_in_dict(pdu_json, ('event_id', 'type', 'depth'))
+    assert_params_in_dict(pdu_json, ('type', 'depth'))
 
     depth = pdu_json['depth']
     if not isinstance(depth, six.integer_types):
@@ -325,8 +347,8 @@ def event_from_pdu_json(pdu_json, outlier=False):
     elif depth > MAX_DEPTH:
         raise SynapseError(400, "Depth too large", Codes.BAD_JSON)
 
-    event = FrozenEvent(
-        pdu_json
+    event = event_type_from_format_version(event_format_version)(
+        pdu_json,
     )
 
     event.internal_metadata.outlier = outlier
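event_from_pdu_json no longer insists on an event_id, since v2-format events do not carry one, but it still bounds-checks the depth. A sketch of those checks, with ValueError standing in for SynapseError(400, ..., Codes.BAD_JSON) and MAX_DEPTH assumed to be Synapse's 2**63 - 1:

    MAX_DEPTH = 2 ** 63 - 1  # assumed value of synapse.api.constants.MAX_DEPTH


    def sketch_check_depth(pdu_json):
        depth = pdu_json["depth"]
        if not isinstance(depth, int):
            raise ValueError("Depth must be an integer")
        if depth < 0:
            raise ValueError("Depth too small")
        if depth > MAX_DEPTH:
            raise ValueError("Depth too large")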
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index d05ed91d64..58e04d81ab 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -25,14 +25,20 @@ from prometheus_client import Counter
 
 from twisted.internet import defer
 
-from synapse.api.constants import KNOWN_ROOM_VERSIONS, EventTypes, Membership
+from synapse.api.constants import (
+    KNOWN_ROOM_VERSIONS,
+    EventTypes,
+    Membership,
+    RoomVersions,
+)
 from synapse.api.errors import (
     CodeMessageException,
+    Codes,
     FederationDeniedError,
     HttpResponseException,
     SynapseError,
 )
-from synapse.events import builder
+from synapse.events import builder, room_version_to_event_format
 from synapse.federation.federation_base import FederationBase, event_from_pdu_json
 from synapse.util import logcontext, unwrapFirstError
 from synapse.util.caches.expiringcache import ExpiringCache
@@ -66,6 +72,9 @@ class FederationClient(FederationBase):
         self.state = hs.get_state_handler()
         self.transport_layer = hs.get_federation_transport_client()
 
+        self.hostname = hs.hostname
+        self.signing_key = hs.config.signing_key[0]
+
         self._get_pdu_cache = ExpiringCache(
             cache_name="get_pdu_cache",
             clock=self._clock,
@@ -162,13 +171,13 @@ class FederationClient(FederationBase):
 
     @defer.inlineCallbacks
     @log_function
-    def backfill(self, dest, context, limit, extremities):
+    def backfill(self, dest, room_id, limit, extremities):
         """Requests some more historic PDUs for the given context from the
         given destination server.
 
         Args:
             dest (str): The remote home server to ask.
-            context (str): The context to backfill.
+            room_id (str): The room_id to backfill.
             limit (int): The maximum number of PDUs to return.
             extremities (list): List of PDU ids and origins of the first PDUs
                 we have seen from the context
@@ -183,18 +192,21 @@ class FederationClient(FederationBase):
             return
 
         transaction_data = yield self.transport_layer.backfill(
-            dest, context, extremities, limit)
+            dest, room_id, extremities, limit)
 
         logger.debug("backfill transaction_data=%s", repr(transaction_data))
 
+        room_version = yield self.store.get_room_version(room_id)
+        format_ver = room_version_to_event_format(room_version)
+
         pdus = [
-            event_from_pdu_json(p, outlier=False)
+            event_from_pdu_json(p, format_ver, outlier=False)
             for p in transaction_data["pdus"]
         ]
 
         # FIXME: We should handle signature failures more gracefully.
         pdus[:] = yield logcontext.make_deferred_yieldable(defer.gatherResults(
-            self._check_sigs_and_hashes(pdus),
+            self._check_sigs_and_hashes(room_version, pdus),
             consumeErrors=True,
         ).addErrback(unwrapFirstError))
 
@@ -202,7 +214,8 @@ class FederationClient(FederationBase):
 
     @defer.inlineCallbacks
     @log_function
-    def get_pdu(self, destinations, event_id, outlier=False, timeout=None):
+    def get_pdu(self, destinations, event_id, room_version, outlier=False,
+                timeout=None):
         """Requests the PDU with given origin and ID from the remote home
         servers.
 
@@ -212,6 +225,7 @@ class FederationClient(FederationBase):
         Args:
             destinations (list): Which home servers to query
             event_id (str): event to fetch
+            room_version (str): version of the room
             outlier (bool): Indicates whether the PDU is an `outlier`, i.e. if
                 it's from an arbitrary point in the context as opposed to part
                 of the current block of PDUs. Defaults to `False`
@@ -230,6 +244,8 @@ class FederationClient(FederationBase):
 
         pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {})
 
+        format_ver = room_version_to_event_format(room_version)
+
         signed_pdu = None
         for destination in destinations:
             now = self._clock.time_msec()
@@ -245,7 +261,7 @@ class FederationClient(FederationBase):
                 logger.debug("transaction_data %r", transaction_data)
 
                 pdu_list = [
-                    event_from_pdu_json(p, outlier=outlier)
+                    event_from_pdu_json(p, format_ver, outlier=outlier)
                     for p in transaction_data["pdus"]
                 ]
 
@@ -253,7 +269,7 @@ class FederationClient(FederationBase):
                     pdu = pdu_list[0]
 
                     # Check signatures are correct.
-                    signed_pdu = yield self._check_sigs_and_hash(pdu)
+                    signed_pdu = yield self._check_sigs_and_hash(room_version, pdu)
 
                     break
 
@@ -339,12 +355,16 @@ class FederationClient(FederationBase):
             destination, room_id, event_id=event_id,
         )
 
+        room_version = yield self.store.get_room_version(room_id)
+        format_ver = room_version_to_event_format(room_version)
+
         pdus = [
-            event_from_pdu_json(p, outlier=True) for p in result["pdus"]
+            event_from_pdu_json(p, format_ver, outlier=True)
+            for p in result["pdus"]
         ]
 
         auth_chain = [
-            event_from_pdu_json(p, outlier=True)
+            event_from_pdu_json(p, format_ver, outlier=True)
             for p in result.get("auth_chain", [])
         ]
 
@@ -355,7 +375,8 @@ class FederationClient(FederationBase):
         signed_pdus = yield self._check_sigs_and_hash_and_fetch(
             destination,
             [p for p in pdus if p.event_id not in seen_events],
-            outlier=True
+            outlier=True,
+            room_version=room_version,
         )
         signed_pdus.extend(
             seen_events[p.event_id] for p in pdus if p.event_id in seen_events
@@ -364,7 +385,8 @@ class FederationClient(FederationBase):
         signed_auth = yield self._check_sigs_and_hash_and_fetch(
             destination,
             [p for p in auth_chain if p.event_id not in seen_events],
-            outlier=True
+            outlier=True,
+            room_version=room_version,
         )
         signed_auth.extend(
             seen_events[p.event_id] for p in auth_chain if p.event_id in seen_events
@@ -411,6 +433,8 @@ class FederationClient(FederationBase):
             random.shuffle(srvs)
             return srvs
 
+        room_version = yield self.store.get_room_version(room_id)
+
         batch_size = 20
         missing_events = list(missing_events)
         for i in range(0, len(missing_events), batch_size):
@@ -421,6 +445,7 @@ class FederationClient(FederationBase):
                     self.get_pdu,
                     destinations=random_server_list(),
                     event_id=e_id,
+                    room_version=room_version,
                 )
                 for e_id in batch
             ]
@@ -445,13 +470,17 @@ class FederationClient(FederationBase):
             destination, room_id, event_id,
         )
 
+        room_version = yield self.store.get_room_version(room_id)
+        format_ver = room_version_to_event_format(room_version)
+
         auth_chain = [
-            event_from_pdu_json(p, outlier=True)
+            event_from_pdu_json(p, format_ver, outlier=True)
             for p in res["auth_chain"]
         ]
 
         signed_auth = yield self._check_sigs_and_hash_and_fetch(
-            destination, auth_chain, outlier=True
+            destination, auth_chain,
+            outlier=True, room_version=room_version,
         )
 
         signed_auth.sort(key=lambda e: e.depth)
@@ -522,6 +551,8 @@ class FederationClient(FederationBase):
         Does so by asking one of the already participating servers to create an
         event with proper context.
 
+        Returns a fully signed and hashed event.
+
         Note that this does not append any events to any graphs.
 
         Args:
@@ -536,8 +567,10 @@ class FederationClient(FederationBase):
             params (dict[str, str|Iterable[str]]): Query parameters to include in the
                 request.
         Return:
-            Deferred: resolves to a tuple of (origin (str), event (object))
-            where origin is the remote homeserver which generated the event.
+            Deferred[tuple[str, FrozenEvent, int]]: resolves to a tuple of
+            `(origin, event, event_format)` where origin is the remote
+            homeserver which generated the event, and event_format is one of
+            `synapse.api.constants.EventFormatVersions`.
 
             Fails with a ``SynapseError`` if the chosen remote server
             returns a 300/400 code.
@@ -557,6 +590,11 @@ class FederationClient(FederationBase):
                 destination, room_id, user_id, membership, params,
             )
 
+            # Note: If not supplied, the room version may be either v1 or v2,
+            # however, either way the event format version will be v1.
+            room_version = ret.get("room_version", RoomVersions.V1)
+            event_format = room_version_to_event_format(room_version)
+
             pdu_dict = ret.get("event", None)
             if not isinstance(pdu_dict, dict):
                 raise InvalidResponseError("Bad 'event' field in response")
@@ -571,17 +609,20 @@ class FederationClient(FederationBase):
             if "prev_state" not in pdu_dict:
                 pdu_dict["prev_state"] = []
 
-            ev = builder.EventBuilder(pdu_dict)
+            ev = builder.create_local_event_from_event_dict(
+                self._clock, self.hostname, self.signing_key,
+                format_version=event_format, event_dict=pdu_dict,
+            )
 
             defer.returnValue(
-                (destination, ev)
+                (destination, ev, event_format)
             )
 
         return self._try_destination_list(
             "make_" + membership, destinations, send_request,
         )
 
-    def send_join(self, destinations, pdu):
+    def send_join(self, destinations, pdu, event_format_version):
         """Sends a join event to one of a list of homeservers.
 
         Doing so will cause the remote server to add the event to the graph,
@@ -591,6 +632,7 @@ class FederationClient(FederationBase):
             destinations (list[str]): Candidate homeservers which are probably
                 participating in the room.
             pdu (BaseEvent): event to be sent
+            event_format_version (int): The event format version
 
         Return:
             Deferred: resolves to a dict with members ``origin`` (a string
@@ -636,12 +678,12 @@ class FederationClient(FederationBase):
             logger.debug("Got content: %s", content)
 
             state = [
-                event_from_pdu_json(p, outlier=True)
+                event_from_pdu_json(p, event_format_version, outlier=True)
                 for p in content.get("state", [])
             ]
 
             auth_chain = [
-                event_from_pdu_json(p, outlier=True)
+                event_from_pdu_json(p, event_format_version, outlier=True)
                 for p in content.get("auth_chain", [])
             ]
 
@@ -650,9 +692,21 @@ class FederationClient(FederationBase):
                 for p in itertools.chain(state, auth_chain)
             }
 
+            room_version = None
+            for e in state:
+                if (e.type, e.state_key) == (EventTypes.Create, ""):
+                    room_version = e.content.get("room_version", RoomVersions.V1)
+                    break
+
+            if room_version is None:
+                # If the state doesn't have a create event then the room is
+                # invalid, and it would fail auth checks anyway.
+                raise SynapseError(400, "No create event in state")
+
             valid_pdus = yield self._check_sigs_and_hash_and_fetch(
                 destination, list(pdus.values()),
                 outlier=True,
+                room_version=room_version,
             )
 
             valid_pdus_map = {
@@ -690,32 +744,90 @@ class FederationClient(FederationBase):
 
     @defer.inlineCallbacks
     def send_invite(self, destination, room_id, event_id, pdu):
-        time_now = self._clock.time_msec()
-        try:
-            code, content = yield self.transport_layer.send_invite(
-                destination=destination,
-                room_id=room_id,
-                event_id=event_id,
-                content=pdu.get_pdu_json(time_now),
-            )
-        except HttpResponseException as e:
-            if e.code == 403:
-                raise e.to_synapse_error()
-            raise
+        room_version = yield self.store.get_room_version(room_id)
+
+        content = yield self._do_send_invite(destination, pdu, room_version)
 
         pdu_dict = content["event"]
 
         logger.debug("Got response to send_invite: %s", pdu_dict)
 
-        pdu = event_from_pdu_json(pdu_dict)
+        room_version = yield self.store.get_room_version(room_id)
+        format_ver = room_version_to_event_format(room_version)
+
+        pdu = event_from_pdu_json(pdu_dict, format_ver)
 
         # Check signatures are correct.
-        pdu = yield self._check_sigs_and_hash(pdu)
+        pdu = yield self._check_sigs_and_hash(room_version, pdu)
 
         # FIXME: We should handle signature failures more gracefully.
 
         defer.returnValue(pdu)
 
+    @defer.inlineCallbacks
+    def _do_send_invite(self, destination, pdu, room_version):
+        """Actually sends the invite, first trying v2 API and falling back to
+        v1 API if necessary.
+
+        Args:
+            destination (str): Target server
+            pdu (FrozenEvent)
+            room_version (str)
+
+        Returns:
+            dict: The event as a dict as returned by the remote server
+        """
+        time_now = self._clock.time_msec()
+
+        try:
+            content = yield self.transport_layer.send_invite_v2(
+                destination=destination,
+                room_id=pdu.room_id,
+                event_id=pdu.event_id,
+                content={
+                    "event": pdu.get_pdu_json(time_now),
+                    "room_version": room_version,
+                    "invite_room_state": pdu.unsigned.get("invite_room_state", []),
+                },
+            )
+            defer.returnValue(content)
+        except HttpResponseException as e:
+            if e.code in [400, 404]:
+                err = e.to_synapse_error()
+
+                # If we receive an error response that isn't a generic error, we
+                # assume that the remote understands the v2 invite API and this
+                # is a legitimate error.
+                if err.errcode != Codes.UNKNOWN:
+                    raise err
+
+                # Otherwise, we assume that the remote server doesn't understand
+                # the v2 invite API.
+
+                if room_version in (RoomVersions.V1, RoomVersions.V2):
+                    pass  # We'll fall through
+                else:
+                    raise SynapseError(
+                        400,
+                        "User's homeserver does not support this room version",
+                        Codes.UNSUPPORTED_ROOM_VERSION,
+                    )
+            elif e.code == 403:
+                raise e.to_synapse_error()
+            else:
+                raise
+
+        # Didn't work; try the v1 API.
+        # Note that the v1 API returns a tuple of `(200, content)`.
+
+        _, content = yield self.transport_layer.send_invite_v1(
+            destination=destination,
+            room_id=pdu.room_id,
+            event_id=pdu.event_id,
+            content=pdu.get_pdu_json(time_now),
+        )
+        defer.returnValue(content)
+
     def send_leave(self, destinations, pdu):
         """Sends a leave event to one of a list of homeservers.
 
@@ -785,13 +897,16 @@ class FederationClient(FederationBase):
             content=send_content,
         )
 
+        room_version = yield self.store.get_room_version(room_id)
+        format_ver = room_version_to_event_format(room_version)
+
         auth_chain = [
-            event_from_pdu_json(e)
+            event_from_pdu_json(e, format_ver)
             for e in content["auth_chain"]
         ]
 
         signed_auth = yield self._check_sigs_and_hash_and_fetch(
-            destination, auth_chain, outlier=True
+            destination, auth_chain, outlier=True, room_version=room_version,
         )
 
         signed_auth.sort(key=lambda e: e.depth)
@@ -833,13 +948,16 @@ class FederationClient(FederationBase):
                 timeout=timeout,
             )
 
+            room_version = yield self.store.get_room_version(room_id)
+            format_ver = room_version_to_event_format(room_version)
+
             events = [
-                event_from_pdu_json(e)
+                event_from_pdu_json(e, format_ver)
                 for e in content.get("events", [])
             ]
 
             signed_events = yield self._check_sigs_and_hash_and_fetch(
-                destination, events, outlier=False
+                destination, events, outlier=False, room_version=room_version,
             )
         except HttpResponseException as e:
             if not e.code == 400:
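
Most of this file's changes plumb `room_version` through the client paths; the other substantive addition is `_do_send_invite`, which tries the v2 invite API and falls back to v1 only when the remote plainly doesn't know the route and the room version can be expressed over v1. A synchronous sketch of that negotiation, with injected stand-ins for the transport layer (nothing here is Synapse's real API):

    # Sketch of the v2-then-v1 invite fallback; send_v2/send_v1 are
    # hypothetical injected callables, not Synapse's transport methods.
    class HttpError(Exception):
        def __init__(self, code, errcode="M_UNKNOWN"):
            super(HttpError, self).__init__(code)
            self.code = code
            self.errcode = errcode

    def do_send_invite(send_v2, send_v1, pdu_json, room_version):
        try:
            return send_v2({
                "event": pdu_json,
                "room_version": room_version,
                "invite_room_state": [],
            })
        except HttpError as e:
            if e.code not in (400, 404):
                raise
            # A specific errcode means the remote understood the v2 API
            # and genuinely rejected the invite.
            if e.errcode != "M_UNKNOWN":
                raise
            # Only v1/v2 rooms can be expressed over the v1 invite API.
            if room_version not in ("1", "2"):
                raise HttpError(400, "M_UNSUPPORTED_ROOM_VERSION")
        # The v1 API wraps responses as [200, content]; unwrap it.
        return send_v1(pdu_json)[1]

    # A remote that 404s the unknown v2 route falls back cleanly:
    def old_server_v2(content):
        raise HttpError(404)
    out = do_send_invite(old_server_v2, lambda e: [200, {"event": e}],
                         {"type": "m.room.member"}, "1")
    assert out == {"event": {"type": "m.room.member"}}
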
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 0f9302a6a8..569eb277a9 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -25,15 +25,17 @@ from twisted.internet import defer
 from twisted.internet.abstract import isIPAddress
 from twisted.python import failure
 
-from synapse.api.constants import EventTypes
+from synapse.api.constants import KNOWN_ROOM_VERSIONS, EventTypes, Membership
 from synapse.api.errors import (
     AuthError,
+    Codes,
     FederationError,
     IncompatibleRoomVersionError,
     NotFoundError,
     SynapseError,
 )
 from synapse.crypto.event_signing import compute_event_signature
+from synapse.events import room_version_to_event_format
 from synapse.federation.federation_base import FederationBase, event_from_pdu_json
 from synapse.federation.persistence import TransactionActions
 from synapse.federation.units import Edu, Transaction
@@ -147,6 +149,22 @@ class FederationServer(FederationBase):
 
         logger.debug("[%s] Transaction is new", transaction.transaction_id)
 
+        # Reject if PDU count > 50 or EDU count > 100
+        if (len(transaction.pdus) > 50
+                or (hasattr(transaction, "edus") and len(transaction.edus) > 100)):
+
+            logger.info(
+                "Transaction PDU or EDU count too large. Returning 400",
+            )
+
+            response = {}
+            yield self.transaction_actions.set_response(
+                origin,
+                transaction,
+                400, response
+            )
+            defer.returnValue((400, response))
+
         received_pdus_counter.inc(len(transaction.pdus))
 
         origin_host, _ = parse_server_name(origin)
@@ -162,8 +180,29 @@ class FederationServer(FederationBase):
                 p["age_ts"] = request_time - int(p["age"])
                 del p["age"]
 
-            event = event_from_pdu_json(p)
-            room_id = event.room_id
+            # We try to pull out an event ID so that if later checks fail we
+            # can log something sensible. We don't mandate an event ID here in
+            # case future event formats get rid of the key.
+            possible_event_id = p.get("event_id", "<Unknown>")
+
+            # Now we get the room ID so that we can check that we know the
+            # version of the room.
+            room_id = p.get("room_id")
+            if not room_id:
+                logger.info(
+                    "Ignoring PDU as does not have a room_id. Event ID: %s",
+                    possible_event_id,
+                )
+                continue
+
+            try:
+                room_version = yield self.store.get_room_version(room_id)
+                format_ver = room_version_to_event_format(room_version)
+            except NotFoundError:
+                logger.info("Ignoring PDU for unknown room_id: %s", room_id)
+                continue
+
+            event = event_from_pdu_json(p, format_ver)
             pdus_by_room.setdefault(room_id, []).append(event)
 
         pdu_results = {}
@@ -201,8 +240,9 @@ class FederationServer(FederationBase):
                         f = failure.Failure()
                         pdu_results[event_id] = {"error": str(e)}
                         logger.error(
-                            "Failed to handle PDU %s: %s",
-                            event_id, f.getTraceback().rstrip(),
+                            "Failed to handle PDU %s",
+                            event_id,
+                            exc_info=(f.type, f.value, f.getTracebackObject()),
                         )
 
         yield concurrently_execute(
@@ -300,7 +340,7 @@ class FederationServer(FederationBase):
             if self.hs.is_mine_id(event.event_id):
                 event.signatures.update(
                     compute_event_signature(
-                        event,
+                        event.get_pdu_json(),
                         self.hs.hostname,
                         self.hs.config.signing_key[0]
                     )
@@ -324,11 +364,6 @@ class FederationServer(FederationBase):
             defer.returnValue((404, ""))
 
     @defer.inlineCallbacks
-    @log_function
-    def on_pull_request(self, origin, versions):
-        raise NotImplementedError("Pull transactions not implemented")
-
-    @defer.inlineCallbacks
     def on_query_request(self, query_type, args):
         received_queries_counter.labels(query_type).inc()
         resp = yield self.registry.on_query(query_type, args)
@@ -352,18 +387,30 @@ class FederationServer(FederationBase):
         })
 
     @defer.inlineCallbacks
-    def on_invite_request(self, origin, content):
-        pdu = event_from_pdu_json(content)
+    def on_invite_request(self, origin, content, room_version):
+        if room_version not in KNOWN_ROOM_VERSIONS:
+            raise SynapseError(
+                400,
+                "Homeserver does not support this room version",
+                Codes.UNSUPPORTED_ROOM_VERSION,
+            )
+
+        format_ver = room_version_to_event_format(room_version)
+
+        pdu = event_from_pdu_json(content, format_ver)
         origin_host, _ = parse_server_name(origin)
         yield self.check_server_matches_acl(origin_host, pdu.room_id)
         ret_pdu = yield self.handler.on_invite_request(origin, pdu)
         time_now = self._clock.time_msec()
-        defer.returnValue((200, {"event": ret_pdu.get_pdu_json(time_now)}))
+        defer.returnValue({"event": ret_pdu.get_pdu_json(time_now)})
 
     @defer.inlineCallbacks
-    def on_send_join_request(self, origin, content):
+    def on_send_join_request(self, origin, content, room_id):
         logger.debug("on_send_join_request: content: %s", content)
-        pdu = event_from_pdu_json(content)
+
+        room_version = yield self.store.get_room_version(room_id)
+        format_ver = room_version_to_event_format(room_version)
+        pdu = event_from_pdu_json(content, format_ver)
 
         origin_host, _ = parse_server_name(origin)
         yield self.check_server_matches_acl(origin_host, pdu.room_id)
@@ -383,13 +430,22 @@ class FederationServer(FederationBase):
         origin_host, _ = parse_server_name(origin)
         yield self.check_server_matches_acl(origin_host, room_id)
         pdu = yield self.handler.on_make_leave_request(room_id, user_id)
+
+        room_version = yield self.store.get_room_version(room_id)
+
         time_now = self._clock.time_msec()
-        defer.returnValue({"event": pdu.get_pdu_json(time_now)})
+        defer.returnValue({
+            "event": pdu.get_pdu_json(time_now),
+            "room_version": room_version,
+        })
 
     @defer.inlineCallbacks
-    def on_send_leave_request(self, origin, content):
+    def on_send_leave_request(self, origin, content, room_id):
         logger.debug("on_send_leave_request: content: %s", content)
-        pdu = event_from_pdu_json(content)
+
+        room_version = yield self.store.get_room_version(room_id)
+        format_ver = room_version_to_event_format(room_version)
+        pdu = event_from_pdu_json(content, format_ver)
 
         origin_host, _ = parse_server_name(origin)
         yield self.check_server_matches_acl(origin_host, pdu.room_id)
@@ -435,13 +491,16 @@ class FederationServer(FederationBase):
             origin_host, _ = parse_server_name(origin)
             yield self.check_server_matches_acl(origin_host, room_id)
 
+            room_version = yield self.store.get_room_version(room_id)
+            format_ver = room_version_to_event_format(room_version)
+
             auth_chain = [
-                event_from_pdu_json(e)
+                event_from_pdu_json(e, format_ver)
                 for e in content["auth_chain"]
             ]
 
             signed_auth = yield self._check_sigs_and_hash_and_fetch(
-                origin, auth_chain, outlier=True
+                origin, auth_chain, outlier=True, room_version=room_version,
             )
 
             ret = yield self.handler.on_query_auth(
@@ -586,16 +645,19 @@ class FederationServer(FederationBase):
         """
         # check that it's actually being sent from a valid destination to
         # work around bug #1753 in 0.18.5 and 0.18.6
-        if origin != get_domain_from_id(pdu.event_id):
+        if origin != get_domain_from_id(pdu.sender):
             # We continue to accept join events from any server; this is
             # necessary for the federation join dance to work correctly.
             # (When we join over federation, the "helper" server is
             # responsible for sending out the join event, rather than the
-            # origin. See bug #1893).
+            # origin. See bug #1893. This is also true for some third party
+            # invites).
             if not (
                 pdu.type == 'm.room.member' and
                 pdu.content and
-                pdu.content.get("membership", None) == 'join'
+                pdu.content.get("membership", None) in (
+                    Membership.JOIN, Membership.INVITE,
+                )
             ):
                 logger.info(
                     "Discarding PDU %s from invalid origin %s",
@@ -608,9 +670,12 @@ class FederationServer(FederationBase):
                     pdu.event_id, origin
                 )
 
+        # We've already checked that we know the room version by this point
+        room_version = yield self.store.get_room_version(pdu.room_id)
+
         # Check signature.
         try:
-            pdu = yield self._check_sigs_and_hash(pdu)
+            pdu = yield self._check_sigs_and_hash(room_version, pdu)
         except SynapseError as e:
             raise FederationError(
                 "ERROR",
diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py
index 3fdd63be95..30941f5ad6 100644
--- a/synapse/federation/transaction_queue.py
+++ b/synapse/federation/transaction_queue.py
@@ -22,14 +22,17 @@ from prometheus_client import Counter
 from twisted.internet import defer
 
 import synapse.metrics
-from synapse.api.errors import FederationDeniedError, HttpResponseException
+from synapse.api.errors import (
+    FederationDeniedError,
+    HttpResponseException,
+    RequestSendFailed,
+)
 from synapse.handlers.presence import format_user_presence_state, get_interested_remotes
 from synapse.metrics import (
     LaterGauge,
     event_processing_loop_counter,
     event_processing_loop_room_count,
     events_processed_counter,
-    sent_edus_counter,
     sent_transactions_counter,
 )
 from synapse.metrics.background_process_metrics import run_as_background_process
@@ -43,10 +46,24 @@ from .units import Edu, Transaction
 logger = logging.getLogger(__name__)
 
 sent_pdus_destination_dist_count = Counter(
-    "synapse_federation_client_sent_pdu_destinations:count", ""
+    "synapse_federation_client_sent_pdu_destinations:count",
+    "Number of PDUs queued for sending to one or more destinations",
 )
+
 sent_pdus_destination_dist_total = Counter(
     "synapse_federation_client_sent_pdu_destinations:total", ""
+    "Total number of PDUs queued for sending across all destinations",
+)
+
+sent_edus_counter = Counter(
+    "synapse_federation_client_sent_edus",
+    "Total number of EDUs successfully sent",
+)
+
+sent_edus_by_type = Counter(
+    "synapse_federation_client_sent_edus_by_type",
+    "Number of sent EDUs successfully sent, by event type",
+    ["type"],
 )
 
 
@@ -171,7 +188,7 @@ class TransactionQueue(object):
                 def handle_event(event):
                     # Only send events for this server.
                     send_on_behalf_of = event.internal_metadata.get_send_on_behalf_of()
-                    is_mine = self.is_mine_id(event.event_id)
+                    is_mine = self.is_mine_id(event.sender)
                     if not is_mine and send_on_behalf_of is None:
                         return
 
@@ -183,9 +200,7 @@ class TransactionQueue(object):
                         # banned then it won't receive the event because it won't
                         # be in the room after the ban.
                         destinations = yield self.state.get_current_hosts_in_room(
-                            event.room_id, latest_event_ids=[
-                                prev_id for prev_id, _ in event.prev_events
-                            ],
+                            event.room_id, latest_event_ids=event.prev_event_ids(),
                         )
                     except Exception:
                         logger.exception(
@@ -358,8 +373,6 @@ class TransactionQueue(object):
             logger.info("Not sending EDU to ourselves")
             return
 
-        sent_edus_counter.inc()
-
         if key:
             self.pending_edus_keyed_by_dest.setdefault(
                 destination, {}
@@ -494,6 +507,9 @@ class TransactionQueue(object):
                 )
                 if success:
                     sent_transactions_counter.inc()
+                    sent_edus_counter.inc(len(pending_edus))
+                    for edu in pending_edus:
+                        sent_edus_by_type.labels(edu.edu_type).inc()
                     # Remove the acknowledged device messages from the database
                     # Only bother if we actually sent some device messages
                     if device_message_edus:
@@ -520,11 +536,21 @@ class TransactionQueue(object):
             )
         except FederationDeniedError as e:
             logger.info(e)
-        except Exception as e:
-            logger.warn(
-                "TX [%s] Failed to send transaction: %s",
+        except HttpResponseException as e:
+            logger.warning(
+                "TX [%s] Received %d response to transaction: %s",
+                destination, e.code, e,
+            )
+        except RequestSendFailed as e:
+            logger.warning("TX [%s] Failed to send transaction: %s", destination, e)
+
+            for p, _ in pending_pdus:
+                logger.info("Failed to send event %s to %s", p.event_id,
+                            destination)
+        except Exception:
+            logger.exception(
+                "TX [%s] Failed to send transaction",
                 destination,
-                e,
             )
             for p, _ in pending_pdus:
                 logger.info("Failed to send event %s to %s", p.event_id,
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index edba5a9808..8e2be218e2 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -21,7 +21,7 @@ from six.moves import urllib
 from twisted.internet import defer
 
 from synapse.api.constants import Membership
-from synapse.api.urls import FEDERATION_PREFIX as PREFIX
+from synapse.api.urls import FEDERATION_V1_PREFIX, FEDERATION_V2_PREFIX
 from synapse.util.logutils import log_function
 
 logger = logging.getLogger(__name__)
@@ -51,7 +51,7 @@ class TransportLayerClient(object):
         logger.debug("get_room_state dest=%s, room=%s",
                      destination, room_id)
 
-        path = _create_path(PREFIX, "/state/%s/", room_id)
+        path = _create_v1_path("/state/%s/", room_id)
         return self.client.get_json(
             destination, path=path, args={"event_id": event_id},
         )
@@ -73,7 +73,7 @@ class TransportLayerClient(object):
         logger.debug("get_room_state_ids dest=%s, room=%s",
                      destination, room_id)
 
-        path = _create_path(PREFIX, "/state_ids/%s/", room_id)
+        path = _create_v1_path("/state_ids/%s/", room_id)
         return self.client.get_json(
             destination, path=path, args={"event_id": event_id},
         )
@@ -95,7 +95,7 @@ class TransportLayerClient(object):
         logger.debug("get_pdu dest=%s, event_id=%s",
                      destination, event_id)
 
-        path = _create_path(PREFIX, "/event/%s/", event_id)
+        path = _create_v1_path("/event/%s/", event_id)
         return self.client.get_json(destination, path=path, timeout=timeout)
 
     @log_function
@@ -121,7 +121,7 @@ class TransportLayerClient(object):
             # TODO: raise?
             return
 
-        path = _create_path(PREFIX, "/backfill/%s/", room_id)
+        path = _create_v1_path("/backfill/%s/", room_id)
 
         args = {
             "v": event_tuples,
@@ -167,7 +167,7 @@ class TransportLayerClient(object):
         # generated by the json_data_callback.
         json_data = transaction.get_dict()
 
-        path = _create_path(PREFIX, "/send/%s/", transaction.transaction_id)
+        path = _create_v1_path("/send/%s/", transaction.transaction_id)
 
         response = yield self.client.put_json(
             transaction.destination,
@@ -184,7 +184,7 @@ class TransportLayerClient(object):
     @log_function
     def make_query(self, destination, query_type, args, retry_on_dns_fail,
                    ignore_backoff=False):
-        path = _create_path(PREFIX, "/query/%s", query_type)
+        path = _create_v1_path("/query/%s", query_type)
 
         content = yield self.client.get_json(
             destination=destination,
@@ -231,7 +231,7 @@ class TransportLayerClient(object):
                 "make_membership_event called with membership='%s', must be one of %s" %
                 (membership, ",".join(valid_memberships))
             )
-        path = _create_path(PREFIX, "/make_%s/%s/%s", membership, room_id, user_id)
+        path = _create_v1_path("/make_%s/%s/%s", membership, room_id, user_id)
 
         ignore_backoff = False
         retry_on_dns_fail = False
@@ -258,7 +258,7 @@ class TransportLayerClient(object):
     @defer.inlineCallbacks
     @log_function
     def send_join(self, destination, room_id, event_id, content):
-        path = _create_path(PREFIX, "/send_join/%s/%s", room_id, event_id)
+        path = _create_v1_path("/send_join/%s/%s", room_id, event_id)
 
         response = yield self.client.put_json(
             destination=destination,
@@ -271,7 +271,7 @@ class TransportLayerClient(object):
     @defer.inlineCallbacks
     @log_function
     def send_leave(self, destination, room_id, event_id, content):
-        path = _create_path(PREFIX, "/send_leave/%s/%s", room_id, event_id)
+        path = _create_v1_path("/send_leave/%s/%s", room_id, event_id)
 
         response = yield self.client.put_json(
             destination=destination,
@@ -289,8 +289,22 @@ class TransportLayerClient(object):
 
     @defer.inlineCallbacks
     @log_function
-    def send_invite(self, destination, room_id, event_id, content):
-        path = _create_path(PREFIX, "/invite/%s/%s", room_id, event_id)
+    def send_invite_v1(self, destination, room_id, event_id, content):
+        path = _create_v1_path("/invite/%s/%s", room_id, event_id)
+
+        response = yield self.client.put_json(
+            destination=destination,
+            path=path,
+            data=content,
+            ignore_backoff=True,
+        )
+
+        defer.returnValue(response)
+
+    @defer.inlineCallbacks
+    @log_function
+    def send_invite_v2(self, destination, room_id, event_id, content):
+        path = _create_v2_path("/invite/%s/%s", room_id, event_id)
 
         response = yield self.client.put_json(
             destination=destination,
@@ -306,7 +320,7 @@ class TransportLayerClient(object):
     def get_public_rooms(self, remote_server, limit, since_token,
                          search_filter=None, include_all_networks=False,
                          third_party_instance_id=None):
-        path = PREFIX + "/publicRooms"
+        path = _create_v1_path("/publicRooms")
 
         args = {
             "include_all_networks": "true" if include_all_networks else "false",
@@ -332,7 +346,7 @@ class TransportLayerClient(object):
     @defer.inlineCallbacks
     @log_function
     def exchange_third_party_invite(self, destination, room_id, event_dict):
-        path = _create_path(PREFIX, "/exchange_third_party_invite/%s", room_id,)
+        path = _create_v1_path("/exchange_third_party_invite/%s", room_id,)
 
         response = yield self.client.put_json(
             destination=destination,
@@ -345,7 +359,7 @@ class TransportLayerClient(object):
     @defer.inlineCallbacks
     @log_function
     def get_event_auth(self, destination, room_id, event_id):
-        path = _create_path(PREFIX, "/event_auth/%s/%s", room_id, event_id)
+        path = _create_v1_path("/event_auth/%s/%s", room_id, event_id)
 
         content = yield self.client.get_json(
             destination=destination,
@@ -357,7 +371,7 @@ class TransportLayerClient(object):
     @defer.inlineCallbacks
     @log_function
     def send_query_auth(self, destination, room_id, event_id, content):
-        path = _create_path(PREFIX, "/query_auth/%s/%s", room_id, event_id)
+        path = _create_v1_path("/query_auth/%s/%s", room_id, event_id)
 
         content = yield self.client.post_json(
             destination=destination,
@@ -392,7 +406,7 @@ class TransportLayerClient(object):
         Returns:
             A dict containing the device keys.
         """
-        path = PREFIX + "/user/keys/query"
+        path = _create_v1_path("/user/keys/query")
 
         content = yield self.client.post_json(
             destination=destination,
@@ -419,7 +433,7 @@ class TransportLayerClient(object):
         Returns:
             A dict containing the device keys.
         """
-        path = _create_path(PREFIX, "/user/devices/%s", user_id)
+        path = _create_v1_path("/user/devices/%s", user_id)
 
         content = yield self.client.get_json(
             destination=destination,
@@ -455,7 +469,7 @@ class TransportLayerClient(object):
             A dict containing the one-time keys.
         """
 
-        path = PREFIX + "/user/keys/claim"
+        path = _create_v1_path("/user/keys/claim")
 
         content = yield self.client.post_json(
             destination=destination,
@@ -469,7 +483,7 @@ class TransportLayerClient(object):
     @log_function
     def get_missing_events(self, destination, room_id, earliest_events,
                            latest_events, limit, min_depth, timeout):
-        path = _create_path(PREFIX, "/get_missing_events/%s", room_id,)
+        path = _create_v1_path("/get_missing_events/%s", room_id,)
 
         content = yield self.client.post_json(
             destination=destination,
@@ -489,7 +503,7 @@ class TransportLayerClient(object):
     def get_group_profile(self, destination, group_id, requester_user_id):
         """Get a group profile
         """
-        path = _create_path(PREFIX, "/groups/%s/profile", group_id,)
+        path = _create_v1_path("/groups/%s/profile", group_id,)
 
         return self.client.get_json(
             destination=destination,
@@ -508,7 +522,7 @@ class TransportLayerClient(object):
             requester_user_id (str)
             content (dict): The new profile of the group
         """
-        path = _create_path(PREFIX, "/groups/%s/profile", group_id,)
+        path = _create_v1_path("/groups/%s/profile", group_id,)
 
         return self.client.post_json(
             destination=destination,
@@ -522,7 +536,7 @@ class TransportLayerClient(object):
     def get_group_summary(self, destination, group_id, requester_user_id):
         """Get a group summary
         """
-        path = _create_path(PREFIX, "/groups/%s/summary", group_id,)
+        path = _create_v1_path("/groups/%s/summary", group_id,)
 
         return self.client.get_json(
             destination=destination,
@@ -535,7 +549,7 @@ class TransportLayerClient(object):
     def get_rooms_in_group(self, destination, group_id, requester_user_id):
         """Get all rooms in a group
         """
-        path = _create_path(PREFIX, "/groups/%s/rooms", group_id,)
+        path = _create_v1_path("/groups/%s/rooms", group_id,)
 
         return self.client.get_json(
             destination=destination,
@@ -548,7 +562,7 @@ class TransportLayerClient(object):
                           content):
         """Add a room to a group
         """
-        path = _create_path(PREFIX, "/groups/%s/room/%s", group_id, room_id,)
+        path = _create_v1_path("/groups/%s/room/%s", group_id, room_id,)
 
         return self.client.post_json(
             destination=destination,
@@ -562,8 +576,8 @@ class TransportLayerClient(object):
                              config_key, content):
         """Update room in group
         """
-        path = _create_path(
-            PREFIX, "/groups/%s/room/%s/config/%s",
+        path = _create_v1_path(
+            "/groups/%s/room/%s/config/%s",
             group_id, room_id, config_key,
         )
 
@@ -578,7 +592,7 @@ class TransportLayerClient(object):
     def remove_room_from_group(self, destination, group_id, requester_user_id, room_id):
         """Remove a room from a group
         """
-        path = _create_path(PREFIX, "/groups/%s/room/%s", group_id, room_id,)
+        path = _create_v1_path("/groups/%s/room/%s", group_id, room_id,)
 
         return self.client.delete_json(
             destination=destination,
@@ -591,7 +605,7 @@ class TransportLayerClient(object):
     def get_users_in_group(self, destination, group_id, requester_user_id):
         """Get users in a group
         """
-        path = _create_path(PREFIX, "/groups/%s/users", group_id,)
+        path = _create_v1_path("/groups/%s/users", group_id,)
 
         return self.client.get_json(
             destination=destination,
@@ -604,7 +618,7 @@ class TransportLayerClient(object):
     def get_invited_users_in_group(self, destination, group_id, requester_user_id):
         """Get users that have been invited to a group
         """
-        path = _create_path(PREFIX, "/groups/%s/invited_users", group_id,)
+        path = _create_v1_path("/groups/%s/invited_users", group_id,)
 
         return self.client.get_json(
             destination=destination,
@@ -617,8 +631,8 @@ class TransportLayerClient(object):
     def accept_group_invite(self, destination, group_id, user_id, content):
         """Accept a group invite
         """
-        path = _create_path(
-            PREFIX, "/groups/%s/users/%s/accept_invite",
+        path = _create_v1_path(
+            "/groups/%s/users/%s/accept_invite",
             group_id, user_id,
         )
 
@@ -633,7 +647,7 @@ class TransportLayerClient(object):
     def join_group(self, destination, group_id, user_id, content):
         """Attempts to join a group
         """
-        path = _create_path(PREFIX, "/groups/%s/users/%s/join", group_id, user_id)
+        path = _create_v1_path("/groups/%s/users/%s/join", group_id, user_id)
 
         return self.client.post_json(
             destination=destination,
@@ -646,7 +660,7 @@ class TransportLayerClient(object):
     def invite_to_group(self, destination, group_id, user_id, requester_user_id, content):
         """Invite a user to a group
         """
-        path = _create_path(PREFIX, "/groups/%s/users/%s/invite", group_id, user_id)
+        path = _create_v1_path("/groups/%s/users/%s/invite", group_id, user_id)
 
         return self.client.post_json(
             destination=destination,
@@ -662,7 +676,7 @@ class TransportLayerClient(object):
         invited.
         """
 
-        path = _create_path(PREFIX, "/groups/local/%s/users/%s/invite", group_id, user_id)
+        path = _create_v1_path("/groups/local/%s/users/%s/invite", group_id, user_id)
 
         return self.client.post_json(
             destination=destination,
@@ -676,7 +690,7 @@ class TransportLayerClient(object):
                                user_id, content):
         """Remove a user fron a group
         """
-        path = _create_path(PREFIX, "/groups/%s/users/%s/remove", group_id, user_id)
+        path = _create_v1_path("/groups/%s/users/%s/remove", group_id, user_id)
 
         return self.client.post_json(
             destination=destination,
@@ -693,7 +707,7 @@ class TransportLayerClient(object):
         kicked from the group.
         """
 
-        path = _create_path(PREFIX, "/groups/local/%s/users/%s/remove", group_id, user_id)
+        path = _create_v1_path("/groups/local/%s/users/%s/remove", group_id, user_id)
 
         return self.client.post_json(
             destination=destination,
@@ -708,7 +722,7 @@ class TransportLayerClient(object):
         the attestations
         """
 
-        path = _create_path(PREFIX, "/groups/%s/renew_attestation/%s", group_id, user_id)
+        path = _create_v1_path("/groups/%s/renew_attestation/%s", group_id, user_id)
 
         return self.client.post_json(
             destination=destination,
@@ -723,12 +737,12 @@ class TransportLayerClient(object):
         """Update a room entry in a group summary
         """
         if category_id:
-            path = _create_path(
-                PREFIX, "/groups/%s/summary/categories/%s/rooms/%s",
+            path = _create_v1_path(
+                "/groups/%s/summary/categories/%s/rooms/%s",
                 group_id, category_id, room_id,
             )
         else:
-            path = _create_path(PREFIX, "/groups/%s/summary/rooms/%s", group_id, room_id,)
+            path = _create_v1_path("/groups/%s/summary/rooms/%s", group_id, room_id,)
 
         return self.client.post_json(
             destination=destination,
@@ -744,12 +758,12 @@ class TransportLayerClient(object):
         """Delete a room entry in a group summary
         """
         if category_id:
-            path = _create_path(
-                PREFIX + "/groups/%s/summary/categories/%s/rooms/%s",
+            path = _create_v1_path(
+                "/groups/%s/summary/categories/%s/rooms/%s",
                 group_id, category_id, room_id,
             )
         else:
-            path = _create_path(PREFIX, "/groups/%s/summary/rooms/%s", group_id, room_id,)
+            path = _create_v1_path("/groups/%s/summary/rooms/%s", group_id, room_id,)
 
         return self.client.delete_json(
             destination=destination,
@@ -762,7 +776,7 @@ class TransportLayerClient(object):
     def get_group_categories(self, destination, group_id, requester_user_id):
         """Get all categories in a group
         """
-        path = _create_path(PREFIX, "/groups/%s/categories", group_id,)
+        path = _create_v1_path("/groups/%s/categories", group_id,)
 
         return self.client.get_json(
             destination=destination,
@@ -775,7 +789,7 @@ class TransportLayerClient(object):
     def get_group_category(self, destination, group_id, requester_user_id, category_id):
         """Get category info in a group
         """
-        path = _create_path(PREFIX, "/groups/%s/categories/%s", group_id, category_id,)
+        path = _create_v1_path("/groups/%s/categories/%s", group_id, category_id,)
 
         return self.client.get_json(
             destination=destination,
@@ -789,7 +803,7 @@ class TransportLayerClient(object):
                               content):
         """Update a category in a group
         """
-        path = _create_path(PREFIX, "/groups/%s/categories/%s", group_id, category_id,)
+        path = _create_v1_path("/groups/%s/categories/%s", group_id, category_id,)
 
         return self.client.post_json(
             destination=destination,
@@ -804,7 +818,7 @@ class TransportLayerClient(object):
                               category_id):
         """Delete a category in a group
         """
-        path = _create_path(PREFIX, "/groups/%s/categories/%s", group_id, category_id,)
+        path = _create_v1_path("/groups/%s/categories/%s", group_id, category_id,)
 
         return self.client.delete_json(
             destination=destination,
@@ -817,7 +831,7 @@ class TransportLayerClient(object):
     def get_group_roles(self, destination, group_id, requester_user_id):
         """Get all roles in a group
         """
-        path = _create_path(PREFIX, "/groups/%s/roles", group_id,)
+        path = _create_v1_path("/groups/%s/roles", group_id,)
 
         return self.client.get_json(
             destination=destination,
@@ -830,7 +844,7 @@ class TransportLayerClient(object):
     def get_group_role(self, destination, group_id, requester_user_id, role_id):
         """Get a roles info
         """
-        path = _create_path(PREFIX, "/groups/%s/roles/%s", group_id, role_id,)
+        path = _create_v1_path("/groups/%s/roles/%s", group_id, role_id,)
 
         return self.client.get_json(
             destination=destination,
@@ -844,7 +858,7 @@ class TransportLayerClient(object):
                           content):
         """Update a role in a group
         """
-        path = _create_path(PREFIX, "/groups/%s/roles/%s", group_id, role_id,)
+        path = _create_v1_path("/groups/%s/roles/%s", group_id, role_id,)
 
         return self.client.post_json(
             destination=destination,
@@ -858,7 +872,7 @@ class TransportLayerClient(object):
     def delete_group_role(self, destination, group_id, requester_user_id, role_id):
         """Delete a role in a group
         """
-        path = _create_path(PREFIX, "/groups/%s/roles/%s", group_id, role_id,)
+        path = _create_v1_path("/groups/%s/roles/%s", group_id, role_id,)
 
         return self.client.delete_json(
             destination=destination,
@@ -873,12 +887,12 @@ class TransportLayerClient(object):
         """Update a users entry in a group
         """
         if role_id:
-            path = _create_path(
-                PREFIX, "/groups/%s/summary/roles/%s/users/%s",
+            path = _create_v1_path(
+                "/groups/%s/summary/roles/%s/users/%s",
                 group_id, role_id, user_id,
             )
         else:
-            path = _create_path(PREFIX, "/groups/%s/summary/users/%s", group_id, user_id,)
+            path = _create_v1_path("/groups/%s/summary/users/%s", group_id, user_id,)
 
         return self.client.post_json(
             destination=destination,
@@ -893,7 +907,7 @@ class TransportLayerClient(object):
                               content):
         """Sets the join policy for a group
         """
-        path = _create_path(PREFIX, "/groups/%s/settings/m.join_policy", group_id,)
+        path = _create_v1_path("/groups/%s/settings/m.join_policy", group_id,)
 
         return self.client.put_json(
             destination=destination,
@@ -909,12 +923,12 @@ class TransportLayerClient(object):
         """Delete a users entry in a group
         """
         if role_id:
-            path = _create_path(
-                PREFIX, "/groups/%s/summary/roles/%s/users/%s",
+            path = _create_v1_path(
+                "/groups/%s/summary/roles/%s/users/%s",
                 group_id, role_id, user_id,
             )
         else:
-            path = _create_path(PREFIX, "/groups/%s/summary/users/%s", group_id, user_id,)
+            path = _create_v1_path("/groups/%s/summary/users/%s", group_id, user_id,)
 
         return self.client.delete_json(
             destination=destination,
@@ -927,7 +941,7 @@ class TransportLayerClient(object):
         """Get the groups a list of users are publicising
         """
 
-        path = PREFIX + "/get_groups_publicised"
+        path = _create_v1_path("/get_groups_publicised")
 
         content = {"user_ids": user_ids}
 
@@ -939,20 +953,43 @@ class TransportLayerClient(object):
         )
 
 
-def _create_path(prefix, path, *args):
-    """Creates a path from the prefix, path template and args. Ensures that
-    all args are url encoded.
+def _create_v1_path(path, *args):
+    """Creates a path against V1 federation API from the path template and
+    args. Ensures that all args are url encoded.
+
+    Example:
+
+        _create_v1_path("/event/%s/", event_id)
+
+    Args:
+        path (str): String template for the path
+        args: ([str]): Args to insert into path. Each arg will be url encoded
+
+    Returns:
+        str
+    """
+    return (
+        FEDERATION_V1_PREFIX
+        + path % tuple(urllib.parse.quote(arg, "") for arg in args)
+    )
+
+
+def _create_v2_path(path, *args):
+    """Creates a path against V2 federation API from the path template and
+    args. Ensures that all args are url encoded.
 
     Example:
 
-        _create_path(PREFIX, "/event/%s/", event_id)
+        _create_v2_path("/event/%s/", event_id)
 
     Args:
-        prefix (str)
         path (str): String template for the path
         args: ([str]): Args to insert into path. Each arg will be url encoded
 
     Returns:
         str
     """
-    return prefix + path % tuple(urllib.parse.quote(arg, "") for arg in args)
+    return (
+        FEDERATION_V2_PREFIX
+        + path % tuple(urllib.parse.quote(arg, "") for arg in args)
+    )
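
The old `_create_path(prefix, ...)` helper is split into prefix-specific builders so call sites can't mix v1 and v2 routes by accident; both still URL-encode every argument, including any embedded slashes. A self-contained sketch of the same idea (the prefix values are assumptions matching the Matrix federation API layout):

    # Sketch of the prefix-specific path builders. Prefix strings are
    # assumed, standing in for FEDERATION_V1_PREFIX / FEDERATION_V2_PREFIX.
    from six.moves import urllib

    FEDERATION_V1_PREFIX = "/_matrix/federation/v1"
    FEDERATION_V2_PREFIX = "/_matrix/federation/v2"

    def _create_path(prefix, path, *args):
        # quote(arg, "") also escapes "/", so IDs can't smuggle in extra
        # path segments.
        return prefix + path % tuple(urllib.parse.quote(arg, "") for arg in args)

    def _create_v1_path(path, *args):
        return _create_path(FEDERATION_V1_PREFIX, path, *args)

    def _create_v2_path(path, *args):
        return _create_path(FEDERATION_V2_PREFIX, path, *args)

    assert (_create_v1_path("/event/%s/", "$ev/1:ex.org")
            == "/_matrix/federation/v1/event/%24ev%2F1%3Aex.org/")
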
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index 6d4a26f595..5ba94be2ec 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -21,8 +21,9 @@ import re
 from twisted.internet import defer
 
 import synapse
+from synapse.api.constants import RoomVersions
 from synapse.api.errors import Codes, FederationDeniedError, SynapseError
-from synapse.api.urls import FEDERATION_PREFIX as PREFIX
+from synapse.api.urls import FEDERATION_V1_PREFIX, FEDERATION_V2_PREFIX
 from synapse.http.endpoint import parse_and_validate_server_name
 from synapse.http.server import JsonResource
 from synapse.http.servlet import (
@@ -42,9 +43,20 @@ logger = logging.getLogger(__name__)
 class TransportLayerServer(JsonResource):
     """Handles incoming federation HTTP requests"""
 
-    def __init__(self, hs):
+    def __init__(self, hs, servlet_groups=None):
+        """Initialize the TransportLayerServer
+
+        Will by default register all servlets. For custom behaviour, pass in
+        a list of servlet_groups to register.
+
+        Args:
+            hs (synapse.server.HomeServer): homeserver
+            servlet_groups (list[str], optional): List of servlet groups to register.
+                Defaults to ``DEFAULT_SERVLET_GROUPS``.
+        """
         self.hs = hs
         self.clock = hs.get_clock()
+        self.servlet_groups = servlet_groups
 
         super(TransportLayerServer, self).__init__(hs, canonical_json=False)
 
@@ -66,6 +78,7 @@ class TransportLayerServer(JsonResource):
             resource=self,
             ratelimiter=self.ratelimiter,
             authenticator=self.authenticator,
+            servlet_groups=self.servlet_groups,
         )
 
 
@@ -227,6 +240,8 @@ class BaseFederationServlet(object):
     """
     REQUIRE_AUTH = True
 
+    PREFIX = FEDERATION_V1_PREFIX  # Allows specifying the API version
+
     def __init__(self, handler, authenticator, ratelimiter, server_name):
         self.handler = handler
         self.authenticator = authenticator
@@ -286,7 +301,7 @@ class BaseFederationServlet(object):
         return new_func
 
     def register(self, server):
-        pattern = re.compile("^" + PREFIX + self.PATH + "$")
+        pattern = re.compile("^" + self.PREFIX + self.PATH + "$")
 
         for method in ("GET", "PUT", "POST"):
             code = getattr(self, "on_%s" % (method), None)
@@ -362,14 +377,6 @@ class FederationSendServlet(BaseFederationServlet):
         defer.returnValue((code, response))
 
 
-class FederationPullServlet(BaseFederationServlet):
-    PATH = "/pull/"
-
-    # This is for when someone asks us for everything since version X
-    def on_GET(self, origin, content, query):
-        return self.handler.on_pull_request(query["origin"][0], query["v"])
-
-
 class FederationEventServlet(BaseFederationServlet):
     PATH = "/event/(?P<event_id>[^/]*)/"
 
@@ -474,7 +481,7 @@ class FederationSendLeaveServlet(BaseFederationServlet):
 
     @defer.inlineCallbacks
     def on_PUT(self, origin, content, query, room_id, event_id):
-        content = yield self.handler.on_send_leave_request(origin, content)
+        content = yield self.handler.on_send_leave_request(origin, content, room_id)
         defer.returnValue((200, content))
 
 
@@ -492,18 +499,50 @@ class FederationSendJoinServlet(BaseFederationServlet):
     def on_PUT(self, origin, content, query, context, event_id):
         # TODO(paul): assert that context/event_id parsed from path actually
         #   match those given in content
-        content = yield self.handler.on_send_join_request(origin, content)
+        content = yield self.handler.on_send_join_request(origin, content, context)
         defer.returnValue((200, content))
 
 
-class FederationInviteServlet(BaseFederationServlet):
+class FederationV1InviteServlet(BaseFederationServlet):
     PATH = "/invite/(?P<context>[^/]*)/(?P<event_id>[^/]*)"
 
     @defer.inlineCallbacks
     def on_PUT(self, origin, content, query, context, event_id):
+        # We don't get a room version, so we have to assume it's EITHER v1 or
+        # v2. This is "fine" as the only difference between V1 and V2 is the
+        # state resolution algorithm, and we don't use that for processing
+        # invites.
+        content = yield self.handler.on_invite_request(
+            origin, content, room_version=RoomVersions.V1,
+        )
+
+        # V1 federation API is defined to return a content of `[200, {...}]`
+        # due to a historical bug.
+        defer.returnValue((200, (200, content)))
+
+
+class FederationV2InviteServlet(BaseFederationServlet):
+    PATH = "/invite/(?P<context>[^/]*)/(?P<event_id>[^/]*)"
+
+    PREFIX = FEDERATION_V2_PREFIX
+
+    @defer.inlineCallbacks
+    def on_PUT(self, origin, content, query, context, event_id):
         # TODO(paul): assert that context/event_id parsed from path actually
         #   match those given in content
-        content = yield self.handler.on_invite_request(origin, content)
+
+        room_version = content["room_version"]
+        event = content["event"]
+        invite_room_state = content["invite_room_state"]
+
+        # Synapse expects invite_room_state to be in unsigned, as it is in v1
+        # API
+
+        event.setdefault("unsigned", {})["invite_room_state"] = invite_room_state
+
+        content = yield self.handler.on_invite_request(
+            origin, event, room_version=room_version,
+        )
         defer.returnValue((200, content))
 
 
@@ -1262,7 +1301,6 @@ class FederationGroupsSettingJoinPolicyServlet(BaseFederationServlet):
 
 FEDERATION_SERVLET_CLASSES = (
     FederationSendServlet,
-    FederationPullServlet,
     FederationEventServlet,
     FederationStateServlet,
     FederationStateIdsServlet,
@@ -1273,7 +1311,8 @@ FEDERATION_SERVLET_CLASSES = (
     FederationEventServlet,
     FederationSendJoinServlet,
     FederationSendLeaveServlet,
-    FederationInviteServlet,
+    FederationV1InviteServlet,
+    FederationV2InviteServlet,
     FederationQueryAuthServlet,
     FederationGetMissingEventsServlet,
     FederationEventAuthServlet,
@@ -1282,10 +1321,12 @@ FEDERATION_SERVLET_CLASSES = (
     FederationClientKeysClaimServlet,
     FederationThirdPartyInviteExchangeServlet,
     On3pidBindServlet,
-    OpenIdUserInfo,
     FederationVersionServlet,
 )
 
+OPENID_SERVLET_CLASSES = (
+    OpenIdUserInfo,
+)
 
 ROOM_LIST_CLASSES = (
     PublicRoomList,
@@ -1324,44 +1365,83 @@ GROUP_ATTESTATION_SERVLET_CLASSES = (
     FederationGroupsRenewAttestaionServlet,
 )
 
+DEFAULT_SERVLET_GROUPS = (
+    "federation",
+    "room_list",
+    "group_server",
+    "group_local",
+    "group_attestation",
+    "openid",
+)
+
 
-def register_servlets(hs, resource, authenticator, ratelimiter):
-    for servletclass in FEDERATION_SERVLET_CLASSES:
-        servletclass(
-            handler=hs.get_federation_server(),
-            authenticator=authenticator,
-            ratelimiter=ratelimiter,
-            server_name=hs.hostname,
-        ).register(resource)
-
-    for servletclass in ROOM_LIST_CLASSES:
-        servletclass(
-            handler=hs.get_room_list_handler(),
-            authenticator=authenticator,
-            ratelimiter=ratelimiter,
-            server_name=hs.hostname,
-        ).register(resource)
-
-    for servletclass in GROUP_SERVER_SERVLET_CLASSES:
-        servletclass(
-            handler=hs.get_groups_server_handler(),
-            authenticator=authenticator,
-            ratelimiter=ratelimiter,
-            server_name=hs.hostname,
-        ).register(resource)
-
-    for servletclass in GROUP_LOCAL_SERVLET_CLASSES:
-        servletclass(
-            handler=hs.get_groups_local_handler(),
-            authenticator=authenticator,
-            ratelimiter=ratelimiter,
-            server_name=hs.hostname,
-        ).register(resource)
-
-    for servletclass in GROUP_ATTESTATION_SERVLET_CLASSES:
-        servletclass(
-            handler=hs.get_groups_attestation_renewer(),
-            authenticator=authenticator,
-            ratelimiter=ratelimiter,
-            server_name=hs.hostname,
-        ).register(resource)
+def register_servlets(hs, resource, authenticator, ratelimiter, servlet_groups=None):
+    """Initialize and register servlet classes.
+
+    Will by default register all servlets. For custom behaviour, pass in
+    a list of servlet_groups to register.
+
+    Args:
+        hs (synapse.server.HomeServer): homeserver
+        resource (TransportLayerServer): resource class to register to
+        authenticator (Authenticator): authenticator to use
+        ratelimiter (util.ratelimitutils.FederationRateLimiter): ratelimiter to use
+        servlet_groups (list[str], optional): List of servlet groups to register.
+            Defaults to ``DEFAULT_SERVLET_GROUPS``.
+    """
+    if not servlet_groups:
+        servlet_groups = DEFAULT_SERVLET_GROUPS
+
+    if "federation" in servlet_groups:
+        for servletclass in FEDERATION_SERVLET_CLASSES:
+            servletclass(
+                handler=hs.get_federation_server(),
+                authenticator=authenticator,
+                ratelimiter=ratelimiter,
+                server_name=hs.hostname,
+            ).register(resource)
+
+    if "openid" in servlet_groups:
+        for servletclass in OPENID_SERVLET_CLASSES:
+            servletclass(
+                handler=hs.get_federation_server(),
+                authenticator=authenticator,
+                ratelimiter=ratelimiter,
+                server_name=hs.hostname,
+            ).register(resource)
+
+    if "room_list" in servlet_groups:
+        for servletclass in ROOM_LIST_CLASSES:
+            servletclass(
+                handler=hs.get_room_list_handler(),
+                authenticator=authenticator,
+                ratelimiter=ratelimiter,
+                server_name=hs.hostname,
+            ).register(resource)
+
+    if "group_server" in servlet_groups:
+        for servletclass in GROUP_SERVER_SERVLET_CLASSES:
+            servletclass(
+                handler=hs.get_groups_server_handler(),
+                authenticator=authenticator,
+                ratelimiter=ratelimiter,
+                server_name=hs.hostname,
+            ).register(resource)
+
+    if "group_local" in servlet_groups:
+        for servletclass in GROUP_LOCAL_SERVLET_CLASSES:
+            servletclass(
+                handler=hs.get_groups_local_handler(),
+                authenticator=authenticator,
+                ratelimiter=ratelimiter,
+                server_name=hs.hostname,
+            ).register(resource)
+
+    if "group_attestation" in servlet_groups:
+        for servletclass in GROUP_ATTESTATION_SERVLET_CLASSES:
+            servletclass(
+                handler=hs.get_groups_attestation_renewer(),
+                authenticator=authenticator,
+                ratelimiter=ratelimiter,
+                server_name=hs.hostname,
+            ).register(resource)
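
The servlet_groups plumbing above makes the federation transport composable: a listener can register only the groups it needs, and any falsy value falls back to DEFAULT_SERVLET_GROUPS. A hedged usage sketch (the wrapper function is hypothetical; TransportLayerServer and the "openid" group come from this diff):

    from synapse.federation.transport.server import TransportLayerServer

    def build_openid_only_resource(hs):
        # Register just the "openid" group (i.e. OpenIdUserInfo, newly split
        # out of FEDERATION_SERVLET_CLASSES) and nothing else.
        return TransportLayerServer(hs, servlet_groups=["openid"])
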
diff --git a/synapse/federation/units.py b/synapse/federation/units.py
index c5ab14314e..025a79c022 100644
--- a/synapse/federation/units.py
+++ b/synapse/federation/units.py
@@ -117,9 +117,6 @@ class Transaction(JsonEncodedObject):
                 "Require 'transaction_id' to construct a Transaction"
             )
 
-        for p in pdus:
-            p.transaction_id = kwargs["transaction_id"]
-
         kwargs["pdus"] = [p.get_pdu_json() for p in pdus]
 
         return Transaction(**kwargs)
diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py
index b04f4234ca..786149be65 100644
--- a/synapse/groups/attestations.py
+++ b/synapse/groups/attestations.py
@@ -42,7 +42,7 @@ from signedjson.sign import sign_json
 
 from twisted.internet import defer
 
-from synapse.api.errors import SynapseError
+from synapse.api.errors import RequestSendFailed, SynapseError
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.types import get_domain_from_id
 from synapse.util.logcontext import run_in_background
@@ -191,6 +191,11 @@ class GroupAttestionRenewer(object):
                 yield self.store.update_attestation_renewal(
                     group_id, user_id, attestation
                 )
+            except RequestSendFailed as e:
+                logger.warning(
+                    "Failed to renew attestation of %r in %r: %s",
+                    user_id, group_id, e,
+                )
             except Exception:
                 logger.exception("Error renewing attestation of %r in %r",
                                  user_id, group_id)
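
Ordering matters in the handler change above: RequestSendFailed subclasses Exception, so its clause must come before the blanket "except Exception" or it would never run. The net effect is that expected transport failures log a one-line warning while anything else keeps its full traceback, as in this illustrative reduction:

    import logging

    logger = logging.getLogger(__name__)

    class RequestSendFailed(Exception):
        """Stand-in for synapse.api.errors.RequestSendFailed."""

    def renew_one(attempt, user_id, group_id):
        try:
            attempt()
        except RequestSendFailed as e:
            # Remote was unreachable: expected, so warn without a traceback.
            logger.warning(
                "Failed to renew attestation of %r in %r: %s",
                user_id, group_id, e,
            )
        except Exception:
            # Anything else is unexpected: log with the full traceback.
            logger.exception(
                "Error renewing attestation of %r in %r", user_id, group_id,
            )
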
diff --git a/synapse/handlers/__init__.py b/synapse/handlers/__init__.py
index 413425fed1..2dd183018a 100644
--- a/synapse/handlers/__init__.py
+++ b/synapse/handlers/__init__.py
@@ -17,7 +17,6 @@ from .admin import AdminHandler
 from .directory import DirectoryHandler
 from .federation import FederationHandler
 from .identity import IdentityHandler
-from .register import RegistrationHandler
 from .search import SearchHandler
 
 
@@ -41,7 +40,6 @@ class Handlers(object):
     """
 
     def __init__(self, hs):
-        self.registration_handler = RegistrationHandler(hs)
         self.federation_handler = FederationHandler(hs)
         self.directory_handler = DirectoryHandler(hs)
         self.admin_handler = AdminHandler(hs)
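
With RegistrationHandler dropped from the Handlers grab-bag above, callers presumably obtain it from the HomeServer directly; the accessor below is an assumption, since the replacement call sites are not part of this diff:

    # Assumed replacement call site (not shown in this diff):
    registration_handler = hs.get_registration_handler()
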
diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py
index 704181d2d3..594754cfd8 100644
--- a/synapse/handlers/_base.py
+++ b/synapse/handlers/_base.py
@@ -167,4 +167,4 @@ class BaseHandler(object):
                     ratelimit=False,
                 )
             except Exception as e:
-                logger.warn("Error kicking guest user: %s" % (e,))
+                logger.exception("Error kicking guest user: %s", e)
diff --git a/synapse/handlers/acme.py b/synapse/handlers/acme.py
new file mode 100644
index 0000000000..813777bf18
--- /dev/null
+++ b/synapse/handlers/acme.py
@@ -0,0 +1,151 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+import attr
+from zope.interface import implementer
+
+import twisted
+import twisted.internet.error
+from twisted.internet import defer
+from twisted.python.filepath import FilePath
+from twisted.python.url import URL
+from twisted.web import server, static
+from twisted.web.resource import Resource
+
+from synapse.app import check_bind_error
+
+logger = logging.getLogger(__name__)
+
+try:
+    from txacme.interfaces import ICertificateStore
+
+    @attr.s
+    @implementer(ICertificateStore)
+    class ErsatzStore(object):
+        """
+        A store that only stores in memory.
+        """
+
+        certs = attr.ib(default=attr.Factory(dict))
+
+        def store(self, server_name, pem_objects):
+            self.certs[server_name] = [o.as_bytes() for o in pem_objects]
+            return defer.succeed(None)
+
+
+except ImportError:
+    # txacme is missing
+    pass
+
+
+class AcmeHandler(object):
+    def __init__(self, hs):
+        self.hs = hs
+        self.reactor = hs.get_reactor()
+        self._acme_domain = hs.config.acme_domain
+
+    @defer.inlineCallbacks
+    def start_listening(self):
+
+        # Configure logging for txacme, if you need to debug
+        # from eliot import add_destinations
+        # from eliot.twisted import TwistedDestination
+        #
+        # add_destinations(TwistedDestination())
+
+        from txacme.challenges import HTTP01Responder
+        from txacme.service import AcmeIssuingService
+        from txacme.endpoint import load_or_create_client_key
+        from txacme.client import Client
+        from josepy.jwa import RS256
+
+        self._store = ErsatzStore()
+        responder = HTTP01Responder()
+
+        self._issuer = AcmeIssuingService(
+            cert_store=self._store,
+            client_creator=(
+                lambda: Client.from_url(
+                    reactor=self.reactor,
+                    url=URL.from_text(self.hs.config.acme_url),
+                    key=load_or_create_client_key(
+                        FilePath(self.hs.config.config_dir_path)
+                    ),
+                    alg=RS256,
+                )
+            ),
+            clock=self.reactor,
+            responders=[responder],
+        )
+
+        well_known = Resource()
+        well_known.putChild(b'acme-challenge', responder.resource)
+        responder_resource = Resource()
+        responder_resource.putChild(b'.well-known', well_known)
+        responder_resource.putChild(b'check', static.Data(b'OK', b'text/plain'))
+
+        srv = server.Site(responder_resource)
+
+        bind_addresses = self.hs.config.acme_bind_addresses
+        for host in bind_addresses:
+            logger.info(
+                "Listening for ACME requests on %s:%i", host, self.hs.config.acme_port,
+            )
+            try:
+                self.reactor.listenTCP(
+                    self.hs.config.acme_port,
+                    srv,
+                    interface=host,
+                )
+            except twisted.internet.error.CannotListenError as e:
+                check_bind_error(e, host, bind_addresses)
+
+        # Make sure we are registered to the ACME server. There's no public API
+        # for this; it is usually triggered by startService, but since we don't
+        # want it to control where we save the certificates, we have to reach in
+        # and trigger the registration machinery ourselves.
+        self._issuer._registered = False
+        yield self._issuer._ensure_registered()
+
+    @defer.inlineCallbacks
+    def provision_certificate(self):
+
+        logger.warning("Reprovisioning %s", self._acme_domain)
+
+        try:
+            yield self._issuer.issue_cert(self._acme_domain)
+        except Exception:
+            logger.exception("Fail!")
+            raise
+        logger.warning("Reprovisioned %s, saving.", self._acme_domain)
+        cert_chain = self._store.certs[self._acme_domain]
+
+        try:
+            with open(self.hs.config.tls_private_key_file, "wb") as private_key_file:
+                for x in cert_chain:
+                    if x.startswith(b"-----BEGIN RSA PRIVATE KEY-----"):
+                        private_key_file.write(x)
+
+            with open(self.hs.config.tls_certificate_file, "wb") as certificate_file:
+                for x in cert_chain:
+                    if x.startswith(b"-----BEGIN CERTIFICATE-----"):
+                        certificate_file.write(x)
+        except Exception:
+            logger.exception("Failed saving!")
+            raise
+
+        defer.returnValue(True)
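
The call site that drives the new handler is outside this section, but its public surface is just the two methods above. A sketch of the expected startup sequence, under that assumption:

    from twisted.internet import defer

    from synapse.handlers.acme import AcmeHandler

    @defer.inlineCallbacks
    def provision_tls(hs):
        acme = AcmeHandler(hs)
        # Bind the HTTP-01 responder on acme_bind_addresses:acme_port and
        # register with the ACME server named by acme_url.
        yield acme.start_listening()
        # Issue a certificate for acme_domain; the handler writes the key
        # and the cert chain to tls_private_key_file / tls_certificate_file.
        yield acme.provision_certificate()
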
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index 329e3c7d71..2abd9af94f 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -59,6 +59,7 @@ class AuthHandler(BaseHandler):
             LoginType.EMAIL_IDENTITY: self._check_email_identity,
             LoginType.MSISDN: self._check_msisdn,
             LoginType.DUMMY: self._check_dummy_auth,
+            LoginType.TERMS: self._check_terms_auth,
         }
         self.bcrypt_rounds = hs.config.bcrypt_rounds
 
@@ -431,6 +432,9 @@ class AuthHandler(BaseHandler):
     def _check_dummy_auth(self, authdict, _):
         return defer.succeed(True)
 
+    def _check_terms_auth(self, authdict, _):
+        return defer.succeed(True)
+
     @defer.inlineCallbacks
     def _check_threepid(self, medium, authdict):
         if 'threepid_creds' not in authdict:
@@ -462,6 +466,22 @@ class AuthHandler(BaseHandler):
     def _get_params_recaptcha(self):
         return {"public_key": self.hs.config.recaptcha_public_key}
 
+    def _get_params_terms(self):
+        return {
+            "policies": {
+                "privacy_policy": {
+                    "version": self.hs.config.user_consent_version,
+                    "en": {
+                        "name": self.hs.config.user_consent_policy_name,
+                        "url": "%s_matrix/consent?v=%s" % (
+                            self.hs.config.public_baseurl,
+                            self.hs.config.user_consent_version,
+                        ),
+                    },
+                },
+            },
+        }
+
     def _auth_dict_for_flows(self, flows, session):
         public_flows = []
         for f in flows:
@@ -469,6 +489,7 @@ class AuthHandler(BaseHandler):
 
         get_params = {
             LoginType.RECAPTCHA: self._get_params_recaptcha,
+            LoginType.TERMS: self._get_params_terms,
         }
 
         params = {}
@@ -542,10 +563,10 @@ class AuthHandler(BaseHandler):
         insensitively, but return None if there are multiple inexact matches.
 
         Args:
-            (str) user_id: complete @user:id
+            (unicode|bytes) user_id: complete @user:id
 
         Returns:
-            defer.Deferred: (str) canonical_user_id, or None if zero or
+            defer.Deferred: (unicode) canonical_user_id, or None if zero or
             multiple matches
         """
         res = yield self._find_user_id_and_pwd_hash(user_id)
@@ -933,6 +954,15 @@ class MacaroonGenerator(object):
         return macaroon.serialize()
 
     def generate_short_term_login_token(self, user_id, duration_in_ms=(2 * 60 * 1000)):
+        """
+
+        Args:
+            user_id (unicode):
+            duration_in_ms (int):
+
+        Returns:
+            unicode
+        """
         macaroon = self._generate_base_macaroon(user_id)
         macaroon.add_first_party_caveat("type = login")
         now = self.hs.get_clock().time_msec()
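
For the new m.login.terms stage, _get_params_terms above defines the parameters a client receives during user-interactive auth. With illustrative config values, the stage's params come out as:

    terms_params = {
        "policies": {
            "privacy_policy": {
                "version": "1.0",              # user_consent_version
                "en": {
                    "name": "Privacy Policy",  # user_consent_policy_name
                    # public_baseurl + "_matrix/consent?v=" + version
                    "url": "https://example.com/_matrix/consent?v=1.0",
                },
            },
        },
    }
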
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index 4fb2db45e3..b711ce1e86 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -20,7 +20,11 @@ from twisted.internet import defer
 
 from synapse.api import errors
 from synapse.api.constants import EventTypes
-from synapse.api.errors import FederationDeniedError
+from synapse.api.errors import (
+    FederationDeniedError,
+    HttpResponseException,
+    RequestSendFailed,
+)
 from synapse.types import RoomStreamToken, get_domain_from_id
 from synapse.util import stringutils
 from synapse.util.async_helpers import Linearizer
@@ -529,13 +533,13 @@ class DeviceListEduUpdater(object):
                 origin = get_domain_from_id(user_id)
                 try:
                     result = yield self.federation.query_user_devices(origin, user_id)
-                except NotRetryingDestination:
+                except (
+                    NotRetryingDestination, RequestSendFailed, HttpResponseException,
+                ):
                     # TODO: Remember that we are now out of sync and try again
                     # later
                     logger.warn(
-                        "Failed to handle device list update for %s,"
-                        " we're not retrying the remote",
-                        user_id,
+                        "Failed to handle device list update for %s", user_id,
                     )
                     # We abort on exceptions rather than accepting the update
                     # as otherwise synapse will 'forget' that its device list
@@ -564,6 +568,24 @@ class DeviceListEduUpdater(object):
                         user_id, device["device_id"], stream_id,
                     )
 
+                # If the remote server has more than ~1000 devices for this user
+                # we assume that something is going horribly wrong (e.g. a bot
+                # that logs in and creates a new device every time it tries to
+                # send a message).  Maintaining lots of devices per user in the
+                # cache can cause serious performance issues: if this request
+                # takes more than 60s to complete, internal replication from the
+                # inbound federation worker to the synapse master may time out,
+                # causing the inbound federation request to fail and the remote
+                # server to retry, causing a DoS.  So in this scenario we give
+                # up on storing the total list of devices and only handle the
+                # delta instead.
+                if len(devices) > 1000:
+                    logger.warn(
+                        "Ignoring device list snapshot for %s as it has >1K devs (%d)",
+                        user_id, len(devices)
+                    )
+                    devices = []
+
                 yield self.store.update_remote_device_list_cache(
                     user_id, devices, stream_id,
                 )
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index 7d67bf803a..8b113307d2 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -57,8 +57,8 @@ class DirectoryHandler(BaseHandler):
         # general association creation for both human users and app services
 
         for wchar in string.whitespace:
-                if wchar in room_alias.localpart:
-                    raise SynapseError(400, "Invalid characters in room alias")
+            if wchar in room_alias.localpart:
+                raise SynapseError(400, "Invalid characters in room alias")
 
         if not self.hs.is_mine(room_alias):
             raise SynapseError(400, "Room alias must be local")
@@ -112,7 +112,9 @@ class DirectoryHandler(BaseHandler):
                     403, "This user is not permitted to create this alias",
                 )
 
-            if not self.config.is_alias_creation_allowed(user_id, room_alias.to_string()):
+            if not self.config.is_alias_creation_allowed(
+                user_id, room_id, room_alias.to_string(),
+            ):
                 # Lets just return a generic message, as there may be all sorts of
                 # reasons why we said no. TODO: Allow configurable error messages
                 # per alias creation rule?
@@ -138,9 +140,30 @@ class DirectoryHandler(BaseHandler):
             )
 
     @defer.inlineCallbacks
-    def delete_association(self, requester, room_alias):
-        # association deletion for human users
+    def delete_association(self, requester, room_alias, send_event=True):
+        """Remove an alias from the directory
 
+        (this is only meant for human users; AS users should call
+        delete_appservice_association)
+
+        Args:
+            requester (Requester):
+            room_alias (RoomAlias):
+            send_event (bool): Whether to send an updated m.room.aliases event.
+                Note that, if we delete the canonical alias, we will always attempt
+                to send an m.room.canonical_alias event
+
+        Returns:
+            Deferred[unicode]: room id that the alias used to point to
+
+        Raises:
+            NotFoundError: if the alias doesn't exist
+
+            AuthError: if the user doesn't have perms to delete the alias (i.e. the user
+                is neither the creator of the alias, nor a server admin).
+
+            SynapseError: if the alias belongs to an AS
+        """
         user_id = requester.user.to_string()
 
         try:
@@ -168,10 +191,11 @@ class DirectoryHandler(BaseHandler):
         room_id = yield self._delete_association(room_alias)
 
         try:
-            yield self.send_room_alias_update_event(
-                requester,
-                room_id
-            )
+            if send_event:
+                yield self.send_room_alias_update_event(
+                    requester,
+                    room_id
+                )
 
             yield self._update_canonical_alias(
                 requester,
@@ -373,9 +397,9 @@ class DirectoryHandler(BaseHandler):
         room_id (str)
         visibility (str): "public" or "private"
         """
-        if not self.spam_checker.user_may_publish_room(
-            requester.user.to_string(), room_id
-        ):
+        user_id = requester.user.to_string()
+
+        if not self.spam_checker.user_may_publish_room(user_id, room_id):
             raise AuthError(
                 403,
                 "This user is not permitted to publish rooms to the room list"
@@ -393,7 +417,24 @@ class DirectoryHandler(BaseHandler):
 
         yield self.auth.check_can_change_room_list(room_id, requester.user)
 
-        yield self.store.set_room_is_public(room_id, visibility == "public")
+        making_public = visibility == "public"
+        if making_public:
+            room_aliases = yield self.store.get_aliases_for_room(room_id)
+            canonical_alias = yield self.store.get_canonical_alias_for_room(room_id)
+            if canonical_alias:
+                room_aliases.append(canonical_alias)
+
+            if not self.config.is_publishing_room_allowed(
+                user_id, room_id, room_aliases,
+            ):
+                # Lets just return a generic message, as there may be all sorts of
+                # reasons why we said no. TODO: Allow configurable error messages
+                # per alias creation rule?
+                raise SynapseError(
+                    403, "Not allowed to publish room",
+                )
+
+        yield self.store.set_room_is_public(room_id, making_public)
 
     @defer.inlineCallbacks
     def edit_published_appservice_room_list(self, appservice_id, network_id,
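
The visibility change above now gates publication to the room list on a config-level rule: the room's aliases, plus its canonical alias if set, are gathered and handed to is_publishing_room_allowed. Reduced to its essentials (a sketch; the store methods are stubbed by name only):

    from twisted.internet import defer

    @defer.inlineCallbacks
    def _can_publish(self, user_id, room_id):
        room_aliases = yield self.store.get_aliases_for_room(room_id)
        canonical_alias = yield self.store.get_canonical_alias_for_room(room_id)
        if canonical_alias:
            room_aliases.append(canonical_alias)
        # is_publishing_room_allowed applies the server's publication rules
        # and returns a bool; a False here surfaces as a generic 403.
        defer.returnValue(
            self.config.is_publishing_room_allowed(user_id, room_id, room_aliases)
        )
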
diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py
index 5edb3cfe04..7bc174070e 100644
--- a/synapse/handlers/e2e_room_keys.py
+++ b/synapse/handlers/e2e_room_keys.py
@@ -19,7 +19,13 @@ from six import iteritems
 
 from twisted.internet import defer
 
-from synapse.api.errors import RoomKeysVersionError, StoreError, SynapseError
+from synapse.api.errors import (
+    Codes,
+    NotFoundError,
+    RoomKeysVersionError,
+    StoreError,
+    SynapseError,
+)
 from synapse.util.async_helpers import Linearizer
 
 logger = logging.getLogger(__name__)
@@ -55,6 +61,8 @@ class E2eRoomKeysHandler(object):
             room_id(string): room ID to get keys for, or None to get keys for all rooms
             session_id(string): session ID to get keys for, or None to get keys for all
                 sessions
+        Raises:
+            NotFoundError: if the backup version does not exist
         Returns:
             A deferred list of dicts giving the session_data and message metadata for
             these room keys.
@@ -63,13 +71,19 @@ class E2eRoomKeysHandler(object):
         # we deliberately take the lock to get keys so that changing the version
         # works atomically
         with (yield self._upload_linearizer.queue(user_id)):
+            # make sure the backup version exists
+            try:
+                yield self.store.get_e2e_room_keys_version_info(user_id, version)
+            except StoreError as e:
+                if e.code == 404:
+                    raise NotFoundError("Unknown backup version")
+                else:
+                    raise
+
             results = yield self.store.get_e2e_room_keys(
                 user_id, version, room_id, session_id
             )
 
-            if results['rooms'] == {}:
-                raise SynapseError(404, "No room_keys found")
-
             defer.returnValue(results)
 
     @defer.inlineCallbacks
@@ -120,7 +134,7 @@ class E2eRoomKeysHandler(object):
         }
 
         Raises:
-            SynapseError: with code 404 if there are no versions defined
+            NotFoundError: if there are no versions defined
             RoomKeysVersionError: if the uploaded version is not the current version
         """
 
@@ -134,7 +148,7 @@ class E2eRoomKeysHandler(object):
                 version_info = yield self.store.get_e2e_room_keys_version_info(user_id)
             except StoreError as e:
                 if e.code == 404:
-                    raise SynapseError(404, "Version '%s' not found" % (version,))
+                    raise NotFoundError("Version '%s' not found" % (version,))
                 else:
                     raise
 
@@ -148,7 +162,7 @@ class E2eRoomKeysHandler(object):
                     raise RoomKeysVersionError(current_version=version_info['version'])
                 except StoreError as e:
                     if e.code == 404:
-                        raise SynapseError(404, "Version '%s' not found" % (version,))
+                        raise NotFoundError("Version '%s' not found" % (version,))
                     else:
                         raise
 
@@ -259,7 +273,7 @@ class E2eRoomKeysHandler(object):
             version(str): Optional; if None gives the most recent version
                 otherwise a historical one.
         Raises:
-            StoreError: code 404 if the requested backup version doesn't exist
+            NotFoundError: if the requested backup version doesn't exist
         Returns:
             A deferred of an info dict that gives the info about the new version.
 
@@ -271,7 +285,13 @@ class E2eRoomKeysHandler(object):
         """
 
         with (yield self._upload_linearizer.queue(user_id)):
-            res = yield self.store.get_e2e_room_keys_version_info(user_id, version)
+            try:
+                res = yield self.store.get_e2e_room_keys_version_info(user_id, version)
+            except StoreError as e:
+                if e.code == 404:
+                    raise NotFoundError("Unknown backup version")
+                else:
+                    raise
             defer.returnValue(res)
 
     @defer.inlineCallbacks
@@ -282,8 +302,60 @@ class E2eRoomKeysHandler(object):
             user_id(str): the user whose current backup version we're deleting
             version(str): the version id of the backup being deleted
         Raises:
-            StoreError: code 404 if this backup version doesn't exist
+            NotFoundError: if this backup version doesn't exist
         """
 
         with (yield self._upload_linearizer.queue(user_id)):
-            yield self.store.delete_e2e_room_keys_version(user_id, version)
+            try:
+                yield self.store.delete_e2e_room_keys_version(user_id, version)
+            except StoreError as e:
+                if e.code == 404:
+                    raise NotFoundError("Unknown backup version")
+                else:
+                    raise
+
+    @defer.inlineCallbacks
+    def update_version(self, user_id, version, version_info):
+        """Update the info about a given version of the user's backup
+
+        Args:
+            user_id(str): the user whose current backup version we're updating
+            version(str): the backup version we're updating
+            version_info(dict): the new information about the backup
+        Raises:
+            NotFoundError: if the requested backup version doesn't exist
+        Returns:
+            A deferred of an empty dict.
+        """
+        if "version" not in version_info:
+            raise SynapseError(
+                400,
+                "Missing version in body",
+                Codes.MISSING_PARAM
+            )
+        if version_info["version"] != version:
+            raise SynapseError(
+                400,
+                "Version in body does not match",
+                Codes.INVALID_PARAM
+            )
+        with (yield self._upload_linearizer.queue(user_id)):
+            try:
+                old_info = yield self.store.get_e2e_room_keys_version_info(
+                    user_id, version
+                )
+            except StoreError as e:
+                if e.code == 404:
+                    raise NotFoundError("Unknown backup version")
+                else:
+                    raise
+            if old_info["algorithm"] != version_info["algorithm"]:
+                raise SynapseError(
+                    400,
+                    "Algorithm does not match",
+                    Codes.INVALID_PARAM
+                )
+
+            yield self.store.update_e2e_room_keys_version(user_id, version, version_info)
+
+            defer.returnValue({})
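
update_version above validates the new version_info before storing it. From a caller's point of view the constraints are (values illustrative):

    version_info = {
        # Required, and must equal the version being updated, otherwise the
        # handler raises 400 with M_MISSING_PARAM / M_INVALID_PARAM.
        "version": "2",
        # Must match the algorithm of the stored version, else 400
        # M_INVALID_PARAM.
        "algorithm": "m.megolm_backup.v1.curve25519-aes-sha2",
    }

    # yield handler.update_version(user_id, "2", version_info)  # returns {}
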
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index cd5b9bbb19..f80486102a 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -34,6 +34,7 @@ from synapse.api.constants import (
     EventTypes,
     Membership,
     RejectedReason,
+    RoomVersions,
 )
 from synapse.api.errors import (
     AuthError,
@@ -43,10 +44,7 @@ from synapse.api.errors import (
     StoreError,
     SynapseError,
 )
-from synapse.crypto.event_signing import (
-    add_hashes_and_signatures,
-    compute_event_signature,
-)
+from synapse.crypto.event_signing import compute_event_signature
 from synapse.events.validator import EventValidator
 from synapse.replication.http.federation import (
     ReplicationCleanRoomRestServlet,
@@ -58,7 +56,6 @@ from synapse.types import UserID, get_domain_from_id
 from synapse.util import logcontext, unwrapFirstError
 from synapse.util.async_helpers import Linearizer
 from synapse.util.distributor import user_joined_room
-from synapse.util.frozenutils import unfreeze
 from synapse.util.logutils import log_function
 from synapse.util.retryutils import NotRetryingDestination
 from synapse.visibility import filter_events_for_server
@@ -105,7 +102,7 @@ class FederationHandler(BaseHandler):
 
         self.hs = hs
 
-        self.store = hs.get_datastore()  # type: synapse.storage.DataStore
+        self.store = hs.get_datastore()
         self.federation_client = hs.get_federation_client()
         self.state_handler = hs.get_state_handler()
         self.server_name = hs.hostname
@@ -202,27 +199,22 @@ class FederationHandler(BaseHandler):
             self.room_queues[room_id].append((pdu, origin))
             return
 
-        # If we're no longer in the room just ditch the event entirely. This
-        # is probably an old server that has come back and thinks we're still
-        # in the room (or we've been rejoined to the room by a state reset).
+        # If we're not in the room just ditch the event entirely. This is
+        # probably an old server that has come back and thinks we're still in
+        # the room (or we've been rejoined to the room by a state reset).
         #
-        # If we were never in the room then maybe our database got vaped and
-        # we should check if we *are* in fact in the room. If we are then we
-        # can magically rejoin the room.
+        # Note that if we were never in the room then we would have already
+        # dropped the event, since we wouldn't know the room version.
         is_in_room = yield self.auth.check_host_in_room(
             room_id,
             self.server_name
         )
         if not is_in_room:
-            was_in_room = yield self.store.was_host_joined(
-                pdu.room_id, self.server_name,
+            logger.info(
+                "[%s %s] Ignoring PDU from %s as we're not in the room",
+                room_id, event_id, origin,
             )
-            if was_in_room:
-                logger.info(
-                    "[%s %s] Ignoring PDU from %s as we've left the room",
-                    room_id, event_id, origin,
-                )
-                defer.returnValue(None)
+            defer.returnValue(None)
 
         state = None
         auth_chain = []
@@ -239,7 +231,7 @@ class FederationHandler(BaseHandler):
                 room_id, event_id, min_depth,
             )
 
-            prevs = {e_id for e_id, _ in pdu.prev_events}
+            prevs = set(pdu.prev_event_ids())
             seen = yield self.store.have_seen_events(prevs)
 
             if min_depth and pdu.depth < min_depth:
@@ -347,6 +339,8 @@ class FederationHandler(BaseHandler):
                             room_id, event_id, p,
                         )
 
+                        room_version = yield self.store.get_room_version(room_id)
+
                         with logcontext.nested_logging_context(p):
                             # note that if any of the missing prevs share missing state or
                             # auth events, the requests to fetch those events are deduped
@@ -360,7 +354,7 @@ class FederationHandler(BaseHandler):
                             # we want the state *after* p; get_state_for_room returns the
                             # state *before* p.
                             remote_event = yield self.federation_client.get_pdu(
-                                [origin], p, outlier=True,
+                                [origin], p, room_version, outlier=True,
                             )
 
                             if remote_event is None:
@@ -384,7 +378,6 @@ class FederationHandler(BaseHandler):
                             for x in remote_state:
                                 event_map[x.event_id] = x
 
-                    room_version = yield self.store.get_room_version(room_id)
                     state_map = yield resolve_events_with_store(
                         room_version, state_maps, event_map,
                         state_res_store=StateResolutionStore(self.store),
@@ -557,86 +550,54 @@ class FederationHandler(BaseHandler):
             room_id, event_id, event,
         )
 
-        # FIXME (erikj): Awful hack to make the case where we are not currently
-        # in the room work
-        # If state and auth_chain are None, then we don't need to do this check
-        # as we already know we have enough state in the DB to handle this
-        # event.
-        if state and auth_chain and not event.internal_metadata.is_outlier():
-            is_in_room = yield self.auth.check_host_in_room(
-                room_id,
-                self.server_name
-            )
-        else:
-            is_in_room = True
-
-        if not is_in_room:
-            logger.info(
-                "[%s %s] Got event for room we're not in",
-                room_id, event_id,
-            )
+        event_ids = set()
+        if state:
+            event_ids |= {e.event_id for e in state}
+        if auth_chain:
+            event_ids |= {e.event_id for e in auth_chain}
 
-            try:
-                yield self._persist_auth_tree(
-                    origin, auth_chain, state, event
-                )
-            except AuthError as e:
-                raise FederationError(
-                    "ERROR",
-                    e.code,
-                    e.msg,
-                    affected=event_id,
-                )
-
-        else:
-            event_ids = set()
-            if state:
-                event_ids |= {e.event_id for e in state}
-            if auth_chain:
-                event_ids |= {e.event_id for e in auth_chain}
+        seen_ids = yield self.store.have_seen_events(event_ids)
 
-            seen_ids = yield self.store.have_seen_events(event_ids)
+        if state and auth_chain is not None:
+            # If we have any state or auth_chain given to us by the replication
+            # layer, then we should handle them (if we haven't before.)
 
-            if state and auth_chain is not None:
-                # If we have any state or auth_chain given to us by the replication
-                # layer, then we should handle them (if we haven't before.)
+            event_infos = []
 
-                event_infos = []
-
-                for e in itertools.chain(auth_chain, state):
-                    if e.event_id in seen_ids:
-                        continue
-                    e.internal_metadata.outlier = True
-                    auth_ids = [e_id for e_id, _ in e.auth_events]
-                    auth = {
-                        (e.type, e.state_key): e for e in auth_chain
-                        if e.event_id in auth_ids or e.type == EventTypes.Create
-                    }
-                    event_infos.append({
-                        "event": e,
-                        "auth_events": auth,
-                    })
-                    seen_ids.add(e.event_id)
+            for e in itertools.chain(auth_chain, state):
+                if e.event_id in seen_ids:
+                    continue
+                e.internal_metadata.outlier = True
+                auth_ids = e.auth_event_ids()
+                auth = {
+                    (e.type, e.state_key): e for e in auth_chain
+                    if e.event_id in auth_ids or e.type == EventTypes.Create
+                }
+                event_infos.append({
+                    "event": e,
+                    "auth_events": auth,
+                })
+                seen_ids.add(e.event_id)
 
-                logger.info(
-                    "[%s %s] persisting newly-received auth/state events %s",
-                    room_id, event_id, [e["event"].event_id for e in event_infos]
-                )
-                yield self._handle_new_events(origin, event_infos)
+            logger.info(
+                "[%s %s] persisting newly-received auth/state events %s",
+                room_id, event_id, [e["event"].event_id for e in event_infos]
+            )
+            yield self._handle_new_events(origin, event_infos)
 
-            try:
-                context = yield self._handle_new_event(
-                    origin,
-                    event,
-                    state=state,
-                )
-            except AuthError as e:
-                raise FederationError(
-                    "ERROR",
-                    e.code,
-                    e.msg,
-                    affected=event.event_id,
-                )
+        try:
+            context = yield self._handle_new_event(
+                origin,
+                event,
+                state=state,
+            )
+        except AuthError as e:
+            raise FederationError(
+                "ERROR",
+                e.code,
+                e.msg,
+                affected=event.event_id,
+            )
 
         room = yield self.store.get_room(room_id)
 
@@ -692,6 +653,8 @@ class FederationHandler(BaseHandler):
         if dest == self.server_name:
             raise SynapseError(400, "Can't backfill from self.")
 
+        room_version = yield self.store.get_room_version(room_id)
+
         events = yield self.federation_client.backfill(
             dest,
             room_id,
@@ -726,7 +689,7 @@ class FederationHandler(BaseHandler):
         edges = [
             ev.event_id
             for ev in events
-            if set(e_id for e_id, _ in ev.prev_events) - event_ids
+            if set(ev.prev_event_ids()) - event_ids
         ]
 
         logger.info(
@@ -753,7 +716,7 @@ class FederationHandler(BaseHandler):
         required_auth = set(
             a_id
             for event in events + list(state_events.values()) + list(auth_events.values())
-            for a_id, _ in event.auth_events
+            for a_id in event.auth_event_ids()
         )
         auth_events.update({
             e_id: event_map[e_id] for e_id in required_auth if e_id in event_map
@@ -769,7 +732,7 @@ class FederationHandler(BaseHandler):
             auth_events.update(ret_events)
 
             required_auth.update(
-                a_id for event in ret_events.values() for a_id, _ in event.auth_events
+                a_id for event in ret_events.values() for a_id in event.auth_event_ids()
             )
             missing_auth = required_auth - set(auth_events)
 
@@ -785,6 +748,7 @@ class FederationHandler(BaseHandler):
                             self.federation_client.get_pdu,
                             [dest],
                             event_id,
+                            room_version=room_version,
                             outlier=True,
                             timeout=10000,
                         )
@@ -796,7 +760,7 @@ class FederationHandler(BaseHandler):
                 required_auth.update(
                     a_id
                     for event in results if event
-                    for a_id, _ in event.auth_events
+                    for a_id in event.auth_event_ids()
                 )
                 missing_auth = required_auth - set(auth_events)
 
@@ -806,29 +770,52 @@ class FederationHandler(BaseHandler):
             set(auth_events.keys()) | set(state_events.keys())
         )
 
+        # We now have a chunk of events plus associated state and auth chain to
+        # persist. We do the persistence in two steps:
+        #   1. Auth events and state get persisted as outliers, plus the
+        #      backward extremities get persisted (as non-outliers).
+        #   2. The rest of the events in the chunk get persisted one by one, as
+        #      each one depends on the previous event for its state.
+        #
+        # The important thing is that events in the chunk get persisted as
+        # non-outliers, including when those events are also in the state or
+        # auth chain. Caution must therefore be taken to ensure that they are
+        # not accidentally marked as outliers.
+
+        # Step 1a: persist auth events that *don't* appear in the chunk
         ev_infos = []
         for a in auth_events.values():
-            if a.event_id in seen_events:
+            # We only want to persist auth events as outliers that we haven't
+            # seen and aren't about to persist as part of the backfilled chunk.
+            if a.event_id in seen_events or a.event_id in event_map:
                 continue
+
             a.internal_metadata.outlier = True
             ev_infos.append({
                 "event": a,
                 "auth_events": {
                     (auth_events[a_id].type, auth_events[a_id].state_key):
                     auth_events[a_id]
-                    for a_id, _ in a.auth_events
+                    for a_id in a.auth_event_ids()
                     if a_id in auth_events
                 }
             })
 
+        # Step 1b: persist the events in the chunk we fetched state for (i.e.
+        # the backwards extremities) as non-outliers.
         for e_id in events_to_state:
+            # For paranoia we ensure that these events are marked as
+            # non-outliers
+            ev = event_map[e_id]
+            assert(not ev.internal_metadata.is_outlier())
+
             ev_infos.append({
-                "event": event_map[e_id],
+                "event": ev,
                 "state": events_to_state[e_id],
                 "auth_events": {
                     (auth_events[a_id].type, auth_events[a_id].state_key):
                     auth_events[a_id]
-                    for a_id, _ in event_map[e_id].auth_events
+                    for a_id in ev.auth_event_ids()
                     if a_id in auth_events
                 }
             })
@@ -838,12 +825,17 @@ class FederationHandler(BaseHandler):
             backfilled=True,
         )
 
+        # Step 2: Persist the rest of the events in the chunk one by one
         events.sort(key=lambda e: e.depth)
 
         for event in events:
             if event in events_to_state:
                 continue
 
+            # For paranoia we ensure that these events are marked as
+            # non-outliers
+            assert(not event.internal_metadata.is_outlier())
+
             # We store these one at a time since each event depends on the
             # previous to work out the state.
             # TODO: We can probably do something more clever here.
@@ -1041,17 +1033,17 @@ class FederationHandler(BaseHandler):
         Raises:
             SynapseError if the event does not pass muster
         """
-        if len(ev.prev_events) > 20:
+        if len(ev.prev_event_ids()) > 20:
             logger.warn("Rejecting event %s which has %i prev_events",
-                        ev.event_id, len(ev.prev_events))
+                        ev.event_id, len(ev.prev_event_ids()))
             raise SynapseError(
                 http_client.BAD_REQUEST,
                 "Too many prev_events",
             )
 
-        if len(ev.auth_events) > 10:
+        if len(ev.auth_event_ids()) > 10:
             logger.warn("Rejecting event %s which has %i auth_events",
-                        ev.event_id, len(ev.auth_events))
+                        ev.event_id, len(ev.auth_event_ids()))
             raise SynapseError(
                 http_client.BAD_REQUEST,
                 "Too many auth_events",
@@ -1076,7 +1068,7 @@ class FederationHandler(BaseHandler):
     def on_event_auth(self, event_id):
         event = yield self.store.get_event(event_id)
         auth = yield self.store.get_auth_chain(
-            [auth_id for auth_id, _ in event.auth_events],
+            [auth_id for auth_id in event.auth_event_ids()],
             include_given=True
         )
         defer.returnValue([e for e in auth])
@@ -1097,7 +1089,7 @@ class FederationHandler(BaseHandler):
         """
         logger.debug("Joining %s to %s", joinee, room_id)
 
-        origin, event = yield self._make_and_verify_event(
+        origin, event, event_format_version = yield self._make_and_verify_event(
             target_hosts,
             room_id,
             joinee,
@@ -1120,7 +1112,6 @@ class FederationHandler(BaseHandler):
         handled_events = set()
 
         try:
-            event = self._sign_event(event)
             # Try the host we successfully got a response to /make_join/
             # request first.
             try:
@@ -1128,7 +1119,9 @@ class FederationHandler(BaseHandler):
                 target_hosts.insert(0, origin)
             except ValueError:
                 pass
-            ret = yield self.federation_client.send_join(target_hosts, event)
+            ret = yield self.federation_client.send_join(
+                target_hosts, event, event_format_version,
+            )
 
             origin = ret["origin"]
             state = ret["state"]
@@ -1201,13 +1194,18 @@ class FederationHandler(BaseHandler):
         """
         event_content = {"membership": Membership.JOIN}
 
-        builder = self.event_builder_factory.new({
-            "type": EventTypes.Member,
-            "content": event_content,
-            "room_id": room_id,
-            "sender": user_id,
-            "state_key": user_id,
-        })
+        room_version = yield self.store.get_room_version(room_id)
+
+        builder = self.event_builder_factory.new(
+            room_version,
+            {
+                "type": EventTypes.Member,
+                "content": event_content,
+                "room_id": room_id,
+                "sender": user_id,
+                "state_key": user_id,
+            }
+        )
 
         try:
             event, context = yield self.event_creation_handler.create_new_client_event(
@@ -1219,7 +1217,9 @@ class FederationHandler(BaseHandler):
 
         # The remote hasn't signed it yet, obviously. We'll do the full checks
         # when we get the event back in `on_send_join_request`
-        yield self.auth.check_from_context(event, context, do_sig_check=False)
+        yield self.auth.check_from_context(
+            room_version, event, context, do_sig_check=False,
+        )
 
         defer.returnValue(event)
 
@@ -1324,11 +1324,11 @@ class FederationHandler(BaseHandler):
             )
 
         event.internal_metadata.outlier = True
-        event.internal_metadata.invite_from_remote = True
+        event.internal_metadata.out_of_band_membership = True
 
         event.signatures.update(
             compute_event_signature(
-                event,
+                event.get_pdu_json(),
                 self.hs.hostname,
                 self.hs.config.signing_key[0]
             )
@@ -1341,7 +1341,7 @@ class FederationHandler(BaseHandler):
 
     @defer.inlineCallbacks
     def do_remotely_reject_invite(self, target_hosts, room_id, user_id):
-        origin, event = yield self._make_and_verify_event(
+        origin, event, event_format_version = yield self._make_and_verify_event(
             target_hosts,
             room_id,
             user_id,
@@ -1350,7 +1350,7 @@ class FederationHandler(BaseHandler):
         # Mark as outlier as we don't have any state for this event; we're not
         # even in the room.
         event.internal_metadata.outlier = True
-        event = self._sign_event(event)
+        event.internal_metadata.out_of_band_membership = True
 
         # Try the host that we successfully called /make_leave/ on first for
         # the /send_leave/ request.
@@ -1373,7 +1373,7 @@ class FederationHandler(BaseHandler):
     @defer.inlineCallbacks
     def _make_and_verify_event(self, target_hosts, room_id, user_id, membership,
                                content={}, params=None):
-        origin, pdu = yield self.federation_client.make_membership_event(
+        origin, event, format_ver = yield self.federation_client.make_membership_event(
             target_hosts,
             room_id,
             user_id,
@@ -1382,9 +1382,7 @@ class FederationHandler(BaseHandler):
             params=params,
         )
 
-        logger.debug("Got response to make_%s: %s", membership, pdu)
-
-        event = pdu
+        logger.debug("Got response to make_%s: %s", membership, event)
 
         # We should assert some things.
         # FIXME: Do this in a nicer way
@@ -1392,28 +1390,7 @@ class FederationHandler(BaseHandler):
         assert(event.user_id == user_id)
         assert(event.state_key == user_id)
         assert(event.room_id == room_id)
-        defer.returnValue((origin, event))
-
-    def _sign_event(self, event):
-        event.internal_metadata.outlier = False
-
-        builder = self.event_builder_factory.new(
-            unfreeze(event.get_pdu_json())
-        )
-
-        builder.event_id = self.event_builder_factory.create_event_id()
-        builder.origin = self.hs.hostname
-
-        if not hasattr(event, "signatures"):
-            builder.signatures = {}
-
-        add_hashes_and_signatures(
-            builder,
-            self.hs.hostname,
-            self.hs.config.signing_key[0],
-        )
-
-        return builder.build()
+        defer.returnValue((origin, event, format_ver))
 
     @defer.inlineCallbacks
     @log_function
@@ -1422,13 +1399,17 @@ class FederationHandler(BaseHandler):
         leave event for the room and return that. We do *not* persist or
         process it until the other server has signed it and sent it back.
         """
-        builder = self.event_builder_factory.new({
-            "type": EventTypes.Member,
-            "content": {"membership": Membership.LEAVE},
-            "room_id": room_id,
-            "sender": user_id,
-            "state_key": user_id,
-        })
+        room_version = yield self.store.get_room_version(room_id)
+        builder = self.event_builder_factory.new(
+            room_version,
+            {
+                "type": EventTypes.Member,
+                "content": {"membership": Membership.LEAVE},
+                "room_id": room_id,
+                "sender": user_id,
+                "state_key": user_id,
+            }
+        )
 
         event, context = yield self.event_creation_handler.create_new_client_event(
             builder=builder,
@@ -1437,7 +1418,9 @@ class FederationHandler(BaseHandler):
         try:
             # The remote hasn't signed it yet, obviously. We'll do the full checks
             # when we get the event back in `on_send_leave_request`
-            yield self.auth.check_from_context(event, context, do_sig_check=False)
+            yield self.auth.check_from_context(
+                room_version, event, context, do_sig_check=False,
+            )
         except AuthError as e:
             logger.warn("Failed to create new leave %r because %s", event, e)
             raise e
@@ -1696,9 +1679,16 @@ class FederationHandler(BaseHandler):
                 create_event = e
                 break
 
+        if create_event is None:
+            # If the state doesn't have a create event then the room is
+            # invalid, and it would fail auth checks anyway.
+            raise SynapseError(400, "No create event in state")
+
+        room_version = create_event.content.get("room_version", RoomVersions.V1)
+
         missing_auth_events = set()
         for e in itertools.chain(auth_events, state, [event]):
-            for e_id, _ in e.auth_events:
+            for e_id in e.auth_event_ids():
                 if e_id not in event_map:
                     missing_auth_events.add(e_id)
 
@@ -1706,6 +1696,7 @@ class FederationHandler(BaseHandler):
             m_ev = yield self.federation_client.get_pdu(
                 [origin],
                 e_id,
+                room_version=room_version,
                 outlier=True,
                 timeout=10000,
             )
@@ -1717,14 +1708,14 @@ class FederationHandler(BaseHandler):
         for e in itertools.chain(auth_events, state, [event]):
             auth_for_e = {
                 (event_map[e_id].type, event_map[e_id].state_key): event_map[e_id]
-                for e_id, _ in e.auth_events
+                for e_id in e.auth_event_ids()
                 if e_id in event_map
             }
             if create_event:
                 auth_for_e[(EventTypes.Create, "")] = create_event
 
             try:
-                self.auth.check(e, auth_events=auth_for_e)
+                self.auth.check(room_version, e, auth_events=auth_for_e)
             except SynapseError as err:
                 # we may get SynapseErrors here as well as AuthErrors. For
                 # instance, there are a couple of (ancient) events in some
@@ -1785,10 +1776,10 @@ class FederationHandler(BaseHandler):
 
         # This is a hack to fix some old rooms where the initial join event
         # didn't reference the create event in its auth events.
-        if event.type == EventTypes.Member and not event.auth_events:
-            if len(event.prev_events) == 1 and event.depth < 5:
+        if event.type == EventTypes.Member and not event.auth_event_ids():
+            if len(event.prev_event_ids()) == 1 and event.depth < 5:
                 c = yield self.store.get_event(
-                    event.prev_events[0][0],
+                    event.prev_event_ids()[0],
                     allow_none=True,
                 )
                 if c and c.type == EventTypes.Create:
@@ -1835,7 +1826,7 @@ class FederationHandler(BaseHandler):
 
         # Now get the current auth_chain for the event.
         local_auth_chain = yield self.store.get_auth_chain(
-            [auth_id for auth_id, _ in event.auth_events],
+            [auth_id for auth_id in event.auth_event_ids()],
             include_given=True
         )
 
@@ -1891,7 +1882,7 @@ class FederationHandler(BaseHandler):
         """
         # Check if we have all the auth events.
         current_state = set(e.event_id for e in auth_events.values())
-        event_auth_events = set(e_id for e_id, _ in event.auth_events)
+        event_auth_events = set(event.auth_event_ids())
 
         if event.is_state():
             event_key = (event.type, event.state_key)
@@ -1935,7 +1926,7 @@ class FederationHandler(BaseHandler):
                         continue
 
                     try:
-                        auth_ids = [e_id for e_id, _ in e.auth_events]
+                        auth_ids = e.auth_event_ids()
                         auth = {
                             (e.type, e.state_key): e for e in remote_auth_chain
                             if e.event_id in auth_ids or e.type == EventTypes.Create
@@ -1956,7 +1947,7 @@ class FederationHandler(BaseHandler):
                         pass
 
                 have_events = yield self.store.get_seen_events_with_rejections(
-                    [e_id for e_id, _ in event.auth_events]
+                    event.auth_event_ids()
                 )
                 seen_events = set(have_events.keys())
             except Exception:
@@ -1968,6 +1959,8 @@ class FederationHandler(BaseHandler):
         current_state = set(e.event_id for e in auth_events.values())
         different_auth = event_auth_events - current_state
 
+        room_version = yield self.store.get_room_version(event.room_id)
+
         if different_auth and not event.internal_metadata.is_outlier():
             # Do auth conflict res.
             logger.info("Different auth: %s", different_auth)
@@ -1992,8 +1985,6 @@ class FederationHandler(BaseHandler):
                     (d.type, d.state_key): d for d in different_events if d
                 })
 
-                room_version = yield self.store.get_room_version(event.room_id)
-
                 new_state = yield self.state_handler.resolve_events(
                     room_version,
                     [list(local_view.values()), list(remote_view.values())],
@@ -2058,7 +2049,7 @@ class FederationHandler(BaseHandler):
                             continue
 
                         try:
-                            auth_ids = [e_id for e_id, _ in ev.auth_events]
+                            auth_ids = ev.auth_event_ids()
                             auth = {
                                 (e.type, e.state_key): e
                                 for e in result["auth_chain"]
@@ -2093,7 +2084,7 @@ class FederationHandler(BaseHandler):
                 )
 
         try:
-            self.auth.check(event, auth_events=auth_events)
+            self.auth.check(room_version, event, auth_events=auth_events)
         except AuthError as e:
             logger.warn("Failed auth resolution for %r because %s", event, e)
             raise e
@@ -2250,7 +2241,7 @@ class FederationHandler(BaseHandler):
         missing_remote_ids = [e.event_id for e in missing_remotes]
         base_remote_rejected = list(missing_remotes)
         for e in missing_remotes:
-            for e_id, _ in e.auth_events:
+            for e_id in e.auth_event_ids():
                 if e_id in missing_remote_ids:
                     try:
                         base_remote_rejected.remove(e)
@@ -2316,18 +2307,26 @@ class FederationHandler(BaseHandler):
         }
 
         if (yield self.auth.check_host_in_room(room_id, self.hs.hostname)):
-            builder = self.event_builder_factory.new(event_dict)
-            EventValidator().validate_new(builder)
+            room_version = yield self.store.get_room_version(room_id)
+            builder = self.event_builder_factory.new(room_version, event_dict)
+
+            EventValidator().validate_builder(builder)
             event, context = yield self.event_creation_handler.create_new_client_event(
                 builder=builder
             )
 
             event, context = yield self.add_display_name_to_third_party_invite(
-                event_dict, event, context
+                room_version, event_dict, event, context
             )
 
+            EventValidator().validate_new(event)
+
+            # We need to tell the transaction queue to send this out, even
+            # though the sender isn't a local user.
+            event.internal_metadata.send_on_behalf_of = self.hs.hostname
+
             try:
-                yield self.auth.check_from_context(event, context)
+                yield self.auth.check_from_context(room_version, event, context)
             except AuthError as e:
                 logger.warn("Denying new third party invite %r because %s", event, e)
                 raise e
@@ -2354,23 +2353,31 @@ class FederationHandler(BaseHandler):
         Returns:
             Deferred: resolves (to None)
         """
-        builder = self.event_builder_factory.new(event_dict)
+        room_version = yield self.store.get_room_version(room_id)
+
+        # NB: event_dict has a particular specced format we might need to fudge
+        # if we change event formats too much.
+        builder = self.event_builder_factory.new(room_version, event_dict)
 
         event, context = yield self.event_creation_handler.create_new_client_event(
             builder=builder,
         )
 
         event, context = yield self.add_display_name_to_third_party_invite(
-            event_dict, event, context
+            room_version, event_dict, event, context
         )
 
         try:
-            self.auth.check_from_context(event, context)
+            self.auth.check_from_context(room_version, event, context)
         except AuthError as e:
             logger.warn("Denying third party invite %r because %s", event, e)
             raise e
         yield self._check_signature(event, context)
 
+        # We need to tell the transaction queue to send this out, even
+        # though the sender isn't a local user.
+        event.internal_metadata.send_on_behalf_of = get_domain_from_id(event.sender)
+
         # XXX we send the invite here, but send_membership_event also sends it,
         # so we end up making two requests. I think this is redundant.
         returned_invite = yield self.send_invite(origin, event)
@@ -2381,7 +2388,8 @@ class FederationHandler(BaseHandler):
         yield member_handler.send_membership_event(None, event, context)
 
     @defer.inlineCallbacks
-    def add_display_name_to_third_party_invite(self, event_dict, event, context):
+    def add_display_name_to_third_party_invite(self, room_version, event_dict,
+                                               event, context):
         key = (
             EventTypes.ThirdPartyInvite,
             event.content["third_party_invite"]["signed"]["token"]
@@ -2405,11 +2413,12 @@ class FederationHandler(BaseHandler):
             # auth checks. If we need the invite and don't have it then the
             # auth check code will explode appropriately.
 
-        builder = self.event_builder_factory.new(event_dict)
-        EventValidator().validate_new(builder)
+        builder = self.event_builder_factory.new(room_version, event_dict)
+        EventValidator().validate_builder(builder)
         event, context = yield self.event_creation_handler.create_new_client_event(
             builder=builder,
         )
+        EventValidator().validate_new(event)
         defer.returnValue((event, context))
 
     @defer.inlineCallbacks
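The recurring shape of the federation.py changes above is to resolve the room
version once and then thread it through event building, validation and auth.
Condensed into a single hedged sketch (the helper name and wiring are
illustrative, not an actual Synapse method; the collaborators are the ones
used in the handler above):

from twisted.internet import defer

@defer.inlineCallbacks
def _build_validate_and_auth(self, room_id, event_dict):
    # Illustrative only: resolve the version once, pass it everywhere.
    room_version = yield self.store.get_room_version(room_id)
    builder = self.event_builder_factory.new(room_version, event_dict)
    EventValidator().validate_builder(builder)  # structural checks on the builder
    event, context = yield self.event_creation_handler.create_new_client_event(
        builder=builder,
    )
    EventValidator().validate_new(event)  # full checks on the built event
    yield self.auth.check_from_context(room_version, event, context)
    defer.returnValue((event, context))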
diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py
index 173315af6c..02c508acec 100644
--- a/synapse/handlers/groups_local.py
+++ b/synapse/handlers/groups_local.py
@@ -20,7 +20,7 @@ from six import iteritems
 
 from twisted.internet import defer
 
-from synapse.api.errors import HttpResponseException, SynapseError
+from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError
 from synapse.types import get_domain_from_id
 
 logger = logging.getLogger(__name__)
@@ -46,13 +46,19 @@ def _create_rerouter(func_name):
             # when the remote end responds with things like 403 Not
             # In Group, we can communicate that to the client instead
             # of a 500.
-            def h(failure):
+            def http_response_errback(failure):
                 failure.trap(HttpResponseException)
                 e = failure.value
                 if e.code == 403:
                     raise e.to_synapse_error()
                 return failure
-            d.addErrback(h)
+
+            def request_failed_errback(failure):
+                failure.trap(RequestSendFailed)
+                raise SynapseError(502, "Failed to contact group server")
+
+            d.addErrback(http_response_errback)
+            d.addErrback(request_failed_errback)
             return d
     return f
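For context, Failure.trap() is what makes the two errbacks above compose: it
re-raises the wrapped exception unless it matches the given type, so each
errback handles exactly one class of error and passes everything else down
the chain. A self-contained sketch:

from twisted.internet import defer

def fetch():
    return defer.fail(ValueError("boom"))

def handle_value_error(failure):
    failure.trap(ValueError)  # re-raises for any other exception type
    return "recovered: %s" % failure.value

results = []
d = fetch()
d.addErrback(handle_value_error)
d.addCallback(results.append)  # results == ["recovered: boom"] afterwards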
 
diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py
index 6255d3ffed..eaf0ba990e 100644
--- a/synapse/handlers/identity.py
+++ b/synapse/handlers/identity.py
@@ -173,18 +173,21 @@ class IdentityHandler(BaseHandler):
             "mxid": mxid,
             "threepid": threepid,
         }
-        headers = {}
+
         # we abuse the federation http client to sign the request, but we have to send it
         # using the normal http client since we don't want the SRV lookup and want normal
         # 'browser-like' HTTPS.
-        self.federation_http_client.sign_request(
+        auth_headers = self.federation_http_client.build_auth_headers(
             destination=None,
             method='POST',
             url_bytes='/_matrix/identity/api/v1/3pid/unbind'.encode('ascii'),
-            headers_dict=headers,
             content=content,
             destination_is=id_server,
         )
+        headers = {
+            b"Authorization": auth_headers,
+        }
+
         # if we have a rewrite rule set for the identity server,
         # apply it now.
         #
@@ -194,6 +197,7 @@ class IdentityHandler(BaseHandler):
             id_server = self.rewrite_identity_server_urls[id_server]
 
         url = "https://%s/_matrix/identity/api/v1/3pid/unbind" % (id_server,)
+
         try:
             yield self.http_client.post_json_get_json(
                 url,
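To make the build_auth_headers() split above concrete: a federation-style
Authorization value is the request JSON signed with the server's key, rendered
as X-Matrix parameters. The following is a rough sketch using the signedjson
library, not Synapse's exact implementation:

from signedjson.key import generate_signing_key
from signedjson.sign import sign_json

def build_auth_headers(origin, signing_key, method, uri, content, destination_is):
    # e.g. signing_key = generate_signing_key("1") -> key id "ed25519:1"
    request = {
        "method": method,
        "uri": uri,
        "origin": origin,
        "destination_is": destination_is,
        "content": content,
    }
    signed = sign_json(request, origin, signing_key)
    return [
        'X-Matrix origin=%s,key="%s",sig="%s"' % (origin, key_id, sig)
        for key_id, sig in signed["signatures"][origin].items()
    ]

The result is then sent as {b"Authorization": auth_headers} with the plain
HTTP client, as in the hunk above.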
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 969e588e73..3981fe69ce 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -22,7 +22,7 @@ from canonicaljson import encode_canonical_json, json
 from twisted.internet import defer
 from twisted.internet.defer import succeed
 
-from synapse.api.constants import MAX_DEPTH, EventTypes, Membership
+from synapse.api.constants import EventTypes, Membership, RoomVersions
 from synapse.api.errors import (
     AuthError,
     Codes,
@@ -31,7 +31,6 @@ from synapse.api.errors import (
     SynapseError,
 )
 from synapse.api.urls import ConsentURIBuilder
-from synapse.crypto.event_signing import add_hashes_and_signatures
 from synapse.events.utils import serialize_event
 from synapse.events.validator import EventValidator
 from synapse.replication.http.send_event import ReplicationSendEventRestServlet
@@ -278,9 +277,17 @@ class EventCreationHandler(object):
         """
         yield self.auth.check_auth_blocking(requester.user.to_string())
 
-        builder = self.event_builder_factory.new(event_dict)
+        if event_dict["type"] == EventTypes.Create and event_dict["state_key"] == "":
+            room_version = event_dict["content"]["room_version"]
+        else:
+            try:
+                room_version = yield self.store.get_room_version(event_dict["room_id"])
+            except NotFoundError:
+                raise AuthError(403, "Unknown room")
+
+        builder = self.event_builder_factory.new(room_version, event_dict)
 
-        self.validator.validate_new(builder)
+        self.validator.validate_builder(builder)
 
         if builder.type == EventTypes.Member:
             membership = builder.content.get("membership", None)
@@ -318,6 +325,8 @@ class EventCreationHandler(object):
             prev_events_and_hashes=prev_events_and_hashes,
         )
 
+        self.validator.validate_new(event)
+
         defer.returnValue((event, context))
 
     def _is_exempt_from_privacy_policy(self, builder, requester):
@@ -427,6 +436,9 @@ class EventCreationHandler(object):
 
         if event.is_state():
             prev_state = yield self.deduplicate_state_event(event, context)
             if prev_state is not None:
+                logger.info(
+                    "Not bothering to persist duplicate state event %s", event.event_id,
+                )
                 defer.returnValue(prev_state)
 
@@ -532,40 +544,19 @@ class EventCreationHandler(object):
             prev_events_and_hashes = \
                 yield self.store.get_prev_events_for_room(builder.room_id)
 
-        if prev_events_and_hashes:
-            depth = max([d for _, _, d in prev_events_and_hashes]) + 1
-            # we cap depth of generated events, to ensure that they are not
-            # rejected by other servers (and so that they can be persisted in
-            # the db)
-            depth = min(depth, MAX_DEPTH)
-        else:
-            depth = 1
-
         prev_events = [
             (event_id, prev_hashes)
             for event_id, prev_hashes, _ in prev_events_and_hashes
         ]
 
-        builder.prev_events = prev_events
-        builder.depth = depth
-
-        context = yield self.state.compute_event_context(builder)
+        event = yield builder.build(
+            prev_event_ids=[p for p, _ in prev_events],
+        )
+        context = yield self.state.compute_event_context(event)
         if requester:
             context.app_service = requester.app_service
 
-        if builder.is_state():
-            builder.prev_state = yield self.store.add_event_hashes(
-                context.prev_state_events
-            )
-
-        yield self.auth.add_auth_events(builder, context)
-
-        signing_key = self.hs.config.signing_key[0]
-        add_hashes_and_signatures(
-            builder, self.server_name, signing_key
-        )
-
-        event = builder.build()
+        self.validator.validate_new(event)
 
         logger.debug(
             "Created event %s",
@@ -600,8 +591,13 @@ class EventCreationHandler(object):
             extra_users (list(UserID)): Any extra users to notify about event
         """
 
+        if event.is_state() and (event.type, event.state_key) == (EventTypes.Create, ""):
+            room_version = event.content.get("room_version", RoomVersions.V1)
+        else:
+            room_version = yield self.store.get_room_version(event.room_id)
+
         try:
-            yield self.auth.check_from_context(event, context)
+            yield self.auth.check_from_context(room_version, event, context)
         except AuthError as err:
             logger.warn("Denying new event %r because %s", event, err)
             raise err
@@ -749,7 +745,8 @@ class EventCreationHandler(object):
             auth_events = {
                 (e.type, e.state_key): e for e in auth_events.values()
             }
-            if self.auth.check_redaction(event, auth_events=auth_events):
+            room_version = yield self.store.get_room_version(event.room_id)
+            if self.auth.check_redaction(room_version, event, auth_events=auth_events):
                 original_event = yield self.store.get_event(
                     event.redacts,
                     check_redacted=False,
@@ -763,6 +760,9 @@ class EventCreationHandler(object):
                         "You don't have permission to redact events"
                     )
 
+                # We've already checked.
+                event.internal_metadata.recheck_redaction = False
+
         if event.type == EventTypes.Create:
             prev_state_ids = yield context.get_prev_state_ids(self.store)
             if prev_state_ids:
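One subtlety in the create_event() change above: for the m.room.create event
itself there is no room in the store yet, so the version can only come from
the event's own content. A hedged sketch of that lookup (the function name is
illustrative; the exceptions and constants are the ones imported in this
file):

from twisted.internet import defer

@defer.inlineCallbacks
def _room_version_for(store, event_dict):
    if event_dict["type"] == EventTypes.Create and event_dict.get("state_key") == "":
        # the create event is the only possible source of the version
        defer.returnValue(event_dict["content"].get("room_version", RoomVersions.V1))
    try:
        room_version = yield store.get_room_version(event_dict["room_id"])
    except NotFoundError:
        raise AuthError(403, "Unknown room")
    defer.returnValue(room_version)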
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index 43f81bd607..e4fdae9266 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -136,7 +136,11 @@ class PaginationHandler(object):
             logger.info("[purge] complete")
             self._purges_by_id[purge_id].status = PurgeStatus.STATUS_COMPLETE
         except Exception:
-            logger.error("[purge] failed: %s", Failure().getTraceback().rstrip())
+            f = Failure()
+            logger.error(
+                "[purge] failed",
+                exc_info=(f.type, f.value, f.getTracebackObject()),
+            )
             self._purges_by_id[purge_id].status = PurgeStatus.STATUS_FAILED
         finally:
             self._purges_in_progress_by_room.discard(room_id)
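The logging change above swaps a pre-rendered traceback string for structured
exception info: handing the (type, value, traceback) triple to the logger via
exc_info lets formatters and log aggregators treat it as a real traceback. A
standalone sketch of the pattern, with do_purge standing in for the actual
purge operation:

import logging

from twisted.python.failure import Failure

logger = logging.getLogger(__name__)

def run_purge(do_purge):
    try:
        do_purge()
    except Exception:
        f = Failure()  # captures the exception currently being handled
        logger.error(
            "[purge] failed",
            exc_info=(f.type, f.value, f.getTracebackObject()),
        )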
@@ -235,6 +239,17 @@ class PaginationHandler(object):
                 "room_key", next_key
             )
 
+        if events:
+            if event_filter:
+                events = event_filter.filter(events)
+
+            events = yield filter_events_for_client(
+                self.store,
+                user_id,
+                events,
+                is_peeking=(member_event_id is None),
+            )
+
         if not events:
             defer.returnValue({
                 "chunk": [],
@@ -242,18 +257,8 @@ class PaginationHandler(object):
                 "end": next_token.to_string(),
             })
 
-        if event_filter:
-            events = event_filter.filter(events)
-
-        events = yield filter_events_for_client(
-            self.store,
-            user_id,
-            events,
-            is_peeking=(member_event_id is None),
-        )
-
         state = None
-        if event_filter and event_filter.lazy_load_members():
+        if event_filter and event_filter.lazy_load_members() and len(events) > 0:
             # TODO: remove redundant members
 
             # FIXME: we also care about invite targets etc.
diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py
index 4c2690ba26..696469732c 100644
--- a/synapse/handlers/receipts.py
+++ b/synapse/handlers/receipts.py
@@ -16,8 +16,8 @@ import logging
 
 from twisted.internet import defer
 
+from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.types import get_domain_from_id
-from synapse.util import logcontext
 
 from ._base import BaseHandler
 
@@ -59,7 +59,9 @@ class ReceiptsHandler(BaseHandler):
         if is_new:
             # fire off a process in the background to send the receipt to
             # remote servers
-            self._push_remotes([receipt])
+            run_as_background_process(
+                'push_receipts_to_remotes', self._push_remotes, receipt
+            )
 
     @defer.inlineCallbacks
     def _received_remote_receipt(self, origin, content):
@@ -125,44 +127,42 @@ class ReceiptsHandler(BaseHandler):
 
         defer.returnValue(True)
 
-    @logcontext.preserve_fn   # caller should not yield on this
     @defer.inlineCallbacks
-    def _push_remotes(self, receipts):
-        """Given a list of receipts, works out which remote servers should be
+    def _push_remotes(self, receipt):
+        """Given a receipt, works out which remote servers should be
         poked and pokes them.
         """
         try:
-            # TODO: Some of this stuff should be coallesced.
-            for receipt in receipts:
-                room_id = receipt["room_id"]
-                receipt_type = receipt["receipt_type"]
-                user_id = receipt["user_id"]
-                event_ids = receipt["event_ids"]
-                data = receipt["data"]
-
-                users = yield self.state.get_current_user_in_room(room_id)
-                remotedomains = set(get_domain_from_id(u) for u in users)
-                remotedomains = remotedomains.copy()
-                remotedomains.discard(self.server_name)
-
-                logger.debug("Sending receipt to: %r", remotedomains)
-
-                for domain in remotedomains:
-                    self.federation.send_edu(
-                        destination=domain,
-                        edu_type="m.receipt",
-                        content={
-                            room_id: {
-                                receipt_type: {
-                                    user_id: {
-                                        "event_ids": event_ids,
-                                        "data": data,
-                                    }
+            # TODO: optimise this to move some of the work to the workers.
+            room_id = receipt["room_id"]
+            receipt_type = receipt["receipt_type"]
+            user_id = receipt["user_id"]
+            event_ids = receipt["event_ids"]
+            data = receipt["data"]
+
+            users = yield self.state.get_current_user_in_room(room_id)
+            remotedomains = set(get_domain_from_id(u) for u in users)
+            remotedomains = remotedomains.copy()
+            remotedomains.discard(self.server_name)
+
+            logger.debug("Sending receipt to: %r", remotedomains)
+
+            for domain in remotedomains:
+                self.federation.send_edu(
+                    destination=domain,
+                    edu_type="m.receipt",
+                    content={
+                        room_id: {
+                            receipt_type: {
+                                user_id: {
+                                    "event_ids": event_ids,
+                                    "data": data,
                                 }
-                            },
+                            }
                         },
-                        key=(room_id, receipt_type, user_id),
-                    )
+                    },
+                    key=(room_id, receipt_type, user_id),
+                )
         except Exception:
             logger.exception("Error pushing receipts to remote servers")
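The receipts change above replaces the @logcontext.preserve_fn decorator with
an explicit run_as_background_process call: the work gets a name for metrics,
runs in its own logcontext, and the caller deliberately does not yield on it.
The call site, reduced to its essentials:

from synapse.metrics.background_process_metrics import run_as_background_process

# fire-and-forget: do not yield on the returned deferred
run_as_background_process(
    "push_receipts_to_remotes", self._push_remotes, receipt,
)

Since _push_remotes catches and logs its own exceptions, a failed push cannot
propagate back into the caller.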
 
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index c060593fb1..7223af9014 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -19,6 +19,7 @@ import logging
 from twisted.internet import defer
 
 from synapse import types
+from synapse.api.constants import LoginType
 from synapse.api.errors import (
     AuthError,
     Codes,
@@ -26,7 +27,14 @@ from synapse.api.errors import (
     RegistrationError,
     SynapseError,
 )
+from synapse.config.server import is_threepid_reserved
 from synapse.http.client import CaptchaServerHttpClient
+from synapse.http.servlet import assert_params_in_dict
+from synapse.replication.http.login import RegisterDeviceReplicationServlet
+from synapse.replication.http.register import (
+    ReplicationPostRegisterActionsServlet,
+    ReplicationRegisterServlet,
+)
 from synapse.types import RoomAlias, RoomID, UserID, create_requester
 from synapse.util.async_helpers import Linearizer
 from synapse.util.threepids import check_3pid_allowed
@@ -50,9 +58,9 @@ class RegistrationHandler(BaseHandler):
         self._auth_handler = hs.get_auth_handler()
         self.profile_handler = hs.get_profile_handler()
         self.user_directory_handler = hs.get_user_directory_handler()
-        self.room_creation_handler = self.hs.get_room_creation_handler()
         self.captcha_client = CaptchaServerHttpClient(hs)
         self.http_client = hs.get_simple_http_client()
+        self.identity_handler = self.hs.get_handlers().identity_handler
 
         self._next_generated_user_id = None
 
@@ -63,6 +71,18 @@ class RegistrationHandler(BaseHandler):
         )
         self._server_notices_mxid = hs.config.server_notices_mxid
 
+        if hs.config.worker_app:
+            self._register_client = ReplicationRegisterServlet.make_client(hs)
+            self._register_device_client = (
+                RegisterDeviceReplicationServlet.make_client(hs)
+            )
+            self._post_registration_client = (
+                ReplicationPostRegisterActionsServlet.make_client(hs)
+            )
+        else:
+            self.device_handler = hs.get_device_handler()
+            self.pusher_pool = hs.get_pusherpool()
+
     @defer.inlineCallbacks
     def check_username(self, localpart, guest_access_token=None,
                        assigned_user_id=None):
@@ -126,9 +146,10 @@ class RegistrationHandler(BaseHandler):
         generate_token=True,
         guest_access_token=None,
         make_guest=False,
-        display_name=None,
         admin=False,
         threepid=None,
+        user_type=None,
+        default_display_name=None,
     ):
         """Registers a new client on the server.
 
@@ -143,7 +164,10 @@ class RegistrationHandler(BaseHandler):
               since it offers no means of associating a device_id with the
               access_token. Instead you should call auth_handler.issue_access_token
               after registration.
-            display_name (str): The displayname to set for this user, if any
+            user_type (str|None): type of user. One of the values from
+              api.constants.UserTypes, or None for a normal user.
+            default_display_name (unicode|None): if set, the new user's displayname
+              will be set to this. Defaults to the user's localpart.
         Returns:
             A tuple of (user_id, access_token).
         Raises:
@@ -153,7 +177,7 @@ class RegistrationHandler(BaseHandler):
         yield self.auth.check_auth_blocking(threepid=threepid)
         password_hash = None
         if password:
-            password_hash = yield self.auth_handler().hash(password)
+            password_hash = yield self._auth_handler.hash(password)
 
         if localpart:
             yield self.check_username(localpart, guest_access_token=guest_access_token)
@@ -173,27 +197,30 @@ class RegistrationHandler(BaseHandler):
             user = UserID(localpart, self.hs.hostname)
             user_id = user.to_string()
 
+            if was_guest:
+                # If the user was a guest then they already have a profile
+                default_display_name = None
+
+            elif default_display_name is None:
+                default_display_name = localpart
+
             token = None
             if generate_token:
                 token = self.macaroon_gen.generate_access_token(user_id)
-            yield self.store.register(
+            yield self._register_with_store(
                 user_id=user_id,
                 token=token,
                 password_hash=password_hash,
                 was_guest=was_guest,
                 make_guest=make_guest,
+                create_profile_with_displayname=default_display_name,
                 admin=admin,
+                user_type=user_type,
             )
 
-            if display_name is None:
-                display_name = (
-                    # If the user was a guest then they already have a profile
-                    None if was_guest else user.localpart
-                )
-
-            if display_name:
+            if default_display_name:
                 yield self.profile_handler.set_displayname(
-                    user, None, display_name, by_admin=True,
+                    user, None, default_display_name, by_admin=True,
                 )
 
             if self.hs.config.user_directory_search_all_users:
@@ -214,16 +241,19 @@ class RegistrationHandler(BaseHandler):
                 yield self.check_user_id_not_appservice_exclusive(user_id)
                 if generate_token:
                     token = self.macaroon_gen.generate_access_token(user_id)
+                if default_display_name is None:
+                    default_display_name = localpart
                 try:
-                    yield self.store.register(
+                    yield self._register_with_store(
                         user_id=user_id,
                         token=token,
                         password_hash=password_hash,
                         make_guest=make_guest,
+                        create_profile_with_displayname=default_display_name,
                     )
 
                     yield self.profile_handler.set_displayname(
-                        user, None, user.localpart, by_admin=True,
+                        user, None, default_display_name, by_admin=True,
                     )
 
                 except SynapseError:
@@ -232,16 +262,34 @@ class RegistrationHandler(BaseHandler):
                     user_id = None
                     token = None
                     attempts += 1
+        if not self.hs.config.user_consent_at_registration:
+            yield self._auto_join_rooms(user_id)
+
+        defer.returnValue((user_id, token))
 
+    @defer.inlineCallbacks
+    def _auto_join_rooms(self, user_id):
+        """Automatically joins users to auto join rooms - creating the room in the first place
+        if the user is the first to be created.
+
+        Args:
+            user_id(str): The user to join
+        """
         # auto-join the user to any rooms we're supposed to dump them into
         fake_requester = create_requester(user_id)
 
-        # try to create the room if we're the first user on the server
+        # try to create the room if we're the first real user on the server. Note
+        # that an auto-generated support user is not a real user and will never be
+        # the user to create the room
         should_auto_create_rooms = False
-        if self.hs.config.autocreate_auto_join_rooms:
+        is_support = yield self.store.is_support_user(user_id)
+        # There is an edge case where the first user is the support user: the
+        # rooms are then never created. This seems unlikely, and is recoverable
+        # anyway, given that a support user was involved in the first place.
+        if self.hs.config.autocreate_auto_join_rooms and not is_support:
             count = yield self.store.count_all_users()
             should_auto_create_rooms = count == 1
-
         for r in self.hs.config.auto_join_rooms:
             try:
                 if should_auto_create_rooms:
@@ -255,7 +303,10 @@ class RegistrationHandler(BaseHandler):
                     else:
                         # create room expects the localpart of the room alias
                         room_alias_localpart = room_alias.localpart
-                        yield self.room_creation_handler.create_room(
+
+                        # getting the RoomCreationHandler during init gives a dependency
+                        # loop
+                        yield self.hs.get_room_creation_handler().create_room(
                             fake_requester,
                             config={
                                 "preset": "public_chat",
@@ -268,10 +319,15 @@ class RegistrationHandler(BaseHandler):
             except Exception as e:
                 logger.error("Failed to join new user to %r: %r", r, e)
 
-        # We used to generate default identicons here, but nowadays
-        # we want clients to generate their own as part of their branding
-        # rather than there being consistent matrix-wide ones, so we don't.
-        defer.returnValue((user_id, token))
+    @defer.inlineCallbacks
+    def post_consent_actions(self, user_id):
+        """A series of registration actions that can only be carried out once consent
+        has been granted
+
+        Args:
+            user_id (str): The user to run post-consent actions for
+        """
+        yield self._auto_join_rooms(user_id)
 
     @defer.inlineCallbacks
     def appservice_register(self, user_localpart, as_token, password, display_name):
@@ -298,14 +354,17 @@ class RegistrationHandler(BaseHandler):
         if password:
             password_hash = yield self._auth_handler.hash(password)
 
-        yield self.store.register(
+        display_name = display_name or user.localpart
+
+        yield self._register_with_store(
             user_id=user_id,
             password_hash=password_hash,
             appservice_id=service_id,
+            create_profile_with_displayname=display_name,
         )
 
         yield self.profile_handler.set_displayname(
-            user, None, display_name or user.localpart, by_admin=True,
+            user, None, display_name, by_admin=True,
         )
 
         if self.hs.config.user_directory_search_all_users:
@@ -356,10 +415,11 @@ class RegistrationHandler(BaseHandler):
         yield self.check_user_id_not_appservice_exclusive(user_id)
         token = self.macaroon_gen.generate_access_token(user_id)
         try:
-            yield self.store.register(
+            yield self._register_with_store(
                 user_id=user_id,
                 token=token,
                 password_hash=None,
+                create_profile_with_displayname=user.localpart,
             )
 
             yield self.profile_handler.set_displayname(
@@ -383,8 +443,7 @@ class RegistrationHandler(BaseHandler):
             logger.info("validating threepidcred sid %s on id server %s",
                         c['sid'], c['idServer'])
             try:
-                identity_handler = self.hs.get_handlers().identity_handler
-                threepid = yield identity_handler.threepid_from_creds(c)
+                threepid = yield self.identity_handler.threepid_from_creds(c)
             except Exception:
                 logger.exception("Couldn't validate 3pid")
                 raise RegistrationError(400, "Couldn't validate 3pid")
@@ -410,9 +469,8 @@ class RegistrationHandler(BaseHandler):
 
         # Now we have a matrix ID, bind it to the threepids we were given
         for c in threepidCreds:
-            identity_handler = self.hs.get_handlers().identity_handler
             # XXX: This should be a deferred list, shouldn't it?
-            yield identity_handler.bind_threepid(c, user_id)
+            yield self.identity_handler.bind_threepid(c, user_id)
 
     def check_user_id_not_appservice_exclusive(self, user_id, allowed_appservice=None):
         # don't allow people to register the server notices mxid
@@ -500,7 +558,7 @@ class RegistrationHandler(BaseHandler):
         lines = response.split('\n')
         json = {
             "valid": lines[0] == 'true',
-            "error_url": "http://www.google.com/recaptcha/api/challenge?" +
+            "error_url": "http://www.recaptcha.net/recaptcha/api/challenge?" +
                          "error=%s" % lines[1]
         }
         defer.returnValue(json)
@@ -511,7 +569,7 @@ class RegistrationHandler(BaseHandler):
         Used only by c/s api v1
         """
         data = yield self.captcha_client.post_urlencoded_get_raw(
-            "http://www.google.com:80/recaptcha/api/verify",
+            "http://www.recaptcha.net:80/recaptcha/api/verify",
             args={
                 'privatekey': private_key,
                 'remoteip': ip_addr,
@@ -553,14 +611,15 @@ class RegistrationHandler(BaseHandler):
         token = self.macaroon_gen.generate_access_token(user_id)
 
         if need_register:
-            yield self.store.register(
+            yield self._register_with_store(
                 user_id=user_id,
                 token=token,
                 password_hash=password_hash,
+                create_profile_with_displayname=displayname or user.localpart,
             )
             if displayname is not None:
                 yield self.profile_handler.set_displayname(
-                    user, None, displayname, by_admin=True,
+                    user, None, displayname or user.localpart, by_admin=True,
                 )
         else:
             yield self._auth_handler.delete_access_tokens_for_user(user_id)
@@ -568,9 +627,6 @@ class RegistrationHandler(BaseHandler):
 
         defer.returnValue((user_id, token))
 
-    def auth_handler(self):
-        return self.hs.get_auth_handler()
-
     @defer.inlineCallbacks
     def get_or_register_3pid_guest(self, medium, address, inviter_user_id):
         """Get a guest access token for a 3PID, creating a guest account if
@@ -629,3 +685,275 @@ class RegistrationHandler(BaseHandler):
             action="join",
             ratelimit=False,
         )
+
+    def _register_with_store(self, user_id, token=None, password_hash=None,
+                             was_guest=False, make_guest=False, appservice_id=None,
+                             create_profile_with_displayname=None, admin=False,
+                             user_type=None):
+        """Register user in the datastore.
+
+        Args:
+            user_id (str): The desired user ID to register.
+            token (str): The desired access token to use for this user. If this
+                is not None, the given access token is associated with the user
+                id.
+            password_hash (str|None): Optional. The password hash for this user.
+            was_guest (bool): Optional. Whether this is a guest account being
+                upgraded to a non-guest account.
+            make_guest (boolean): True if the new user should be a guest,
+                False to add a regular user account.
+            appservice_id (str|None): The ID of the appservice registering the user.
+            create_profile_with_displayname (unicode|None): Optionally create a
+                profile for the user, setting their displayname to the given value
+            admin (boolean): is an admin user?
+            user_type (str|None): type of user. One of the values from
+                api.constants.UserTypes, or None for a normal user.
+
+        Returns:
+            Deferred
+        """
+        if self.hs.config.worker_app:
+            return self._register_client(
+                user_id=user_id,
+                token=token,
+                password_hash=password_hash,
+                was_guest=was_guest,
+                make_guest=make_guest,
+                appservice_id=appservice_id,
+                create_profile_with_displayname=create_profile_with_displayname,
+                admin=admin,
+                user_type=user_type,
+            )
+        else:
+            return self.store.register(
+                user_id=user_id,
+                token=token,
+                password_hash=password_hash,
+                was_guest=was_guest,
+                make_guest=make_guest,
+                appservice_id=appservice_id,
+                create_profile_with_displayname=create_profile_with_displayname,
+                admin=admin,
+                user_type=user_type,
+            )
+
+    @defer.inlineCallbacks
+    def register_device(self, user_id, device_id, initial_display_name,
+                        is_guest=False):
+        """Register a device for a user and generate an access token.
+
+        Args:
+            user_id (str): full canonical @user:id
+            device_id (str|None): The device ID to check, or None to generate
+                a new one.
+            initial_display_name (str|None): An optional display name for the
+                device.
+            is_guest (bool): Whether this is a guest account
+
+        Returns:
+            defer.Deferred[tuple[str, str]]: Tuple of device ID and access token
+        """
+
+        if self.hs.config.worker_app:
+            r = yield self._register_device_client(
+                user_id=user_id,
+                device_id=device_id,
+                initial_display_name=initial_display_name,
+                is_guest=is_guest,
+            )
+            defer.returnValue((r["device_id"], r["access_token"]))
+        else:
+            device_id = yield self.device_handler.check_device_registered(
+                user_id, device_id, initial_display_name
+            )
+            if is_guest:
+                access_token = self.macaroon_gen.generate_access_token(
+                    user_id, ["guest = true"]
+                )
+            else:
+                access_token = yield self._auth_handler.get_access_token_for_user_id(
+                    user_id, device_id=device_id,
+                )
+
+            defer.returnValue((device_id, access_token))
+
+    @defer.inlineCallbacks
+    def post_registration_actions(self, user_id, auth_result, access_token,
+                                  bind_email, bind_msisdn):
+        """A user has completed registration
+
+        Args:
+            user_id (str): The user ID of the newly registered user
+            auth_result (dict): The authenticated credentials of the newly
+                registered user.
+            access_token (str|None): The access token of the newly logged in
+                device, or None if `inhibit_login` enabled.
+            bind_email (bool): Whether to bind the email with the identity
+                server
+            bind_msisdn (bool): Whether to bind the msisdn with the identity
+                server
+        """
+        if self.hs.config.worker_app:
+            yield self._post_registration_client(
+                user_id=user_id,
+                auth_result=auth_result,
+                access_token=access_token,
+                bind_email=bind_email,
+                bind_msisdn=bind_msisdn,
+            )
+            return
+
+        if auth_result and LoginType.EMAIL_IDENTITY in auth_result:
+            threepid = auth_result[LoginType.EMAIL_IDENTITY]
+            # Necessary due to auth checks prior to the threepid being
+            # written to the db
+            if is_threepid_reserved(
+                self.hs.config.mau_limits_reserved_threepids, threepid
+            ):
+                yield self.store.upsert_monthly_active_user(user_id)
+
+            yield self._register_email_threepid(
+                user_id, threepid, access_token,
+                bind_email,
+            )
+
+        if auth_result and LoginType.MSISDN in auth_result:
+            threepid = auth_result[LoginType.MSISDN]
+            yield self._register_msisdn_threepid(
+                user_id, threepid, bind_msisdn,
+            )
+
+        if auth_result and LoginType.TERMS in auth_result:
+            yield self._on_user_consented(
+                user_id, self.hs.config.user_consent_version,
+            )
+
+    @defer.inlineCallbacks
+    def _on_user_consented(self, user_id, consent_version):
+        """A user consented to the terms on registration
+
+        Args:
+            user_id (str): The user ID that consented
+            consent_version (str): version of the policy the user has
+                consented to.
+        """
+        logger.info("%s has consented to the privacy policy", user_id)
+        yield self.store.user_set_consent_version(
+            user_id, consent_version,
+        )
+        yield self.post_consent_actions(user_id)
+
+    @defer.inlineCallbacks
+    def _register_email_threepid(self, user_id, threepid, token, bind_email):
+        """Add an email address as a 3pid identifier
+
+        Also adds an email pusher for the email address, if configured in the
+        HS config
+
+        Also optionally binds emails to the given user_id on the identity server
+
+        Must be called on master.
+
+        Args:
+            user_id (str): id of user
+            threepid (object): m.login.email.identity auth response
+            token (str|None): access_token for the user, or None if not logged
+                in.
+            bind_email (bool): true if the client requested the email to be
+                bound at the identity server
+        Returns:
+            defer.Deferred:
+        """
+        reqd = ('medium', 'address', 'validated_at')
+        if any(x not in threepid for x in reqd):
+            # This will only happen if the ID server returns a malformed response
+            logger.info("Can't add incomplete 3pid")
+            return
+
+        yield self._auth_handler.add_threepid(
+            user_id,
+            threepid['medium'],
+            threepid['address'],
+            threepid['validated_at'],
+        )
+
+        # And we add an email pusher for them by default, but only
+        # if email notifications are enabled (so people don't start
+        # getting mail spam where they weren't before if email
+        # notifs are set up on a home server)
+        if (self.hs.config.email_enable_notifs and
+                self.hs.config.email_notif_for_new_users
+                and token):
+            # Pull the ID of the access token back out of the db
+            # It would really make more sense for this to be passed
+            # up when the access token is saved, but that's quite an
+            # invasive change I'd rather do separately.
+            user_tuple = yield self.store.get_user_by_access_token(
+                token
+            )
+            token_id = user_tuple["token_id"]
+
+            yield self.pusher_pool.add_pusher(
+                user_id=user_id,
+                access_token=token_id,
+                kind="email",
+                app_id="m.email",
+                app_display_name="Email Notifications",
+                device_display_name=threepid["address"],
+                pushkey=threepid["address"],
+                lang=None,  # We don't know a user's language here
+                data={},
+            )
+
+        if bind_email:
+            logger.info("bind_email specified: binding")
+            logger.debug("Binding emails %s to %s" % (
+                threepid, user_id
+            ))
+            yield self.identity_handler.bind_threepid(
+                threepid['threepid_creds'], user_id
+            )
+        else:
+            logger.info("bind_email not specified: not binding email")
+
+    @defer.inlineCallbacks
+    def _register_msisdn_threepid(self, user_id, threepid, bind_msisdn):
+        """Add a phone number as a 3pid identifier
+
+        Also optionally binds msisdn to the given user_id on the identity server
+
+        Must be called on master.
+
+        Args:
+            user_id (str): id of user
+            threepid (object): m.login.msisdn auth response
+            bind_msisdn (bool): true if the client requested the msisdn to be
+                bound at the identity server
+        Returns:
+            defer.Deferred:
+        """
+        try:
+            assert_params_in_dict(threepid, ['medium', 'address', 'validated_at'])
+        except SynapseError as ex:
+            if ex.errcode == Codes.MISSING_PARAM:
+                # This will only happen if the ID server returns a malformed response
+                logger.info("Can't add incomplete 3pid")
+                defer.returnValue(None)
+            raise
+
+        yield self._auth_handler.add_threepid(
+            user_id,
+            threepid['medium'],
+            threepid['address'],
+            threepid['validated_at'],
+        )
+
+        if bind_msisdn:
+            logger.info("bind_msisdn specified: binding")
+            logger.debug("Binding msisdn %s to %s", threepid, user_id)
+            yield self.identity_handler.bind_threepid(
+                threepid['threepid_creds'], user_id
+            )
+        else:
+            logger.info("bind_msisdn not specified: not binding msisdn")
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 3870e433ec..eb4b437ce8 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -21,7 +21,7 @@ import math
 import string
 from collections import OrderedDict
 
-from six import string_types
+from six import iteritems, string_types
 
 from twisted.internet import defer
 
@@ -32,10 +32,11 @@ from synapse.api.constants import (
     JoinRules,
     RoomCreationPreset,
 )
-from synapse.api.errors import AuthError, Codes, StoreError, SynapseError
+from synapse.api.errors import AuthError, Codes, NotFoundError, StoreError, SynapseError
 from synapse.storage.state import StateFilter
 from synapse.types import RoomAlias, RoomID, RoomStreamToken, StreamToken, UserID
 from synapse.util import stringutils
+from synapse.util.async_helpers import Linearizer
 from synapse.visibility import filter_events_for_client
 
 from ._base import BaseHandler
@@ -75,6 +76,372 @@ class RoomCreationHandler(BaseHandler):
 
         self.spam_checker = hs.get_spam_checker()
         self.event_creation_handler = hs.get_event_creation_handler()
+        self.room_member_handler = hs.get_room_member_handler()
+
+        # linearizer to stop two upgrades happening at once
+        self._upgrade_linearizer = Linearizer("room_upgrade_linearizer")
+
+    @defer.inlineCallbacks
+    def upgrade_room(self, requester, old_room_id, new_version):
+        """Replace a room with a new room with a different version
+
+        Args:
+            requester (synapse.types.Requester): the user requesting the upgrade
+            old_room_id (unicode): the id of the room to be replaced
+            new_version (unicode): the new room version to use
+
+        Returns:
+            Deferred[unicode]: the new room id
+        """
+        yield self.ratelimit(requester)
+
+        user_id = requester.user.to_string()
+
+        with (yield self._upgrade_linearizer.queue(old_room_id)):
+            # start by allocating a new room id
+            r = yield self.store.get_room(old_room_id)
+            if r is None:
+                raise NotFoundError("Unknown room id %s" % (old_room_id,))
+            new_room_id = yield self._generate_room_id(
+                creator_id=user_id, is_public=r["is_public"],
+            )
+
+            logger.info("Creating new room %s to replace %s", new_room_id, old_room_id)
+
+            # we create and auth the tombstone event before properly creating the new
+            # room, to check our user has perms in the old room.
+            tombstone_event, tombstone_context = (
+                yield self.event_creation_handler.create_event(
+                    requester, {
+                        "type": EventTypes.Tombstone,
+                        "state_key": "",
+                        "room_id": old_room_id,
+                        "sender": user_id,
+                        "content": {
+                            "body": "This room has been replaced",
+                            "replacement_room": new_room_id,
+                        }
+                    },
+                    token_id=requester.access_token_id,
+                )
+            )
+            old_room_version = yield self.store.get_room_version(old_room_id)
+            yield self.auth.check_from_context(
+                old_room_version, tombstone_event, tombstone_context,
+            )
+
+            yield self.clone_existing_room(
+                requester,
+                old_room_id=old_room_id,
+                new_room_id=new_room_id,
+                new_room_version=new_version,
+                tombstone_event_id=tombstone_event.event_id,
+            )
+
+            # now send the tombstone
+            yield self.event_creation_handler.send_nonmember_event(
+                requester, tombstone_event, tombstone_context,
+            )
+
+            old_room_state = yield tombstone_context.get_current_state_ids(self.store)
+
+            # update any aliases
+            yield self._move_aliases_to_new_room(
+                requester, old_room_id, new_room_id, old_room_state,
+            )
+
+            # and finally, shut down the PLs in the old room, and update them in the new
+            # room.
+            yield self._update_upgraded_room_pls(
+                requester, old_room_id, new_room_id, old_room_state,
+            )
+
+            defer.returnValue(new_room_id)
+
+    @defer.inlineCallbacks
+    def _update_upgraded_room_pls(
+            self, requester, old_room_id, new_room_id, old_room_state,
+    ):
+        """Send updated power levels in both rooms after an upgrade
+
+        Args:
+            requester (synapse.types.Requester): the user requesting the upgrade
+            old_room_id (unicode): the id of the room to be replaced
+            new_room_id (unicode): the id of the replacement room
+            old_room_state (dict[tuple[str, str], str]): the state map for the old room
+
+        Returns:
+            Deferred
+        """
+        old_room_pl_event_id = old_room_state.get((EventTypes.PowerLevels, ""))
+
+        if old_room_pl_event_id is None:
+            logger.warning(
+                "Not supported: upgrading a room with no PL event. Not setting PLs "
+                "in old room.",
+            )
+            return
+
+        old_room_pl_state = yield self.store.get_event(old_room_pl_event_id)
+
+        # we try to stop regular users from speaking by setting the PL required
+        # to send regular events and invites to 'Moderator' level. That's normally
+        # 50, but if the default PL in a room is 50 or more, then we set the
+        # required PL above that.
+
+        pl_content = dict(old_room_pl_state.content)
+        users_default = int(pl_content.get("users_default", 0))
+        restricted_level = max(users_default + 1, 50)
+
+        updated = False
+        for v in ("invite", "events_default"):
+            current = int(pl_content.get(v, 0))
+            if current < restricted_level:
+                logger.info(
+                    "Setting level for %s in %s to %i (was %i)",
+                    v, old_room_id, restricted_level, current,
+                )
+                pl_content[v] = restricted_level
+                updated = True
+            else:
+                logger.info(
+                    "Not setting level for %s (already %i)",
+                    v, current,
+                )
+
+        if updated:
+            try:
+                yield self.event_creation_handler.create_and_send_nonmember_event(
+                    requester, {
+                        "type": EventTypes.PowerLevels,
+                        "state_key": '',
+                        "room_id": old_room_id,
+                        "sender": requester.user.to_string(),
+                        "content": pl_content,
+                    }, ratelimit=False,
+                )
+            except AuthError as e:
+                logger.warning("Unable to update PLs in old room: %s", e)
+
+        logger.info("Setting correct PLs in new room")
+        yield self.event_creation_handler.create_and_send_nonmember_event(
+            requester, {
+                "type": EventTypes.PowerLevels,
+                "state_key": '',
+                "room_id": new_room_id,
+                "sender": requester.user.to_string(),
+                "content": old_room_pl_state.content,
+            }, ratelimit=False,
+        )
+
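
As a standalone illustration of the threshold calculation in _update_upgraded_room_pls (plain Python; the example content values are made up):

    def compute_restricted_level(pl_content):
        # Moderator is normally 50; if users_default is already 50 or
        # more, the restriction has to sit just above it.
        users_default = int(pl_content.get("users_default", 0))
        return max(users_default + 1, 50)

    assert compute_restricted_level({}) == 50
    assert compute_restricted_level({"users_default": 50}) == 51
    assert compute_restricted_level({"users_default": 75}) == 76
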
+    @defer.inlineCallbacks
+    def clone_existing_room(
+            self, requester, old_room_id, new_room_id, new_room_version,
+            tombstone_event_id,
+    ):
+        """Populate a new room based on an old room
+
+        Args:
+            requester (synapse.types.Requester): the user requesting the upgrade
+            old_room_id (unicode): the id of the room to be replaced
+            new_room_id (unicode): the id to give the new room (should already have been
+                created with _generate_room_id())
+            new_room_version (unicode): the new room version to use
+            tombstone_event_id (unicode|str): the ID of the tombstone event in the old
+                room.
+        Returns:
+            Deferred[None]
+        """
+        user_id = requester.user.to_string()
+
+        if not self.spam_checker.user_may_create_room(user_id):
+            raise SynapseError(403, "You are not permitted to create rooms")
+
+        creation_content = {
+            "room_version": new_room_version,
+            "predecessor": {
+                "room_id": old_room_id,
+                "event_id": tombstone_event_id,
+            }
+        }
+
+        # Check if old room was non-federatable
+
+        # Get old room's create event
+        old_room_create_event = yield self.store.get_create_event_for_room(old_room_id)
+
+        # Check if the create event specified a non-federatable room
+        if not old_room_create_event.content.get("m.federate", True):
+            # If so, mark the new room as non-federatable as well
+            creation_content["m.federate"] = False
+
+        initial_state = dict()
+
+        # Replicate relevant room events
+        types_to_copy = (
+            (EventTypes.JoinRules, ""),
+            (EventTypes.Name, ""),
+            (EventTypes.Topic, ""),
+            (EventTypes.RoomHistoryVisibility, ""),
+            (EventTypes.GuestAccess, ""),
+            (EventTypes.RoomAvatar, ""),
+            (EventTypes.Encryption, ""),
+            (EventTypes.ServerACL, ""),
+        )
+
+        old_room_state_ids = yield self.store.get_filtered_current_state_ids(
+            old_room_id, StateFilter.from_types(types_to_copy),
+        )
+        # map from event_id to BaseEvent
+        old_room_state_events = yield self.store.get_events(old_room_state_ids.values())
+
+        for k, old_event_id in iteritems(old_room_state_ids):
+            old_event = old_room_state_events.get(old_event_id)
+            if old_event:
+                initial_state[k] = old_event.content
+
+        yield self._send_events_for_new_room(
+            requester,
+            new_room_id,
+
+            # we expect to override all the presets with initial_state, so this is
+            # somewhat arbitrary.
+            preset_config=RoomCreationPreset.PRIVATE_CHAT,
+
+            invite_list=[],
+            initial_state=initial_state,
+            creation_content=creation_content,
+        )
+
+        # Transfer membership events
+        old_room_member_state_ids = yield self.store.get_filtered_current_state_ids(
+            old_room_id, StateFilter.from_types([(EventTypes.Member, None)]),
+        )
+
+        # map from event_id to BaseEvent
+        old_room_member_state_events = yield self.store.get_events(
+            old_room_member_state_ids.values(),
+        )
+        for k, old_event in iteritems(old_room_member_state_events):
+            # Only transfer ban events
+            if ("membership" in old_event.content and
+                    old_event.content["membership"] == "ban"):
+                yield self.room_member_handler.update_membership(
+                    requester,
+                    UserID.from_string(old_event['state_key']),
+                    new_room_id,
+                    "ban",
+                    ratelimit=False,
+                    content=old_event.content,
+                )
+
+        # XXX invites/joins
+        # XXX 3pid invites
+
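
The creation content written into the replacement room's m.room.create event then has roughly the following shape (a sketch with made-up identifiers); the `predecessor` key is what lets clients stitch the two rooms together:

    creation_content = {
        "room_version": "3",  # example version only
        "predecessor": {
            "room_id": "!oldroom:example.org",     # made-up ID
            "event_id": "$tombstone:example.org",  # made-up ID
        },
        # only present when the old create event opted out of federation:
        "m.federate": False,
    }
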
+    @defer.inlineCallbacks
+    def _move_aliases_to_new_room(
+            self, requester, old_room_id, new_room_id, old_room_state,
+    ):
+        directory_handler = self.hs.get_handlers().directory_handler
+
+        aliases = yield self.store.get_aliases_for_room(old_room_id)
+
+        # check to see if we have a canonical alias.
+        canonical_alias = None
+        canonical_alias_event_id = old_room_state.get((EventTypes.CanonicalAlias, ""))
+        if canonical_alias_event_id:
+            canonical_alias_event = yield self.store.get_event(canonical_alias_event_id)
+            if canonical_alias_event:
+                canonical_alias = canonical_alias_event.content.get("alias", "")
+
+        # first we try to remove the aliases from the old room (we suppress sending
+        # the room_aliases event until the end).
+        #
+        # Note that we'll only be able to remove aliases that (a) aren't owned by an AS,
+        # and (b) the user created (unless the user is a server admin).
+        #
+        # This is probably correct - given we don't allow such aliases to be deleted
+        # normally, it would be odd to allow it in the case of doing a room upgrade -
+        # but it makes the upgrade less effective, and you have to wonder why a room
+        # admin can't remove aliases that point to that room anyway.
+        # (cf https://github.com/matrix-org/synapse/issues/2360)
+        #
+        removed_aliases = []
+        for alias_str in aliases:
+            alias = RoomAlias.from_string(alias_str)
+            try:
+                yield directory_handler.delete_association(
+                    requester, alias, send_event=False,
+                )
+                removed_aliases.append(alias_str)
+            except SynapseError as e:
+                logger.warning(
+                    "Unable to remove alias %s from old room: %s",
+                    alias, e,
+                )
+
+        # if we didn't find any aliases, or couldn't remove any, we can skip the rest
+        # of this.
+        if not removed_aliases:
+            return
+
+        try:
+            # this can fail if, for some reason, our user doesn't have perms to send
+            # m.room.aliases events in the old room (note that we've already checked that
+            # they have perms to send a tombstone event, so that's not terribly likely).
+            #
+            # If that happens, it's regrettable, but we should carry on: it's the same
+            # as when you remove an alias from the directory normally - it just means that
+            # the aliases event gets out of sync with the directory
+            # (cf https://github.com/vector-im/riot-web/issues/2369)
+            yield directory_handler.send_room_alias_update_event(
+                requester, old_room_id,
+            )
+        except AuthError as e:
+            logger.warning(
+                "Failed to send updated alias event on old room: %s", e,
+            )
+
+        # we can now add any aliases we successfully removed to the new room.
+        for alias in removed_aliases:
+            try:
+                yield directory_handler.create_association(
+                    requester, RoomAlias.from_string(alias),
+                    new_room_id, servers=(self.hs.hostname, ),
+                    send_event=False,
+                )
+                logger.info("Moved alias %s to new room", alias)
+            except SynapseError as e:
+                # I'm not really expecting this to happen, but it could if the spam
+                # checking module decides it shouldn't, or similar.
+                logger.error(
+                    "Error adding alias %s to new room: %s",
+                    alias, e,
+                )
+
+        try:
+            if canonical_alias and (canonical_alias in removed_aliases):
+                yield self.event_creation_handler.create_and_send_nonmember_event(
+                    requester,
+                    {
+                        "type": EventTypes.CanonicalAlias,
+                        "state_key": "",
+                        "room_id": new_room_id,
+                        "sender": requester.user.to_string(),
+                        "content": {"alias": canonical_alias, },
+                    },
+                    ratelimit=False
+                )
+
+            yield directory_handler.send_room_alias_update_event(
+                requester, new_room_id,
+            )
+        except SynapseError as e:
+            # again I'm not really expecting this to fail, but if it does, I'd rather
+            # we returned the new room to the client at this point.
+            logger.error(
+                "Unable to send updated alias events in new room: %s", e,
+            )
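
Condensed, the alias migration above runs in three phases. A minimal sketch, where the `directory` methods are hypothetical stand-ins with no error handling:

    def move_aliases(directory, old_room_id, new_room_id, aliases, canonical):
        moved = []
        for alias in aliases:
            # phase 1: detach from the old room (may fail per-alias)
            if directory.delete_association(old_room_id, alias):
                moved.append(alias)
        for alias in moved:
            # phase 2: re-attach the successfully removed aliases
            directory.create_association(new_room_id, alias)
        if canonical in moved:
            # phase 3: re-point the canonical alias, if it moved
            directory.set_canonical_alias(new_room_id, canonical)
        return moved
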
 
     @defer.inlineCallbacks
     def create_room(self, requester, config, ratelimit=True,
@@ -106,7 +473,7 @@ class RoomCreationHandler(BaseHandler):
         """
         user_id = requester.user.to_string()
 
-        self.auth.check_auth_blocking(user_id)
+        yield self.auth.check_auth_blocking(user_id)
 
         if not self.spam_checker.user_may_create_room(user_id):
             raise SynapseError(403, "You are not permitted to create rooms")
@@ -167,28 +534,7 @@ class RoomCreationHandler(BaseHandler):
         visibility = config.get("visibility", None)
         is_public = visibility == "public"
 
-        # autogen room IDs and try to create it. We may clash, so just
-        # try a few times till one goes through, giving up eventually.
-        attempts = 0
-        room_id = None
-        while attempts < 5:
-            try:
-                random_string = stringutils.random_string(18)
-                gen_room_id = RoomID(
-                    random_string,
-                    self.hs.hostname,
-                )
-                yield self.store.store_room(
-                    room_id=gen_room_id.to_string(),
-                    room_creator_user_id=user_id,
-                    is_public=is_public
-                )
-                room_id = gen_room_id.to_string()
-                break
-            except StoreError:
-                attempts += 1
-        if not room_id:
-            raise StoreError(500, "Couldn't generate a room ID.")
+        room_id = yield self._generate_room_id(creator_id=user_id, is_public=is_public)
 
         if room_alias:
             directory_handler = self.hs.get_handlers().directory_handler
@@ -218,18 +564,15 @@ class RoomCreationHandler(BaseHandler):
         # override any attempt to set room versions via the creation_content
         creation_content["room_version"] = room_version
 
-        room_member_handler = self.hs.get_room_member_handler()
-
         yield self._send_events_for_new_room(
             requester,
             room_id,
-            room_member_handler,
             preset_config=preset_config,
             invite_list=invite_list,
             initial_state=initial_state,
             creation_content=creation_content,
             room_alias=room_alias,
-            power_level_content_override=config.get("power_level_content_override", {}),
+            power_level_content_override=config.get("power_level_content_override"),
             creator_join_profile=creator_join_profile,
         )
 
@@ -265,7 +608,7 @@ class RoomCreationHandler(BaseHandler):
             if is_direct:
                 content["is_direct"] = is_direct
 
-            yield room_member_handler.update_membership(
+            yield self.room_member_handler.update_membership(
                 requester,
                 UserID.from_string(invitee),
                 room_id,
@@ -303,14 +646,13 @@ class RoomCreationHandler(BaseHandler):
             self,
             creator,  # A Requester object.
             room_id,
-            room_member_handler,
             preset_config,
             invite_list,
             initial_state,
             creation_content,
-            room_alias,
-            power_level_content_override,
-            creator_join_profile,
+            room_alias=None,
+            power_level_content_override=None,
+            creator_join_profile=None,
     ):
         def create(etype, content, **kwargs):
             e = {
@@ -326,6 +668,7 @@ class RoomCreationHandler(BaseHandler):
         @defer.inlineCallbacks
         def send(etype, content, **kwargs):
             event = create(etype, content, **kwargs)
+            logger.info("Sending %s in new room", etype)
             yield self.event_creation_handler.create_and_send_nonmember_event(
                 creator,
                 event,
@@ -348,7 +691,8 @@ class RoomCreationHandler(BaseHandler):
             content=creation_content,
         )
 
-        yield room_member_handler.update_membership(
+        logger.info("Sending %s in new room", EventTypes.Member)
+        yield self.room_member_handler.update_membership(
             creator,
             creator.user,
             room_id,
@@ -390,7 +734,8 @@ class RoomCreationHandler(BaseHandler):
                 for invitee in invite_list:
                     power_level_content["users"][invitee] = 100
 
-            power_level_content.update(power_level_content_override)
+            if power_level_content_override:
+                power_level_content.update(power_level_content_override)
 
             yield send(
                 etype=EventTypes.PowerLevels,
@@ -430,7 +775,7 @@ class RoomCreationHandler(BaseHandler):
             )
 
         if "encryption_alg" in config:
-            send(
+            yield send(
                 etype=EventTypes.Encryption,
                 state_key="",
                 content={
@@ -438,6 +783,30 @@ class RoomCreationHandler(BaseHandler):
                 }
             )
 
+    @defer.inlineCallbacks
+    def _generate_room_id(self, creator_id, is_public):
+        # autogen room IDs and try to create it. We may clash, so just
+        # try a few times till one goes through, giving up eventually.
+        attempts = 0
+        while attempts < 5:
+            try:
+                random_string = stringutils.random_string(18)
+                gen_room_id = RoomID(
+                    random_string,
+                    self.hs.hostname,
+                ).to_string()
+                if isinstance(gen_room_id, bytes):
+                    gen_room_id = gen_room_id.decode('utf-8')
+                yield self.store.store_room(
+                    room_id=gen_room_id,
+                    room_creator_user_id=creator_id,
+                    is_public=is_public,
+                )
+                defer.returnValue(gen_room_id)
+            except StoreError:
+                attempts += 1
+        raise StoreError(500, "Couldn't generate a room ID.")
+
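
The retry loop in _generate_room_id reduces to the following plain-Python sketch, where an in-memory `existing` set stands in for the store's uniqueness check:

    import random
    import string

    def generate_room_id(existing, hostname, attempts=5):
        for _ in range(attempts):
            local_part = "".join(random.choices(string.ascii_letters, k=18))
            room_id = "!%s:%s" % (local_part, hostname)
            if room_id not in existing:  # store_room would raise on a clash
                existing.add(room_id)
                return room_id
        raise RuntimeError("Couldn't generate a room ID.")
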
 
 class RoomContextHandler(object):
     def __init__(self, hs):
diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py
index 4f51a464e7..afa508d729 100644
--- a/synapse/handlers/room_list.py
+++ b/synapse/handlers/room_list.py
@@ -74,8 +74,14 @@ class RoomListHandler(BaseHandler):
             # We explicitly don't bother caching searches or requests for
             # appservice specific lists.
             logger.info("Bypassing cache as search request.")
+
+            # XXX: Quick hack to stop room directory queries taking too long.
+            # Timeout request after 60s. Probably want a more fundamental
+            # solution at some point
+            timeout = self.clock.time() + 60
             return self._get_public_room_list(
-                limit, since_token, search_filter, network_tuple=network_tuple,
+                limit, since_token, search_filter,
+                network_tuple=network_tuple, timeout=timeout,
             )
 
         key = (limit, since_token, network_tuple)
@@ -90,7 +96,22 @@ class RoomListHandler(BaseHandler):
     def _get_public_room_list(self, limit=None, since_token=None,
                               search_filter=None,
                               network_tuple=EMPTY_THIRD_PARTY_ID,
-                              from_federation=False,):
+                              from_federation=False,
+                              timeout=None,):
+        """Generate a public room list.
+        Args:
+            limit (int|None): Maximum amount of rooms to return.
+            since_token (str|None)
+            search_filter (dict|None): Dictionary to filter rooms by.
+            network_tuple (ThirdPartyInstanceID): Which public list to use.
+                This can be (None, None) to indicate the main list, or a particular
+                appservice and network id to use an appservice specific one.
+                Setting to None returns all public rooms across all lists.
+            from_federation (bool): Whether this request originated from a
+                federating server or a client. Used for room filtering.
+            timeout (int|None): Absolute time in seconds (as returned by
+                `self.clock.time()`) after which to abandon the search.
+        """
         if since_token and since_token != "END":
             since_token = RoomListNextBatch.from_token(since_token)
         else:
@@ -205,6 +226,9 @@ class RoomListHandler(BaseHandler):
 
         chunk = []
         for i in range(0, len(rooms_to_scan), step):
+            if timeout and self.clock.time() > timeout:
+                raise Exception("Timed out searching room directory")
+
             batch = rooms_to_scan[i:i + step]
             logger.info("Processing %i rooms for result", len(batch))
             yield concurrently_execute(
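
The timeout here is an absolute deadline rather than a duration: the caller computes it once, and the batch loop checks it between batches. A minimal sketch of the pattern:

    import time

    def scan_in_batches(items, handle_batch, step=10, deadline=None):
        for i in range(0, len(items), step):
            if deadline is not None and time.time() > deadline:
                raise Exception("Timed out searching room directory")
            handle_batch(items[i:i + step])

    # the caller sets the deadline once, e.g.:
    # scan_in_batches(rooms_to_scan, process, deadline=time.time() + 60)
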
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 07fd3e82fc..190ea2c7b1 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -61,9 +61,9 @@ class RoomMemberHandler(object):
 
         self.federation_handler = hs.get_handlers().federation_handler
         self.directory_handler = hs.get_handlers().directory_handler
-        self.registration_handler = hs.get_handlers().registration_handler
+        self.registration_handler = hs.get_registration_handler()
         self.profile_handler = hs.get_profile_handler()
-        self.event_creation_hander = hs.get_event_creation_handler()
+        self.event_creation_handler = hs.get_event_creation_handler()
 
         self.member_linearizer = Linearizer(name="member")
 
@@ -161,6 +161,8 @@ class RoomMemberHandler(object):
         ratelimit=True,
         content=None,
     ):
+        user_id = target.to_string()
+
         if content is None:
             content = {}
 
@@ -168,14 +170,14 @@ class RoomMemberHandler(object):
         if requester.is_guest:
             content["kind"] = "guest"
 
-        event, context = yield self.event_creation_hander.create_event(
+        event, context = yield self.event_creation_handler.create_event(
             requester,
             {
                 "type": EventTypes.Member,
                 "content": content,
                 "room_id": room_id,
                 "sender": requester.user.to_string(),
-                "state_key": target.to_string(),
+                "state_key": user_id,
 
                 # For backwards compatibility:
                 "membership": membership,
@@ -186,14 +188,14 @@ class RoomMemberHandler(object):
         )
 
         # Check if this event matches the previous membership event for the user.
-        duplicate = yield self.event_creation_hander.deduplicate_state_event(
+        duplicate = yield self.event_creation_handler.deduplicate_state_event(
             event, context,
         )
         if duplicate is not None:
             # Discard the new event since this membership change is a no-op.
             defer.returnValue(duplicate)
 
-        yield self.event_creation_hander.handle_new_client_event(
+        yield self.event_creation_handler.handle_new_client_event(
             requester,
             event,
             context,
@@ -204,12 +206,12 @@ class RoomMemberHandler(object):
         prev_state_ids = yield context.get_prev_state_ids(self.store)
 
         prev_member_event_id = prev_state_ids.get(
-            (EventTypes.Member, target.to_string()),
+            (EventTypes.Member, user_id),
             None
         )
 
         if event.membership == Membership.JOIN:
-            # Only fire user_joined_room if the user has acutally joined the
+            # Only fire user_joined_room if the user has actually joined the
             # room. Don't bother if the user is just changing their profile
             # info.
             newly_joined = True
@@ -218,6 +220,18 @@ class RoomMemberHandler(object):
                 newly_joined = prev_member_event.membership != Membership.JOIN
             if newly_joined:
                 yield self._user_joined_room(target, room_id)
+
+            # Copy over direct message status and room tags if this is a join
+            # on an upgraded room
+
+            # Check if this is an upgraded room
+            predecessor = yield self.store.get_room_predecessor(room_id)
+
+            if predecessor:
+                # It is an upgraded room. Copy over old tags
+                self.copy_room_tags_and_direct_to_room(
+                    predecessor["room_id"], room_id, user_id,
+                )
         elif event.membership == Membership.LEAVE:
             if prev_member_event_id:
                 prev_member_event = yield self.store.get_event(prev_member_event_id)
@@ -227,6 +241,55 @@ class RoomMemberHandler(object):
         defer.returnValue(event)
 
     @defer.inlineCallbacks
+    def copy_room_tags_and_direct_to_room(
+        self,
+        old_room_id,
+        new_room_id,
+        user_id,
+    ):
+        """Copies the tags and direct room state from one room to another.
+
+        Args:
+            old_room_id (str)
+            new_room_id (str)
+            user_id (str)
+
+        Returns:
+            Deferred[None]
+        """
+        # Retrieve user account data for predecessor room
+        user_account_data, _ = yield self.store.get_account_data_for_user(
+            user_id,
+        )
+
+        # Copy direct message state if applicable
+        direct_rooms = user_account_data.get("m.direct", {})
+
+        # Check which key this room is under
+        if isinstance(direct_rooms, dict):
+            for key, room_id_list in direct_rooms.items():
+                if old_room_id in room_id_list and new_room_id not in room_id_list:
+                    # Add new room_id to this key
+                    direct_rooms[key].append(new_room_id)
+
+                    # Save back to user's m.direct account data
+                    yield self.store.add_account_data_for_user(
+                        user_id, "m.direct", direct_rooms,
+                    )
+                    break
+
+        # Copy room tags if applicable
+        room_tags = yield self.store.get_tags_for_room(
+            user_id, old_room_id,
+        )
+
+        # Copy each room tag to the new room
+        for tag, tag_content in room_tags.items():
+            yield self.store.add_tag_to_room(
+                user_id, new_room_id, tag, tag_content
+            )
+
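
m.direct account data maps each peer user ID to a list of room IDs, so the copy above appends the upgraded room under whichever key listed its predecessor. The same update on plain dicts (IDs made up):

    old_room_id, new_room_id = "!old:example.org", "!new:example.org"
    direct_rooms = {"@friend:example.org": [old_room_id]}

    for key, room_id_list in direct_rooms.items():
        if old_room_id in room_id_list and new_room_id not in room_id_list:
            room_id_list.append(new_room_id)
            break  # the account data would be saved back here

    assert direct_rooms == {"@friend:example.org": [old_room_id, new_room_id]}
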
+    @defer.inlineCallbacks
     def update_membership(
             self,
             requester,
@@ -493,7 +556,7 @@ class RoomMemberHandler(object):
         else:
             requester = synapse.types.create_requester(target_user)
 
-        prev_event = yield self.event_creation_hander.deduplicate_state_event(
+        prev_event = yield self.event_creation_handler.deduplicate_state_event(
             event, context,
         )
         if prev_event is not None:
@@ -513,7 +576,7 @@ class RoomMemberHandler(object):
             if is_blocked:
                 raise SynapseError(403, "This room has been blocked on this server")
 
-        yield self.event_creation_hander.handle_new_client_event(
+        yield self.event_creation_handler.handle_new_client_event(
             requester,
             event,
             context,
@@ -527,7 +590,7 @@ class RoomMemberHandler(object):
         )
 
         if event.membership == Membership.JOIN:
-            # Only fire user_joined_room if the user has acutally joined the
+            # Only fire user_joined_room if the user has actually joined the
             # room. Don't bother if the user is just changing their profile
             # info.
             newly_joined = True
@@ -755,7 +818,7 @@ class RoomMemberHandler(object):
             )
         )
 
-        yield self.event_creation_hander.create_and_send_nonmember_event(
+        yield self.event_creation_handler.create_and_send_nonmember_event(
             requester,
             {
                 "type": EventTypes.ThirdPartyInvite,
@@ -877,7 +940,8 @@ class RoomMemberHandler(object):
         # first member event?
         create_event_id = current_state_ids.get(("m.room.create", ""))
         if len(current_state_ids) == 1 and create_event_id:
-            defer.returnValue(self.hs.is_mine_id(create_event_id))
+            # We can only get here if we're in the process of creating the room
+            defer.returnValue(True)
 
         for etype, state_key in current_state_ids:
             if etype != EventTypes.Member or not self.hs.is_mine_id(state_key):
diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py
index 80e7b15de8..49c439313e 100644
--- a/synapse/handlers/search.py
+++ b/synapse/handlers/search.py
@@ -38,6 +38,41 @@ class SearchHandler(BaseHandler):
         super(SearchHandler, self).__init__(hs)
 
     @defer.inlineCallbacks
+    def get_old_rooms_from_upgraded_room(self, room_id):
+        """Retrieves room IDs of old rooms in the history of an upgraded room.
+
+        We do so by checking the m.room.create event of the room for a
+        `predecessor` key. If one exists, we add that room ID to our return
+        list, then check that room's m.room.create event in turn, and so on
+        until we can no longer find any more previous rooms.
+
+        The full list of all found rooms is then returned.
+
+        Args:
+            room_id (str): id of the room to search through.
+
+        Returns:
+            Deferred[iterable[unicode]]: predecessor room ids
+        """
+
+        historical_room_ids = []
+
+        while True:
+            predecessor = yield self.store.get_room_predecessor(room_id)
+
+            # If no predecessor, assume we've hit a dead end
+            if not predecessor:
+                break
+
+            # Add predecessor's room ID
+            historical_room_ids.append(predecessor["room_id"])
+
+            # Scan through the old room for further predecessors
+            room_id = predecessor["room_id"]
+
+        defer.returnValue(historical_room_ids)
+
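
A synchronous sketch of the same walk over a plain dict mapping each room to its predecessor (illustrative IDs; like the code above, it assumes the predecessor chain contains no cycles):

    PREDECESSORS = {"!v3:hs": "!v2:hs", "!v2:hs": "!v1:hs", "!v1:hs": None}

    def old_rooms_from_upgraded_room(room_id):
        historical = []
        while PREDECESSORS.get(room_id):
            room_id = PREDECESSORS[room_id]
            historical.append(room_id)
        return historical

    assert old_rooms_from_upgraded_room("!v3:hs") == ["!v2:hs", "!v1:hs"]
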
+    @defer.inlineCallbacks
     def search(self, user, content, batch=None):
         """Performs a full text search for a user.
 
@@ -50,6 +85,9 @@ class SearchHandler(BaseHandler):
             dict to be returned to the client with results of search
         """
 
+        if not self.hs.config.enable_search:
+            raise SynapseError(400, "Search is disabled on this homeserver")
+
         batch_group = None
         batch_group_key = None
         batch_token = None
@@ -134,6 +172,18 @@ class SearchHandler(BaseHandler):
         )
         room_ids = set(r.room_id for r in rooms)
 
+        # If searching a subset of all rooms, check if any of the rooms
+        # are from an upgraded room, and search their contents as well
+        if search_filter.rooms:
+            historical_room_ids = []
+            for room_id in search_filter.rooms:
+                # Add any previous rooms to the search if they exist
+                ids = yield self.get_old_rooms_from_upgraded_room(room_id)
+                historical_room_ids += ids
+
+            # Prevent any historical events from being filtered
+            search_filter = search_filter.with_room_ids(historical_room_ids)
+
         room_ids = search_filter.filter_rooms(room_ids)
 
         if batch_group == "room_id":
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index c802db9792..26e3f11ba1 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -895,14 +895,17 @@ class SyncHandler(object):
         Returns:
             Deferred(SyncResult)
         """
-        logger.info("Calculating sync response for %r", sync_config.user)
-
         # NB: The now_token gets changed by some of the generate_sync_* methods,
         # this is due to some of the underlying streams not supporting the ability
         # to query up to a given point.
         # Always use the `now_token` in `SyncResultBuilder`
         now_token = yield self.event_sources.get_current_token()
 
+        logger.info(
+            "Calculating sync response for %r between %s and %s",
+            sync_config.user, since_token, now_token,
+        )
+
         user_id = sync_config.user.to_string()
         app_service = self.store.get_app_service_by_user_id(user_id)
         if app_service:
@@ -1396,6 +1399,12 @@ class SyncHandler(object):
         room_entries = []
         invited = []
         for room_id, events in iteritems(mem_change_events_by_room_id):
+            logger.info(
+                "Membership changes in %s: [%s]",
+                room_id,
+                ", ".join(("%s (%s)" % (e.event_id, e.membership) for e in events)),
+            )
+
             non_joins = [e for e in events if e.membership != Membership.JOIN]
             has_join = len(non_joins) != len(events)
 
@@ -1479,10 +1488,22 @@ class SyncHandler(object):
                 if since_token and since_token.is_after(leave_token):
                     continue
 
+                # If this is an out of band message, like a remote invite
+                # rejection, we include it in the recents batch. Otherwise, we
+                # let _load_filtered_recents handle fetching the correct
+                # batches.
+                #
+                # This is all screaming out for a refactor, as the logic here is
+                # subtle and the moving parts numerous.
+                if leave_event.internal_metadata.is_out_of_band_membership():
+                    batch_events = [leave_event]
+                else:
+                    batch_events = None
+
                 room_entries.append(RoomSyncResultBuilder(
                     room_id=room_id,
                     rtype="archived",
-                    events=None,
+                    events=batch_events,
                     newly_joined=room_id in newly_joined_rooms,
                     full_state=False,
                     since_token=since_token,
@@ -1674,13 +1695,17 @@ class SyncHandler(object):
                 "content": content,
             })
 
-        account_data = sync_config.filter_collection.filter_room_account_data(
+        account_data_events = sync_config.filter_collection.filter_room_account_data(
             account_data_events
         )
 
         ephemeral = sync_config.filter_collection.filter_room_ephemeral(ephemeral)
 
-        if not (always_include or batch or account_data or ephemeral or full_state):
+        if not (always_include
+                or batch
+                or account_data_events
+                or ephemeral
+                or full_state):
             return
 
         state = yield self.compute_state_delta(
@@ -1751,7 +1776,7 @@ class SyncHandler(object):
                 room_id=room_id,
                 timeline=batch,
                 state=state,
-                account_data=account_data,
+                account_data=account_data_events,
             )
             if room_sync or always_include:
                 sync_result_builder.archived.append(room_sync)
diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py
index c610933dd4..a61bbf9392 100644
--- a/synapse/handlers/typing.py
+++ b/synapse/handlers/typing.py
@@ -63,11 +63,8 @@ class TypingHandler(object):
         self._member_typing_until = {}  # clock time we expect to stop
         self._member_last_federation_poke = {}
 
-        # map room IDs to serial numbers
-        self._room_serials = {}
         self._latest_room_serial = 0
-        # map room IDs to sets of users currently typing
-        self._room_typing = {}
+        self._reset()
 
         # caches which room_ids changed at which serials
         self._typing_stream_change_cache = StreamChangeCache(
@@ -79,6 +76,15 @@ class TypingHandler(object):
             5000,
         )
 
+    def _reset(self):
+        """
+        Reset the typing handler's data caches.
+        """
+        # map room IDs to serial numbers
+        self._room_serials = {}
+        # map room IDs to sets of users currently typing
+        self._room_typing = {}
+
     def _handle_timeouts(self):
         logger.info("Checking for typing timeouts")
 
diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py
index f11b430126..283c6c1b81 100644
--- a/synapse/handlers/user_directory.py
+++ b/synapse/handlers/user_directory.py
@@ -19,6 +19,7 @@ from six import iteritems
 
 from twisted.internet import defer
 
+import synapse.metrics
 from synapse.api.constants import EventTypes, JoinRules, Membership
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage.roommember import ProfileInfo
@@ -125,9 +126,12 @@ class UserDirectoryHandler(object):
         """
         # FIXME(#3714): We should probably do this in the same worker as all
         # the other changes.
-        yield self.store.update_profile_in_user_dir(
-            user_id, profile.display_name, profile.avatar_url, None,
-        )
+        is_support = yield self.store.is_support_user(user_id)
+        # Support users are for diagnostics and should not appear in the user directory.
+        if not is_support:
+            yield self.store.update_profile_in_user_dir(
+                user_id, profile.display_name, profile.avatar_url, None
+            )
 
     @defer.inlineCallbacks
     def handle_user_deactivated(self, user_id):
@@ -160,6 +164,12 @@ class UserDirectoryHandler(object):
                 yield self._handle_deltas(deltas)
 
                 self.pos = deltas[-1]["stream_id"]
+
+                # Expose current event processing position to prometheus
+                synapse.metrics.event_processing_positions.labels("user_dir").set(
+                    self.pos
+                )
+
                 yield self.store.update_user_directory_stream_pos(self.pos)
 
     @defer.inlineCallbacks
@@ -182,21 +192,25 @@ class UserDirectoryHandler(object):
             logger.info("Handling room %d/%d", num_processed_rooms + 1, len(room_ids))
             yield self._handle_initial_room(room_id)
             num_processed_rooms += 1
-            yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.)
+            yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.0)
 
         logger.info("Processed all rooms.")
 
         if self.search_all_users:
             num_processed_users = 0
             user_ids = yield self.store.get_all_local_users()
-            logger.info("Doing initial update of user directory. %d users", len(user_ids))
+            logger.info(
+                "Doing initial update of user directory. %d users", len(user_ids)
+            )
             for user_id in user_ids:
                 # We add profiles for all users even if they don't match the
                 # include pattern, just in case we want to change it in future
-                logger.info("Handling user %d/%d", num_processed_users + 1, len(user_ids))
+                logger.info(
+                    "Handling user %d/%d", num_processed_users + 1, len(user_ids)
+                )
                 yield self._handle_local_user(user_id)
                 num_processed_users += 1
-                yield self.clock.sleep(self.INITIAL_USER_SLEEP_MS / 1000.)
+                yield self.clock.sleep(self.INITIAL_USER_SLEEP_MS / 1000.0)
 
             logger.info("Processed all users")
 
@@ -215,24 +229,24 @@ class UserDirectoryHandler(object):
         if not is_in_room:
             return
 
-        is_public = yield self.store.is_room_world_readable_or_publicly_joinable(room_id)
+        is_public = yield self.store.is_room_world_readable_or_publicly_joinable(
+            room_id
+        )
 
         users_with_profile = yield self.state.get_current_user_in_room(room_id)
         user_ids = set(users_with_profile)
         unhandled_users = user_ids - self.initially_handled_users
 
         yield self.store.add_profiles_to_user_dir(
-            room_id, {
-                user_id: users_with_profile[user_id] for user_id in unhandled_users
-            }
+            room_id,
+            {user_id: users_with_profile[user_id] for user_id in unhandled_users},
         )
 
         self.initially_handled_users |= unhandled_users
 
         if is_public:
             yield self.store.add_users_to_public_room(
-                room_id,
-                user_ids=user_ids - self.initially_handled_users_in_public
+                room_id, user_ids=user_ids - self.initially_handled_users_in_public
             )
             self.initially_handled_users_in_public |= user_ids
 
@@ -244,7 +258,7 @@ class UserDirectoryHandler(object):
         count = 0
         for user_id in user_ids:
             if count % self.INITIAL_ROOM_SLEEP_COUNT == 0:
-                yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.)
+                yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.0)
 
             if not self.is_mine_id(user_id):
                 count += 1
@@ -259,7 +273,7 @@ class UserDirectoryHandler(object):
                     continue
 
                 if count % self.INITIAL_ROOM_SLEEP_COUNT == 0:
-                    yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.)
+                    yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.0)
                 count += 1
 
                 user_set = (user_id, other_user_id)
@@ -281,25 +295,23 @@ class UserDirectoryHandler(object):
 
                 if len(to_insert) > self.INITIAL_ROOM_BATCH_SIZE:
                     yield self.store.add_users_who_share_room(
-                        room_id, not is_public, to_insert,
+                        room_id, not is_public, to_insert
                     )
                     to_insert.clear()
 
                 if len(to_update) > self.INITIAL_ROOM_BATCH_SIZE:
                     yield self.store.update_users_who_share_room(
-                        room_id, not is_public, to_update,
+                        room_id, not is_public, to_update
                     )
                     to_update.clear()
 
         if to_insert:
-            yield self.store.add_users_who_share_room(
-                room_id, not is_public, to_insert,
-            )
+            yield self.store.add_users_who_share_room(room_id, not is_public, to_insert)
             to_insert.clear()
 
         if to_update:
             yield self.store.update_users_who_share_room(
-                room_id, not is_public, to_update,
+                room_id, not is_public, to_update
             )
             to_update.clear()
 
@@ -320,50 +332,55 @@ class UserDirectoryHandler(object):
             # may have become public or not and add/remove the users in said room
             if typ in (EventTypes.RoomHistoryVisibility, EventTypes.JoinRules):
                 yield self._handle_room_publicity_change(
-                    room_id, prev_event_id, event_id, typ,
+                    room_id, prev_event_id, event_id, typ
                 )
             elif typ == EventTypes.Member:
                 change = yield self._get_key_change(
-                    prev_event_id, event_id,
+                    prev_event_id,
+                    event_id,
                     key_name="membership",
                     public_value=Membership.JOIN,
                 )
 
-                if change is None:
-                    # Handle any profile changes
-                    yield self._handle_profile_change(
-                        state_key, room_id, prev_event_id, event_id,
-                    )
-                    continue
-
-                if not change:
+                if change is False:
                     # Need to check if the server left the room entirely, if so
                     # we might need to remove all the users in that room
                     is_in_room = yield self.store.is_host_joined(
-                        room_id, self.server_name,
+                        room_id, self.server_name
                     )
                     if not is_in_room:
                         logger.info("Server left room: %r", room_id)
                         # Fetch all the users that we marked as being in user
                         # directory due to being in the room and then check if
                         # need to remove those users or not
-                        user_ids = yield self.store.get_users_in_dir_due_to_room(room_id)
+                        user_ids = yield self.store.get_users_in_dir_due_to_room(
+                            room_id
+                        )
                         for user_id in user_ids:
                             yield self._handle_remove_user(room_id, user_id)
                         return
                     else:
                         logger.debug("Server is still in room: %r", room_id)
 
-                if change:  # The user joined
-                    event = yield self.store.get_event(event_id, allow_none=True)
-                    profile = ProfileInfo(
-                        avatar_url=event.content.get("avatar_url"),
-                        display_name=event.content.get("displayname"),
-                    )
+                is_support = yield self.store.is_support_user(state_key)
+                if not is_support:
+                    if change is None:
+                        # Handle any profile changes
+                        yield self._handle_profile_change(
+                            state_key, room_id, prev_event_id, event_id
+                        )
+                        continue
+
+                    if change:  # The user joined
+                        event = yield self.store.get_event(event_id, allow_none=True)
+                        profile = ProfileInfo(
+                            avatar_url=event.content.get("avatar_url"),
+                            display_name=event.content.get("displayname"),
+                        )
 
-                    yield self._handle_new_user(room_id, state_key, profile)
-                else:  # The user left
-                    yield self._handle_remove_user(room_id, state_key)
+                        yield self._handle_new_user(room_id, state_key, profile)
+                    else:  # The user left
+                        yield self._handle_remove_user(room_id, state_key)
             else:
                 logger.debug("Ignoring irrelevant type: %r", typ)
 
@@ -382,13 +399,15 @@ class UserDirectoryHandler(object):
 
         if typ == EventTypes.RoomHistoryVisibility:
             change = yield self._get_key_change(
-                prev_event_id, event_id,
+                prev_event_id,
+                event_id,
                 key_name="history_visibility",
                 public_value="world_readable",
             )
         elif typ == EventTypes.JoinRules:
             change = yield self._get_key_change(
-                prev_event_id, event_id,
+                prev_event_id,
+                event_id,
                 key_name="join_rule",
                 public_value=JoinRules.PUBLIC,
             )
@@ -513,7 +532,7 @@ class UserDirectoryHandler(object):
             )
             if self.is_mine_id(other_user_id) and not is_appservice:
                 shared_is_private = yield self.store.get_if_users_share_a_room(
-                    other_user_id, user_id,
+                    other_user_id, user_id
                 )
                 if shared_is_private is True:
                     # We've already marked in the database they share a private room
@@ -528,13 +547,11 @@ class UserDirectoryHandler(object):
                     to_insert.add((other_user_id, user_id))
 
         if to_insert:
-            yield self.store.add_users_who_share_room(
-                room_id, not is_public, to_insert,
-            )
+            yield self.store.add_users_who_share_room(room_id, not is_public, to_insert)
 
         if to_update:
             yield self.store.update_users_who_share_room(
-                room_id, not is_public, to_update,
+                room_id, not is_public, to_update
             )
 
     @defer.inlineCallbacks
@@ -553,15 +570,15 @@ class UserDirectoryHandler(object):
         row = yield self.store.get_user_in_public_room(user_id)
         update_user_in_public = row and row["room_id"] == room_id
 
-        if (update_user_in_public or update_user_dir):
+        if update_user_in_public or update_user_dir:
             # XXX: Make this faster?
             rooms = yield self.store.get_rooms_for_user(user_id)
             for j_room_id in rooms:
-                if (not update_user_in_public and not update_user_dir):
+                if not update_user_in_public and not update_user_dir:
                     break
 
                 is_in_room = yield self.store.is_host_joined(
-                    j_room_id, self.server_name,
+                    j_room_id, self.server_name
                 )
 
                 if not is_in_room:
@@ -589,19 +606,19 @@ class UserDirectoryHandler(object):
         # Get a list of user tuples that were in the DB due to this room and
         # users (this includes tuples where the other user matches `user_id`)
         user_tuples = yield self.store.get_users_in_share_dir_with_room_id(
-            user_id, room_id,
+            user_id, room_id
         )
 
         for user_id, other_user_id in user_tuples:
             # For each user tuple get a list of rooms that they still share,
             # trying to find a private room, and update the entry in the DB
-            rooms = yield self.store.get_rooms_in_common_for_users(user_id, other_user_id)
+            rooms = yield self.store.get_rooms_in_common_for_users(
+                user_id, other_user_id
+            )
 
             # If they don't share a room anymore, remove the mapping
             if not rooms:
-                yield self.store.remove_user_who_share_room(
-                    user_id, other_user_id,
-                )
+                yield self.store.remove_user_who_share_room(user_id, other_user_id)
                 continue
 
             found_public_share = None
@@ -615,13 +632,13 @@ class UserDirectoryHandler(object):
                 else:
                     found_public_share = None
                     yield self.store.update_users_who_share_room(
-                        room_id, not is_public, [(user_id, other_user_id)],
+                        room_id, not is_public, [(user_id, other_user_id)]
                     )
                     break
 
             if found_public_share:
                 yield self.store.update_users_who_share_room(
-                    room_id, not is_public, [(user_id, other_user_id)],
+                    room_id, not is_public, [(user_id, other_user_id)]
                 )
 
     @defer.inlineCallbacks
@@ -649,7 +666,7 @@ class UserDirectoryHandler(object):
 
         if prev_name != new_name or prev_avatar != new_avatar:
             yield self.store.update_profile_in_user_dir(
-                user_id, new_name, new_avatar, room_id,
+                user_id, new_name, new_avatar, room_id
             )
 
     @defer.inlineCallbacks
diff --git a/synapse/http/__init__.py b/synapse/http/__init__.py
index a3f9e4f67c..d36bcd6336 100644
--- a/synapse/http/__init__.py
+++ b/synapse/http/__init__.py
@@ -15,8 +15,10 @@
 # limitations under the License.
 import re
 
+from twisted.internet import task
 from twisted.internet.defer import CancelledError
 from twisted.python import failure
+from twisted.web.client import FileBodyProducer
 
 from synapse.api.errors import SynapseError
 
@@ -47,3 +49,16 @@ def redact_uri(uri):
         r'\1<redacted>\3',
         uri
     )
+
+
+class QuieterFileBodyProducer(FileBodyProducer):
+    """Wrapper for FileBodyProducer that avoids CRITICAL errors when the connection drops.
+
+    Workaround for https://github.com/matrix-org/synapse/issues/4003 /
+    https://twistedmatrix.com/trac/ticket/6528
+    """
+    def stopProducing(self):
+        try:
+            FileBodyProducer.stopProducing(self)
+        except task.TaskStopped:
+            pass
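
The fix is a narrow exception-suppression wrapper: delegate to the parent and swallow only the one exception known to be harmless in this situation. The pattern in isolation, with stand-in classes:

    class TaskStopped(Exception):
        pass

    class NoisyProducer(object):
        def stopProducing(self):
            raise TaskStopped()

    class QuietProducer(NoisyProducer):
        def stopProducing(self):
            try:
                NoisyProducer.stopProducing(self)
            except TaskStopped:  # only this specific, expected failure
                pass

    QuietProducer().stopProducing()  # no longer raises
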
diff --git a/synapse/http/client.py b/synapse/http/client.py
index ab86c64788..ad454f4964 100644
--- a/synapse/http/client.py
+++ b/synapse/http/client.py
@@ -15,34 +15,36 @@
 # limitations under the License.
 
 import logging
+from io import BytesIO
 
 from six import text_type
 from six.moves import urllib
 
 import treq
 from canonicaljson import encode_canonical_json, json
+from netaddr import IPAddress
 from prometheus_client import Counter
+from zope.interface import implementer, provider
 
 from OpenSSL import SSL
 from OpenSSL.SSL import VERIFY_NONE
-from twisted.internet import defer, protocol, reactor, ssl
-from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
-from twisted.web._newclient import ResponseDone
-from twisted.web.client import (
-    Agent,
-    BrowserLikeRedirectAgent,
-    ContentDecoderAgent,
-    GzipDecoder,
-    HTTPConnectionPool,
-    PartialDownloadError,
-    readBody,
+from twisted.internet import defer, protocol, ssl
+from twisted.internet.interfaces import (
+    IReactorPluggableNameResolver,
+    IResolutionReceiver,
 )
+from twisted.python.failure import Failure
+from twisted.web._newclient import ResponseDone
+from twisted.web.client import Agent, HTTPConnectionPool, PartialDownloadError, readBody
 from twisted.web.http import PotentialDataLoss
 from twisted.web.http_headers import Headers
 
 from synapse.api.errors import Codes, HttpResponseException, SynapseError
-from synapse.http import cancelled_to_request_timed_out_error, redact_uri
-from synapse.http.endpoint import SpiderEndpoint
+from synapse.http import (
+    QuieterFileBodyProducer,
+    cancelled_to_request_timed_out_error,
+    redact_uri,
+)
 from synapse.util.async_helpers import timeout_deferred
 from synapse.util.caches import CACHE_SIZE_FACTOR
 from synapse.util.logcontext import make_deferred_yieldable
@@ -50,8 +52,125 @@ from synapse.util.logcontext import make_deferred_yieldable
 logger = logging.getLogger(__name__)
 
 outgoing_requests_counter = Counter("synapse_http_client_requests", "", ["method"])
-incoming_responses_counter = Counter("synapse_http_client_responses", "",
-                                     ["method", "code"])
+incoming_responses_counter = Counter(
+    "synapse_http_client_responses", "", ["method", "code"]
+)
+
+
+def check_against_blacklist(ip_address, ip_whitelist, ip_blacklist):
+    """
+    Args:
+        ip_address (netaddr.IPAddress)
+        ip_whitelist (netaddr.IPSet)
+        ip_blacklist (netaddr.IPSet)
+    """
+    if ip_address in ip_blacklist:
+        if ip_whitelist is None or ip_address not in ip_whitelist:
+            return True
+    return False
+
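
A usage sketch for check_against_blacklist with real netaddr types (the ranges below are examples only); note that the whitelist punches holes in the blacklist rather than standing alone:

    from netaddr import IPAddress, IPSet

    blacklist = IPSet(["10.0.0.0/8", "127.0.0.0/8"])
    whitelist = IPSet(["10.1.2.3"])

    assert check_against_blacklist(IPAddress("127.0.0.1"), whitelist, blacklist)
    assert not check_against_blacklist(IPAddress("10.1.2.3"), whitelist, blacklist)
    assert not check_against_blacklist(IPAddress("1.2.3.4"), whitelist, blacklist)
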
+
+class IPBlacklistingResolver(object):
+    """
+    A proxy for reactor.nameResolver which only produces non-blacklisted IP
+    addresses, preventing DNS rebinding attacks on URL preview.
+    """
+
+    def __init__(self, reactor, ip_whitelist, ip_blacklist):
+        """
+        Args:
+            reactor (twisted.internet.reactor)
+            ip_whitelist (netaddr.IPSet)
+            ip_blacklist (netaddr.IPSet)
+        """
+        self._reactor = reactor
+        self._ip_whitelist = ip_whitelist
+        self._ip_blacklist = ip_blacklist
+
+    def resolveHostName(self, recv, hostname, portNumber=0):
+
+        r = recv()
+        d = defer.Deferred()
+        addresses = []
+
+        @provider(IResolutionReceiver)
+        class EndpointReceiver(object):
+            @staticmethod
+            def resolutionBegan(resolutionInProgress):
+                pass
+
+            @staticmethod
+            def addressResolved(address):
+                ip_address = IPAddress(address.host)
+
+                if check_against_blacklist(
+                    ip_address, self._ip_whitelist, self._ip_blacklist
+                ):
+                    logger.info(
+                        "Dropped %s from DNS resolution to %s" % (ip_address, hostname)
+                    )
+                    raise SynapseError(403, "IP address blocked by IP blacklist entry")
+
+                addresses.append(address)
+
+            @staticmethod
+            def resolutionComplete():
+                d.callback(addresses)
+
+        self._reactor.nameResolver.resolveHostName(
+            EndpointReceiver, hostname, portNumber=portNumber
+        )
+
+        def _callback(addrs):
+            r.resolutionBegan(None)
+            for i in addrs:
+                r.addressResolved(i)
+            r.resolutionComplete()
+
+        d.addCallback(_callback)
+
+        return r
+
+
+class BlacklistingAgentWrapper(Agent):
+    """
+    An Agent wrapper which prevents requests to blacklisted IP addresses
+    that are accessed directly (i.e. without a DNS lookup).
+    """
+
+    def __init__(self, agent, reactor, ip_whitelist=None, ip_blacklist=None):
+        """
+        Args:
+            agent (twisted.web.client.Agent): The Agent to wrap.
+            reactor (twisted.internet.reactor)
+            ip_whitelist (netaddr.IPSet)
+            ip_blacklist (netaddr.IPSet)
+        """
+        self._agent = agent
+        self._ip_whitelist = ip_whitelist
+        self._ip_blacklist = ip_blacklist
+
+    def request(self, method, uri, headers=None, bodyProducer=None):
+        h = urllib.parse.urlparse(uri.decode('ascii'))
+
+        try:
+            ip_address = IPAddress(h.hostname)
+
+            if check_against_blacklist(
+                ip_address, self._ip_whitelist, self._ip_blacklist
+            ):
+                logger.info(
+                    "Blocking access to %s because of blacklist" % (ip_address,)
+                )
+                e = SynapseError(403, "IP address blocked by IP blacklist entry")
+                return defer.fail(Failure(e))
+        except Exception:
+            # Not an IP
+            pass
+
+        return self._agent.request(
+            method, uri, headers=headers, bodyProducer=bodyProducer
+        )
 
 
 class SimpleHttpClient(object):
@@ -59,14 +178,54 @@ class SimpleHttpClient(object):
     A simple, no-frills HTTP client with methods that wrap up common ways of
     using HTTP in Matrix
     """
-    def __init__(self, hs):
+
+    def __init__(self, hs, treq_args={}, ip_whitelist=None, ip_blacklist=None):
+        """
+        Args:
+            hs (synapse.server.HomeServer)
+            treq_args (dict): Extra keyword arguments to be given to treq.request.
+            ip_blacklist (netaddr.IPSet): IP addresses that we may not request.
+            ip_whitelist (netaddr.IPSet): IP addresses that we may request even
+                if they would otherwise be caught by the blacklist.
+        """
         self.hs = hs
 
-        pool = HTTPConnectionPool(reactor)
+        self._ip_whitelist = ip_whitelist
+        self._ip_blacklist = ip_blacklist
+        self._extra_treq_args = treq_args
+
+        self.user_agent = hs.version_string
+        self.clock = hs.get_clock()
+        if hs.config.user_agent_suffix:
+            self.user_agent = "%s %s" % (self.user_agent, hs.config.user_agent_suffix)
+
+        self.user_agent = self.user_agent.encode('ascii')
+
+        if self._ip_blacklist:
+            real_reactor = hs.get_reactor()
+            # If we have an IP blacklist, we need to use a DNS resolver which
+            # filters out blacklisted IP addresses, to prevent DNS rebinding.
+            nameResolver = IPBlacklistingResolver(
+                real_reactor, self._ip_whitelist, self._ip_blacklist
+            )
+
+            @implementer(IReactorPluggableNameResolver)
+            class Reactor(object):
+                def __getattr__(_self, attr):
+                    if attr == "nameResolver":
+                        return nameResolver
+                    else:
+                        return getattr(real_reactor, attr)
+
+            self.reactor = Reactor()
+        else:
+            self.reactor = hs.get_reactor()
 
         # the pusher makes lots of concurrent SSL connections to sygnal, and
-        # tends to do so in batches, so we need to allow the pool to keep lots
-        # of idle connections around.
+        # tends to do so in batches, so we need to allow the pool to keep
+        # lots of idle connections around.
+        pool = HTTPConnectionPool(self.reactor)
         pool.maxPersistentPerHost = max((100 * CACHE_SIZE_FACTOR, 5))
         pool.cachedConnectionTimeout = 2 * 60
 
@@ -74,20 +233,35 @@ class SimpleHttpClient(object):
         # BrowserLikePolicyForHTTPS which will do regular cert validation
         # 'like a browser'
         self.agent = Agent(
-            reactor,
+            self.reactor,
             connectTimeout=15,
-            contextFactory=hs.get_http_client_context_factory(),
+            contextFactory=self.hs.get_http_client_context_factory(),
             pool=pool,
         )
-        self.user_agent = hs.version_string
-        self.clock = hs.get_clock()
-        if hs.config.user_agent_suffix:
-            self.user_agent = "%s %s" % (self.user_agent, hs.config.user_agent_suffix,)
 
-        self.user_agent = self.user_agent.encode('ascii')
+        if self._ip_blacklist:
+            # If we have an IP blacklist, we then install the blacklisting Agent,
+            # which prevents direct access to IP addresses that are not caught
+            # by the DNS resolution.
+            self.agent = BlacklistingAgentWrapper(
+                self.agent,
+                self.reactor,
+                ip_whitelist=self._ip_whitelist,
+                ip_blacklist=self._ip_blacklist,
+            )
 
     @defer.inlineCallbacks
-    def request(self, method, uri, data=b'', headers=None):
+    def request(self, method, uri, data=None, headers=None):
+        """
+        Args:
+            method (str): HTTP method to use.
+            uri (str): URI to query.
+            data (bytes): Data to send in the request body, if applicable.
+            headers (t.w.http_headers.Headers): Request headers.
+
+        Raises:
+            SynapseError: If the IP is blacklisted.
+        """
         # A small wrapper around self.agent.request() so we can easily attach
         # counters to it
         outgoing_requests_counter.labels(method).inc()
@@ -96,26 +270,39 @@ class SimpleHttpClient(object):
         logger.info("Sending request %s %s", method, redact_uri(uri))
 
         try:
+            body_producer = None
+            if data is not None:
+                body_producer = QuieterFileBodyProducer(BytesIO(data))
+
             request_deferred = treq.request(
-                method, uri, agent=self.agent, data=data, headers=headers
+                method,
+                uri,
+                agent=self.agent,
+                data=body_producer,
+                headers=headers,
+                **self._extra_treq_args
             )
             request_deferred = timeout_deferred(
-                request_deferred, 60, self.hs.get_reactor(),
+                request_deferred,
+                60,
+                self.hs.get_reactor(),
                 cancelled_to_request_timed_out_error,
             )
             response = yield make_deferred_yieldable(request_deferred)
 
             incoming_responses_counter.labels(method, response.code).inc()
             logger.info(
-                "Received response to  %s %s: %s",
-                method, redact_uri(uri), response.code
+                "Received response to %s %s: %s", method, redact_uri(uri), response.code
             )
             defer.returnValue(response)
         except Exception as e:
             incoming_responses_counter.labels(method, "ERR").inc()
             logger.info(
                 "Error sending request to  %s %s: %s %s",
-                method, redact_uri(uri), type(e).__name__, e.args[0]
+                method,
+                redact_uri(uri),
+                type(e).__name__,
+                e.args[0],
             )
             raise
 
@@ -140,8 +327,9 @@ class SimpleHttpClient(object):
         # TODO: Do we ever want to log message contents?
         logger.debug("post_urlencoded_get_json args: %s", args)
 
-        query_bytes = urllib.parse.urlencode(
-            encode_urlencode_args(args), True).encode("utf8")
+        query_bytes = urllib.parse.urlencode(encode_urlencode_args(args), True).encode(
+            "utf8"
+        )
 
         actual_headers = {
             b"Content-Type": [b"application/x-www-form-urlencoded"],
@@ -151,16 +339,13 @@ class SimpleHttpClient(object):
             actual_headers.update(headers)
 
         response = yield self.request(
-            "POST",
-            uri,
-            headers=Headers(actual_headers),
-            data=query_bytes
+            "POST", uri, headers=Headers(actual_headers), data=query_bytes
         )
 
-        body = yield make_deferred_yieldable(treq.json_content(response))
+        body = yield make_deferred_yieldable(readBody(response))
 
         if 200 <= response.code < 300:
-            defer.returnValue(body)
+            defer.returnValue(json.loads(body))
         else:
             raise HttpResponseException(response.code, response.phrase, body)
 
@@ -194,10 +379,7 @@ class SimpleHttpClient(object):
             actual_headers.update(headers)
 
         response = yield self.request(
-            "POST",
-            uri,
-            headers=Headers(actual_headers),
-            data=json_str
+            "POST", uri, headers=Headers(actual_headers), data=json_str
         )
 
         body = yield make_deferred_yieldable(readBody(response))
@@ -265,10 +447,7 @@ class SimpleHttpClient(object):
             actual_headers.update(headers)
 
         response = yield self.request(
-            "PUT",
-            uri,
-            headers=Headers(actual_headers),
-            data=json_str
+            "PUT", uri, headers=Headers(actual_headers), data=json_str
         )
 
         body = yield make_deferred_yieldable(readBody(response))
@@ -300,17 +479,11 @@ class SimpleHttpClient(object):
             query_bytes = urllib.parse.urlencode(args, True)
             uri = "%s?%s" % (uri, query_bytes)
 
-        actual_headers = {
-            b"User-Agent": [self.user_agent],
-        }
+        actual_headers = {b"User-Agent": [self.user_agent]}
         if headers:
             actual_headers.update(headers)
 
-        response = yield self.request(
-            "GET",
-            uri,
-            headers=Headers(actual_headers),
-        )
+        response = yield self.request("GET", uri, headers=Headers(actual_headers))
 
         body = yield make_deferred_yieldable(readBody(response))
 
@@ -335,22 +508,18 @@ class SimpleHttpClient(object):
             headers, absolute URI of the response and HTTP response code.
         """
 
-        actual_headers = {
-            b"User-Agent": [self.user_agent],
-        }
+        actual_headers = {b"User-Agent": [self.user_agent]}
         if headers:
             actual_headers.update(headers)
 
-        response = yield self.request(
-            "GET",
-            url,
-            headers=Headers(actual_headers),
-        )
+        response = yield self.request("GET", url, headers=Headers(actual_headers))
 
         resp_headers = dict(response.headers.getAllRawHeaders())
 
-        if (b'Content-Length' in resp_headers and
-                int(resp_headers[b'Content-Length']) > max_size):
+        if (
+            b'Content-Length' in resp_headers
+            and int(resp_headers[b'Content-Length'][0]) > max_size
+        ):
             logger.warn("Requested URL is too large > %r bytes" % (self.max_size,))
             raise SynapseError(
                 502,
@@ -360,26 +529,20 @@ class SimpleHttpClient(object):
 
         if response.code > 299:
             logger.warn("Got %d when downloading %s" % (response.code, url))
-            raise SynapseError(
-                502,
-                "Got error %d" % (response.code,),
-                Codes.UNKNOWN,
-            )
+            raise SynapseError(502, "Got error %d" % (response.code,), Codes.UNKNOWN)
 
         # TODO: if our Content-Type is HTML or something, just read the first
         # N bytes into RAM rather than saving it all to disk only to read it
         # straight back in again
 
         try:
-            length = yield make_deferred_yieldable(_readBodyToFile(
-                response, output_stream, max_size,
-            ))
+            length = yield make_deferred_yieldable(
+                _readBodyToFile(response, output_stream, max_size)
+            )
         except Exception as e:
             logger.exception("Failed to download body")
             raise SynapseError(
-                502,
-                ("Failed to download remote body: %s" % e),
-                Codes.UNKNOWN,
+                502, ("Failed to download remote body: %s" % e), Codes.UNKNOWN
             )
 
         defer.returnValue(
@@ -388,13 +551,14 @@ class SimpleHttpClient(object):
                 resp_headers,
                 response.request.absoluteURI.decode('ascii'),
                 response.code,
-            ),
+            )
         )
 
 
 # XXX: FIXME: This is horribly copy-pasted from matrixfederationclient.
 # The two should be factored out.
 
+
 class _ReadBodyToFileProtocol(protocol.Protocol):
     def __init__(self, stream, deferred, max_size):
         self.stream = stream
@@ -406,11 +570,13 @@ class _ReadBodyToFileProtocol(protocol.Protocol):
         self.stream.write(data)
         self.length += len(data)
         if self.max_size is not None and self.length >= self.max_size:
-            self.deferred.errback(SynapseError(
-                502,
-                "Requested file is too large > %r bytes" % (self.max_size,),
-                Codes.TOO_LARGE,
-            ))
+            self.deferred.errback(
+                SynapseError(
+                    502,
+                    "Requested file is too large > %r bytes" % (self.max_size,),
+                    Codes.TOO_LARGE,
+                )
+            )
             self.deferred = defer.Deferred()
             self.transport.loseConnection()
 
@@ -428,6 +594,7 @@ class _ReadBodyToFileProtocol(protocol.Protocol):
 # XXX: FIXME: This is horribly copy-pasted from matrixfederationclient.
 # The two should be factored out.
 
+
 def _readBodyToFile(response, stream, max_size):
     d = defer.Deferred()
     response.deliverBody(_ReadBodyToFileProtocol(stream, d, max_size))
@@ -450,10 +617,12 @@ class CaptchaServerHttpClient(SimpleHttpClient):
             "POST",
             url,
             data=query_bytes,
-            headers=Headers({
-                b"Content-Type": [b"application/x-www-form-urlencoded"],
-                b"User-Agent": [self.user_agent],
-            })
+            headers=Headers(
+                {
+                    b"Content-Type": [b"application/x-www-form-urlencoded"],
+                    b"User-Agent": [self.user_agent],
+                }
+            ),
         )
 
         try:
@@ -464,57 +633,6 @@ class CaptchaServerHttpClient(SimpleHttpClient):
             defer.returnValue(e.response)
 
 
-class SpiderEndpointFactory(object):
-    def __init__(self, hs):
-        self.blacklist = hs.config.url_preview_ip_range_blacklist
-        self.whitelist = hs.config.url_preview_ip_range_whitelist
-        self.policyForHTTPS = hs.get_http_client_context_factory()
-
-    def endpointForURI(self, uri):
-        logger.info("Getting endpoint for %s", uri.toBytes())
-
-        if uri.scheme == b"http":
-            endpoint_factory = HostnameEndpoint
-        elif uri.scheme == b"https":
-            tlsCreator = self.policyForHTTPS.creatorForNetloc(uri.host, uri.port)
-
-            def endpoint_factory(reactor, host, port, **kw):
-                return wrapClientTLS(
-                    tlsCreator,
-                    HostnameEndpoint(reactor, host, port, **kw))
-        else:
-            logger.warn("Can't get endpoint for unrecognised scheme %s", uri.scheme)
-            return None
-        return SpiderEndpoint(
-            reactor, uri.host, uri.port, self.blacklist, self.whitelist,
-            endpoint=endpoint_factory, endpoint_kw_args=dict(timeout=15),
-        )
-
-
-class SpiderHttpClient(SimpleHttpClient):
-    """
-    Separate HTTP client for spidering arbitrary URLs.
-    Special in that it follows retries and has a UA that looks
-    like a browser.
-
-    used by the preview_url endpoint in the content repo.
-    """
-    def __init__(self, hs):
-        SimpleHttpClient.__init__(self, hs)
-        # clobber the base class's agent and UA:
-        self.agent = ContentDecoderAgent(
-            BrowserLikeRedirectAgent(
-                Agent.usingEndpointFactory(
-                    reactor,
-                    SpiderEndpointFactory(hs)
-                )
-            ), [(b'gzip', GzipDecoder)]
-        )
-        # We could look like Chrome:
-        # self.user_agent = ("Mozilla/5.0 (%s) (KHTML, like Gecko)
-        #                   Chrome Safari" % hs.version_string)
-
-
 def encode_urlencode_args(args):
     return {k: encode_urlencode_arg(v) for k, v in args.items()}
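The whitelist/blacklist logic threaded through this file reduces to a single containment test. A hedged re-statement for orientation (the real check_against_blacklist used above is defined elsewhere in this changeset and may differ in detail):

    from netaddr import IPAddress, IPSet

    def check_against_blacklist(ip_address, ip_whitelist, ip_blacklist):
        # Blocked if blacklisted and not explicitly whitelisted.
        if ip_address in ip_blacklist:
            if ip_whitelist is None or ip_address not in ip_whitelist:
                return True
        return False

    # Example: private ranges are blocked, but one internal host is allowed.
    blacklist = IPSet(["127.0.0.0/8", "10.0.0.0/8", "192.168.0.0/16"])
    whitelist = IPSet(["10.0.0.5/32"])

    assert check_against_blacklist(IPAddress("192.168.1.1"), whitelist, blacklist)
    assert not check_against_blacklist(IPAddress("10.0.0.5"), whitelist, blacklist)
    assert not check_against_blacklist(IPAddress("93.184.216.34"), whitelist, blacklist)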
 
diff --git a/synapse/http/endpoint.py b/synapse/http/endpoint.py
index 91025037a3..cd79ebab62 100644
--- a/synapse/http/endpoint.py
+++ b/synapse/http/endpoint.py
@@ -12,30 +12,11 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import collections
 import logging
-import random
 import re
-import time
-
-from twisted.internet import defer
-from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
-from twisted.internet.error import ConnectError
-from twisted.names import client, dns
-from twisted.names.error import DNSNameError, DomainError
 
 logger = logging.getLogger(__name__)
 
-SERVER_CACHE = {}
-
-# our record of an individual server which can be tried to reach a destination.
-#
-# "host" is the hostname acquired from the SRV record. Except when there's
-# no SRV record, in which case it is the original hostname.
-_Server = collections.namedtuple(
-    "_Server", "priority weight host port expires"
-)
-
 
 def parse_server_name(server_name):
     """Split a server name into host/port parts.
@@ -100,299 +81,3 @@ def parse_and_validate_server_name(server_name):
         ))
 
     return host, port
-
-
-def matrix_federation_endpoint(reactor, destination, tls_client_options_factory=None,
-                               timeout=None):
-    """Construct an endpoint for the given matrix destination.
-
-    Args:
-        reactor: Twisted reactor.
-        destination (unicode): The name of the server to connect to.
-        tls_client_options_factory
-            (synapse.crypto.context_factory.ClientTLSOptionsFactory):
-            Factory which generates TLS options for client connections.
-        timeout (int): connection timeout in seconds
-    """
-
-    domain, port = parse_server_name(destination)
-
-    endpoint_kw_args = {}
-
-    if timeout is not None:
-        endpoint_kw_args.update(timeout=timeout)
-
-    if tls_client_options_factory is None:
-        transport_endpoint = HostnameEndpoint
-        default_port = 8008
-    else:
-        # the SNI string should be the same as the Host header, minus the port.
-        # as per https://github.com/matrix-org/synapse/issues/2525#issuecomment-336896777,
-        # the Host header and SNI should therefore be the server_name of the remote
-        # server.
-        tls_options = tls_client_options_factory.get_options(domain)
-
-        def transport_endpoint(reactor, host, port, timeout):
-            return wrapClientTLS(
-                tls_options,
-                HostnameEndpoint(reactor, host, port, timeout=timeout),
-            )
-        default_port = 8448
-
-    if port is None:
-        return _WrappingEndpointFac(SRVClientEndpoint(
-            reactor, "matrix", domain, protocol="tcp",
-            default_port=default_port, endpoint=transport_endpoint,
-            endpoint_kw_args=endpoint_kw_args
-        ), reactor)
-    else:
-        return _WrappingEndpointFac(transport_endpoint(
-            reactor, domain, port, **endpoint_kw_args
-        ), reactor)
-
-
-class _WrappingEndpointFac(object):
-    def __init__(self, endpoint_fac, reactor):
-        self.endpoint_fac = endpoint_fac
-        self.reactor = reactor
-
-    @defer.inlineCallbacks
-    def connect(self, protocolFactory):
-        conn = yield self.endpoint_fac.connect(protocolFactory)
-        conn = _WrappedConnection(conn, self.reactor)
-        defer.returnValue(conn)
-
-
-class _WrappedConnection(object):
-    """Wraps a connection and calls abort on it if it hasn't seen any action
-    for 2.5-3 minutes.
-    """
-    __slots__ = ["conn", "last_request"]
-
-    def __init__(self, conn, reactor):
-        object.__setattr__(self, "conn", conn)
-        object.__setattr__(self, "last_request", time.time())
-        self._reactor = reactor
-
-    def __getattr__(self, name):
-        return getattr(self.conn, name)
-
-    def __setattr__(self, name, value):
-        setattr(self.conn, name, value)
-
-    def _time_things_out_maybe(self):
-        # We use a slightly shorter timeout here just in case the callLater is
-        # triggered early. Paranoia ftw.
-        # TODO: Cancel the previous callLater rather than comparing time.time()?
-        if time.time() - self.last_request >= 2.5 * 60:
-            self.abort()
-            # Abort the underlying TLS connection. The abort() method calls
-            # loseConnection() on the TLS connection which tries to
-            # shutdown the connection cleanly. We call abortConnection()
-            # since that will promptly close the TLS connection.
-            #
-            # In Twisted >18.4; the TLS connection will be None if it has closed
-            # which will make abortConnection() throw. Check that the TLS connection
-            # is not None before trying to close it.
-            if self.transport.getHandle() is not None:
-                self.transport.abortConnection()
-
-    def request(self, request):
-        self.last_request = time.time()
-
-        # Time this connection out if we haven't send a request in the last
-        # N minutes
-        # TODO: Cancel the previous callLater?
-        self._reactor.callLater(3 * 60, self._time_things_out_maybe)
-
-        d = self.conn.request(request)
-
-        def update_request_time(res):
-            self.last_request = time.time()
-            # TODO: Cancel the previous callLater?
-            self._reactor.callLater(3 * 60, self._time_things_out_maybe)
-            return res
-
-        d.addCallback(update_request_time)
-
-        return d
-
-
-class SpiderEndpoint(object):
-    """An endpoint which refuses to connect to blacklisted IP addresses
-    Implements twisted.internet.interfaces.IStreamClientEndpoint.
-    """
-    def __init__(self, reactor, host, port, blacklist, whitelist,
-                 endpoint=HostnameEndpoint, endpoint_kw_args={}):
-        self.reactor = reactor
-        self.host = host
-        self.port = port
-        self.blacklist = blacklist
-        self.whitelist = whitelist
-        self.endpoint = endpoint
-        self.endpoint_kw_args = endpoint_kw_args
-
-    @defer.inlineCallbacks
-    def connect(self, protocolFactory):
-        address = yield self.reactor.resolve(self.host)
-
-        from netaddr import IPAddress
-        ip_address = IPAddress(address)
-
-        if ip_address in self.blacklist:
-            if self.whitelist is None or ip_address not in self.whitelist:
-                raise ConnectError(
-                    "Refusing to spider blacklisted IP address %s" % address
-                )
-
-        logger.info("Connecting to %s:%s", address, self.port)
-        endpoint = self.endpoint(
-            self.reactor, address, self.port, **self.endpoint_kw_args
-        )
-        connection = yield endpoint.connect(protocolFactory)
-        defer.returnValue(connection)
-
-
-class SRVClientEndpoint(object):
-    """An endpoint which looks up SRV records for a service.
-    Cycles through the list of servers starting with each call to connect
-    picking the next server.
-    Implements twisted.internet.interfaces.IStreamClientEndpoint.
-    """
-
-    def __init__(self, reactor, service, domain, protocol="tcp",
-                 default_port=None, endpoint=HostnameEndpoint,
-                 endpoint_kw_args={}):
-        self.reactor = reactor
-        self.service_name = "_%s._%s.%s" % (service, protocol, domain)
-
-        if default_port is not None:
-            self.default_server = _Server(
-                host=domain,
-                port=default_port,
-                priority=0,
-                weight=0,
-                expires=0,
-            )
-        else:
-            self.default_server = None
-
-        self.endpoint = endpoint
-        self.endpoint_kw_args = endpoint_kw_args
-
-        self.servers = None
-        self.used_servers = None
-
-    @defer.inlineCallbacks
-    def fetch_servers(self):
-        self.used_servers = []
-        self.servers = yield resolve_service(self.service_name)
-
-    def pick_server(self):
-        if not self.servers:
-            if self.used_servers:
-                self.servers = self.used_servers
-                self.used_servers = []
-                self.servers.sort()
-            elif self.default_server:
-                return self.default_server
-            else:
-                raise ConnectError(
-                    "No server available for %s" % self.service_name
-                )
-
-        # look for all servers with the same priority
-        min_priority = self.servers[0].priority
-        weight_indexes = list(
-            (index, server.weight + 1)
-            for index, server in enumerate(self.servers)
-            if server.priority == min_priority
-        )
-
-        total_weight = sum(weight for index, weight in weight_indexes)
-        target_weight = random.randint(0, total_weight)
-        for index, weight in weight_indexes:
-            target_weight -= weight
-            if target_weight <= 0:
-                server = self.servers[index]
-                # XXX: this looks totally dubious:
-                #
-                # (a) we never reuse a server until we have been through
-                #     all of the servers at the same priority, so if the
-                #     weights are A: 100, B:1, we always do ABABAB instead of
-                #     AAAA...AAAB (approximately).
-                #
-                # (b) After using all the servers at the lowest priority,
-                #     we move onto the next priority. We should only use the
-                #     second priority if servers at the top priority are
-                #     unreachable.
-                #
-                del self.servers[index]
-                self.used_servers.append(server)
-                return server
-
-    @defer.inlineCallbacks
-    def connect(self, protocolFactory):
-        if self.servers is None:
-            yield self.fetch_servers()
-        server = self.pick_server()
-        logger.info("Connecting to %s:%s", server.host, server.port)
-        endpoint = self.endpoint(
-            self.reactor, server.host, server.port, **self.endpoint_kw_args
-        )
-        connection = yield endpoint.connect(protocolFactory)
-        defer.returnValue(connection)
-
-
-@defer.inlineCallbacks
-def resolve_service(service_name, dns_client=client, cache=SERVER_CACHE, clock=time):
-    cache_entry = cache.get(service_name, None)
-    if cache_entry:
-        if all(s.expires > int(clock.time()) for s in cache_entry):
-            servers = list(cache_entry)
-            defer.returnValue(servers)
-
-    servers = []
-
-    try:
-        try:
-            answers, _, _ = yield dns_client.lookupService(service_name)
-        except DNSNameError:
-            defer.returnValue([])
-
-        if (len(answers) == 1
-                and answers[0].type == dns.SRV
-                and answers[0].payload
-                and answers[0].payload.target == dns.Name(b'.')):
-            raise ConnectError("Service %s unavailable" % service_name)
-
-        for answer in answers:
-            if answer.type != dns.SRV or not answer.payload:
-                continue
-
-            payload = answer.payload
-
-            servers.append(_Server(
-                host=str(payload.target),
-                port=int(payload.port),
-                priority=int(payload.priority),
-                weight=int(payload.weight),
-                expires=int(clock.time()) + answer.ttl,
-            ))
-
-        servers.sort()
-        cache[service_name] = list(servers)
-    except DomainError as e:
-        # We failed to resolve the name (other than a NameError)
-        # Try something in the cache, else rereaise
-        cache_entry = cache.get(service_name, None)
-        if cache_entry:
-            logger.warn(
-                "Failed to resolve %r, falling back to cache. %r",
-                service_name, e
-            )
-            servers = list(cache_entry)
-        else:
-            raise e
-
-    defer.returnValue(servers)
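What survives in endpoint.py is the pure server-name parsing. For orientation, a rough re-statement of parse_server_name's host/port split under the same conventions (a sketch, not the verbatim kept function):

    def parse_server_name(server_name):
        # Everything after the last ':' is the port, unless the name is a
        # bracketed IPv6 literal with no port.
        try:
            if server_name[-1] == ']':
                return server_name, None
            idx = server_name.rindex(":")
            return server_name[:idx], int(server_name[idx + 1:])
        except ValueError:
            return server_name, None

    assert parse_server_name("matrix.org") == ("matrix.org", None)
    assert parse_server_name("matrix.org:8448") == ("matrix.org", 8448)
    assert parse_server_name("[2001:db8::1]:8448") == ("[2001:db8::1]", 8448)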
diff --git a/synapse/rest/key/v1/__init__.py b/synapse/http/federation/__init__.py
index fe0ac3f8e9..1453d04571 100644
--- a/synapse/rest/key/v1/__init__.py
+++ b/synapse/http/federation/__init__.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2019 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py
new file mode 100644
index 0000000000..384d8a37a2
--- /dev/null
+++ b/synapse/http/federation/matrix_federation_agent.py
@@ -0,0 +1,452 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import json
+import logging
+import random
+import time
+
+import attr
+from netaddr import IPAddress
+from zope.interface import implementer
+
+from twisted.internet import defer
+from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
+from twisted.internet.interfaces import IStreamClientEndpoint
+from twisted.web.client import URI, Agent, HTTPConnectionPool, RedirectAgent, readBody
+from twisted.web.http import stringToDatetime
+from twisted.web.http_headers import Headers
+from twisted.web.iweb import IAgent
+
+from synapse.http.federation.srv_resolver import SrvResolver, pick_server_from_list
+from synapse.util import Clock
+from synapse.util.caches.ttlcache import TTLCache
+from synapse.util.logcontext import make_deferred_yieldable
+from synapse.util.metrics import Measure
+
+# period to cache .well-known results for by default
+WELL_KNOWN_DEFAULT_CACHE_PERIOD = 24 * 3600
+
+# jitter to add to the .well-known default cache ttl
+WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER = 10 * 60
+
+# period to cache failure to fetch .well-known for
+WELL_KNOWN_INVALID_CACHE_PERIOD = 1 * 3600
+
+# cap for .well-known cache period
+WELL_KNOWN_MAX_CACHE_PERIOD = 48 * 3600
+
+logger = logging.getLogger(__name__)
+well_known_cache = TTLCache('well-known')
+
+
+@implementer(IAgent)
+class MatrixFederationAgent(object):
+    """An Agent-like thing which provides a `request` method which will look up a matrix
+    server and send an HTTP request to it.
+
+    Doesn't implement any retries. (Those are done in MatrixFederationHttpClient.)
+
+    Args:
+        reactor (IReactor): twisted reactor to use for underlying requests
+
+        tls_client_options_factory (ClientTLSOptionsFactory|None):
+            factory to use for fetching client tls options, or none to disable TLS.
+
+        _well_known_tls_policy (IPolicyForHTTPS|None):
+            TLS policy to use for fetching .well-known files. None to use a default
+            (browser-like) implementation.
+
+        srv_resolver (SrvResolver|None):
+            SRVResolver impl to use for looking up SRV records. None to use a default
+            implementation.
+    """
+
+    def __init__(
+        self, reactor, tls_client_options_factory,
+        _well_known_tls_policy=None,
+        _srv_resolver=None,
+        _well_known_cache=well_known_cache,
+    ):
+        self._reactor = reactor
+        self._clock = Clock(reactor)
+
+        self._tls_client_options_factory = tls_client_options_factory
+        if _srv_resolver is None:
+            _srv_resolver = SrvResolver()
+        self._srv_resolver = _srv_resolver
+
+        self._pool = HTTPConnectionPool(reactor)
+        self._pool.retryAutomatically = False
+        self._pool.maxPersistentPerHost = 5
+        self._pool.cachedConnectionTimeout = 2 * 60
+
+        agent_args = {}
+        if _well_known_tls_policy is not None:
+            # the param is called 'contextFactory', but actually passing a
+            # contextfactory is deprecated, and it expects an IPolicyForHTTPS.
+            agent_args['contextFactory'] = _well_known_tls_policy
+        _well_known_agent = RedirectAgent(
+            Agent(self._reactor, pool=self._pool, **agent_args),
+        )
+        self._well_known_agent = _well_known_agent
+
+        # our cache of .well-known lookup results, mapping from server name
+        # to delegated name. The values can be:
+        #   `bytes`:     a valid server-name
+        #   `None`:      there is no (valid) .well-known here
+        self._well_known_cache = _well_known_cache
+
+    @defer.inlineCallbacks
+    def request(self, method, uri, headers=None, bodyProducer=None):
+        """
+        Args:
+            method (bytes): HTTP method: GET/POST/etc
+
+            uri (bytes): Absolute URI to be retrieved
+
+            headers (twisted.web.http_headers.Headers|None):
+                HTTP headers to send with the request, or None to
+                send no extra headers.
+
+            bodyProducer (twisted.web.iweb.IBodyProducer|None):
+                An object which can generate bytes to make up the
+                body of this request (for example, the properly encoded contents of
+                a file for a file upload).  Or None if the request is to have
+                no body.
+
+        Returns:
+            Deferred[twisted.web.iweb.IResponse]:
+                fires when the header of the response has been received (regardless of the
+                response status code). Fails if there is any problem which prevents that
+                response from being received (including problems that prevent the request
+                from being sent).
+        """
+        parsed_uri = URI.fromBytes(uri, defaultPort=-1)
+        res = yield self._route_matrix_uri(parsed_uri)
+
+        # set up the TLS connection params
+        #
+        # XXX disabling TLS is really only supported here for the benefit of the
+        # unit tests. We should make the UTs cope with TLS rather than having to make
+        # the code support the unit tests.
+        if self._tls_client_options_factory is None:
+            tls_options = None
+        else:
+            tls_options = self._tls_client_options_factory.get_options(
+                res.tls_server_name.decode("ascii")
+            )
+
+        # make sure that the Host header is set correctly
+        if headers is None:
+            headers = Headers()
+        else:
+            headers = headers.copy()
+
+        if not headers.hasHeader(b'host'):
+            headers.addRawHeader(b'host', res.host_header)
+
+        class EndpointFactory(object):
+            @staticmethod
+            def endpointForURI(_uri):
+                ep = LoggingHostnameEndpoint(
+                    self._reactor, res.target_host, res.target_port,
+                )
+                if tls_options is not None:
+                    ep = wrapClientTLS(tls_options, ep)
+                return ep
+
+        agent = Agent.usingEndpointFactory(self._reactor, EndpointFactory(), self._pool)
+        res = yield make_deferred_yieldable(
+            agent.request(method, uri, headers, bodyProducer)
+        )
+        defer.returnValue(res)
+
+    @defer.inlineCallbacks
+    def _route_matrix_uri(self, parsed_uri, lookup_well_known=True):
+        """Helper for `request`: determine the routing for a Matrix URI
+
+        Args:
+            parsed_uri (twisted.web.client.URI): uri to route. Note that it should be
+                parsed with URI.fromBytes(uri, defaultPort=-1) to set the `port` to -1
+                if there is no explicit port given.
+
+            lookup_well_known (bool): True if we should look up the .well-known file if
+                there is no SRV record.
+
+        Returns:
+            Deferred[_RoutingResult]
+        """
+        # check for an IP literal
+        try:
+            ip_address = IPAddress(parsed_uri.host.decode("ascii"))
+        except Exception:
+            # not an IP address
+            ip_address = None
+
+        if ip_address:
+            port = parsed_uri.port
+            if port == -1:
+                port = 8448
+            defer.returnValue(_RoutingResult(
+                host_header=parsed_uri.netloc,
+                tls_server_name=parsed_uri.host,
+                target_host=parsed_uri.host,
+                target_port=port,
+            ))
+
+        if parsed_uri.port != -1:
+            # there is an explicit port
+            defer.returnValue(_RoutingResult(
+                host_header=parsed_uri.netloc,
+                tls_server_name=parsed_uri.host,
+                target_host=parsed_uri.host,
+                target_port=parsed_uri.port,
+            ))
+
+        if lookup_well_known:
+            # try a .well-known lookup
+            well_known_server = yield self._get_well_known(parsed_uri.host)
+
+            if well_known_server:
+                # if we found a .well-known, start again, but don't do another
+                # .well-known lookup.
+
+                # parse the server name in the .well-known response into host/port.
+                # (This code is lifted from twisted.web.client.URI.fromBytes).
+                if b':' in well_known_server:
+                    well_known_host, well_known_port = well_known_server.rsplit(b':', 1)
+                    try:
+                        well_known_port = int(well_known_port)
+                    except ValueError:
+                        # the part after the colon could not be parsed as an int
+                        # - we assume it is an IPv6 literal with no port (the closing
+                        # ']' stops it being parsed as an int)
+                        well_known_host, well_known_port = well_known_server, -1
+                else:
+                    well_known_host, well_known_port = well_known_server, -1
+
+                new_uri = URI(
+                    scheme=parsed_uri.scheme,
+                    netloc=well_known_server,
+                    host=well_known_host,
+                    port=well_known_port,
+                    path=parsed_uri.path,
+                    params=parsed_uri.params,
+                    query=parsed_uri.query,
+                    fragment=parsed_uri.fragment,
+                )
+
+                res = yield self._route_matrix_uri(new_uri, lookup_well_known=False)
+                defer.returnValue(res)
+
+        # try a SRV lookup
+        service_name = b"_matrix._tcp.%s" % (parsed_uri.host,)
+        server_list = yield self._srv_resolver.resolve_service(service_name)
+
+        if not server_list:
+            target_host = parsed_uri.host
+            port = 8448
+            logger.debug(
+                "No SRV record for %s, using %s:%i",
+                parsed_uri.host.decode("ascii"), target_host.decode("ascii"), port,
+            )
+        else:
+            target_host, port = pick_server_from_list(server_list)
+            logger.debug(
+                "Picked %s:%i from SRV records for %s",
+                target_host.decode("ascii"), port, parsed_uri.host.decode("ascii"),
+            )
+
+        defer.returnValue(_RoutingResult(
+            host_header=parsed_uri.netloc,
+            tls_server_name=parsed_uri.host,
+            target_host=target_host,
+            target_port=port,
+        ))
+
+    @defer.inlineCallbacks
+    def _get_well_known(self, server_name):
+        """Attempt to fetch and parse a .well-known file for the given server
+
+        Args:
+            server_name (bytes): name of the server, from the requested url
+
+        Returns:
+            Deferred[bytes|None]: either the new server name, from the .well-known, or
+                None if there was no .well-known file.
+        """
+        try:
+            result = self._well_known_cache[server_name]
+        except KeyError:
+            # TODO: should we linearise so that we don't end up doing two .well-known
+            # requests for the same server in parallel?
+            with Measure(self._clock, "get_well_known"):
+                result, cache_period = yield self._do_get_well_known(server_name)
+
+            if cache_period > 0:
+                self._well_known_cache.set(server_name, result, cache_period)
+
+        defer.returnValue(result)
+
+    @defer.inlineCallbacks
+    def _do_get_well_known(self, server_name):
+        """Actually fetch and parse a .well-known, without checking the cache
+
+        Args:
+            server_name (bytes): name of the server, from the requested url
+
+        Returns:
+            Deferred[Tuple[bytes|None|object, int]]:
+                result, cache period, where result is one of:
+                 - the new server name from the .well-known (as a `bytes`)
+                 - None if there was no .well-known file.
+                 - INVALID_WELL_KNOWN if the .well-known was invalid
+        """
+        uri = b"https://%s/.well-known/matrix/server" % (server_name, )
+        uri_str = uri.decode("ascii")
+        logger.info("Fetching %s", uri_str)
+        try:
+            response = yield make_deferred_yieldable(
+                self._well_known_agent.request(b"GET", uri),
+            )
+            body = yield make_deferred_yieldable(readBody(response))
+            if response.code != 200:
+                raise Exception("Non-200 response %s" % (response.code, ))
+
+            parsed_body = json.loads(body.decode('utf-8'))
+            logger.info("Response from .well-known: %s", parsed_body)
+            if not isinstance(parsed_body, dict):
+                raise Exception("not a dict")
+            if "m.server" not in parsed_body:
+                raise Exception("Missing key 'm.server'")
+        except Exception as e:
+            logger.info("Error fetching %s: %s", uri_str, e)
+
+            # add some randomness to the TTL to avoid a stampeding herd every hour
+            # after startup
+            cache_period = WELL_KNOWN_INVALID_CACHE_PERIOD
+            cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER)
+            defer.returnValue((None, cache_period))
+
+        result = parsed_body["m.server"].encode("ascii")
+
+        cache_period = _cache_period_from_headers(
+            response.headers,
+            time_now=self._reactor.seconds,
+        )
+        if cache_period is None:
+            cache_period = WELL_KNOWN_DEFAULT_CACHE_PERIOD
+            # add some randomness to the TTL to avoid a stampeding herd every 24 hours
+            # after startup
+            cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER)
+        else:
+            cache_period = min(cache_period, WELL_KNOWN_MAX_CACHE_PERIOD)
+
+        defer.returnValue((result, cache_period))
+
+
+@implementer(IStreamClientEndpoint)
+class LoggingHostnameEndpoint(object):
+    """A wrapper for HostnameEndpint which logs when it connects"""
+    def __init__(self, reactor, host, port, *args, **kwargs):
+        self.host = host
+        self.port = port
+        self.ep = HostnameEndpoint(reactor, host, port, *args, **kwargs)
+
+    def connect(self, protocol_factory):
+        logger.info("Connecting to %s:%i", self.host.decode("ascii"), self.port)
+        return self.ep.connect(protocol_factory)
+
+
+def _cache_period_from_headers(headers, time_now=time.time):
+    cache_controls = _parse_cache_control(headers)
+
+    if b'no-store' in cache_controls:
+        return 0
+
+    if b'max-age' in cache_controls:
+        try:
+            max_age = int(cache_controls[b'max-age'])
+            return max_age
+        except ValueError:
+            pass
+
+    expires = headers.getRawHeaders(b'expires')
+    if expires is not None:
+        try:
+            expires_date = stringToDatetime(expires[-1])
+            return expires_date - time_now()
+        except ValueError:
+            # RFC7234 says 'A cache recipient MUST interpret invalid date formats,
+            # especially the value "0", as representing a time in the past (i.e.,
+            # "already expired").
+            return 0
+
+    return None
+
+
+def _parse_cache_control(headers):
+    cache_controls = {}
+    for hdr in headers.getRawHeaders(b'cache-control', []):
+        for directive in hdr.split(b','):
+            splits = [x.strip() for x in directive.split(b'=', 1)]
+            k = splits[0].lower()
+            v = splits[1] if len(splits) > 1 else None
+            cache_controls[k] = v
+    return cache_controls
+
+
+@attr.s
+class _RoutingResult(object):
+    """The result returned by `_route_matrix_uri`.
+
+    Contains the parameters needed to direct a federation connection to a particular
+    server.
+
+    Where an SRV record points to several servers, this object contains a single server
+    chosen from the list.
+    """
+
+    host_header = attr.ib()
+    """
+    The value we should assign to the Host header (host:port from the matrix
+    URI, or .well-known).
+
+    :type: bytes
+    """
+
+    tls_server_name = attr.ib()
+    """
+    The server name we should set in the SNI (typically host, without port, from the
+    matrix URI or .well-known)
+
+    :type: bytes
+    """
+
+    target_host = attr.ib()
+    """
+    The hostname (or IP literal) we should route the TCP connection to (the target of the
+    SRV record, or the hostname from the URL/.well-known)
+
+    :type: bytes
+    """
+
+    target_port = attr.ib()
+    """
+    The port we should route the TCP connection to (the target of the SRV record, or
+    the port from the URL/.well-known, or 8448)
+
+    :type: int
+    """
diff --git a/synapse/http/federation/srv_resolver.py b/synapse/http/federation/srv_resolver.py
new file mode 100644
index 0000000000..71830c549d
--- /dev/null
+++ b/synapse/http/federation/srv_resolver.py
@@ -0,0 +1,169 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import random
+import time
+
+import attr
+
+from twisted.internet import defer
+from twisted.internet.error import ConnectError
+from twisted.names import client, dns
+from twisted.names.error import DNSNameError, DomainError
+
+from synapse.util.logcontext import make_deferred_yieldable
+
+logger = logging.getLogger(__name__)
+
+SERVER_CACHE = {}
+
+
+@attr.s
+class Server(object):
+    """
+    Our record of an individual server which can be tried in order to reach a destination.
+
+    Attributes:
+        host (bytes): target hostname
+        port (int):
+        priority (int):
+        weight (int):
+        expires (int): when the cache should expire this record - in *seconds* since
+            the epoch
+    """
+    host = attr.ib()
+    port = attr.ib()
+    priority = attr.ib(default=0)
+    weight = attr.ib(default=0)
+    expires = attr.ib(default=0)
+
+
+def pick_server_from_list(server_list):
+    """Randomly choose a server from the server list
+
+    Args:
+        server_list (list[Server]): list of candidate servers
+
+    Returns:
+        Tuple[bytes, int]: (host, port) pair for the chosen server
+    """
+    if not server_list:
+        raise RuntimeError("pick_server_from_list called with empty list")
+
+    # TODO: currently we only use the lowest-priority servers. We should maintain a
+    # cache of servers known to be "down" and filter them out
+
+    min_priority = min(s.priority for s in server_list)
+    eligible_servers = list(s for s in server_list if s.priority == min_priority)
+    total_weight = sum(s.weight for s in eligible_servers)
+    target_weight = random.randint(0, total_weight)
+
+    for s in eligible_servers:
+        target_weight -= s.weight
+
+        if target_weight <= 0:
+            return s.host, s.port
+
+    # this should be impossible.
+    raise RuntimeError(
+        "pick_server_from_list got to end of eligible server list.",
+    )
+
+
+class SrvResolver(object):
+    """Interface to the dns client to do SRV lookups, with result caching.
+
+    The default resolver in twisted.names doesn't do any caching (it has a CacheResolver,
+    but the cache never gets populated), so we add our own caching layer here.
+
+    Args:
+        dns_client (twisted.internet.interfaces.IResolver): twisted resolver impl
+        cache (dict): cache object
+        get_time (callable): clock implementation. Should return seconds since the epoch
+    """
+    def __init__(self, dns_client=client, cache=SERVER_CACHE, get_time=time.time):
+        self._dns_client = dns_client
+        self._cache = cache
+        self._get_time = get_time
+
+    @defer.inlineCallbacks
+    def resolve_service(self, service_name):
+        """Look up a SRV record
+
+        Args:
+            service_name (bytes): record to look up
+
+        Returns:
+            Deferred[list[Server]]:
+                a list of the SRV records, or an empty list if none found
+        """
+        now = int(self._get_time())
+
+        if not isinstance(service_name, bytes):
+            raise TypeError("%r is not a byte string" % (service_name,))
+
+        cache_entry = self._cache.get(service_name, None)
+        if cache_entry:
+            if all(s.expires > now for s in cache_entry):
+                servers = list(cache_entry)
+                defer.returnValue(servers)
+
+        try:
+            answers, _, _ = yield make_deferred_yieldable(
+                self._dns_client.lookupService(service_name),
+            )
+        except DNSNameError:
+            # TODO: cache this. We can get the SOA out of the exception, and use
+            # the negative-TTL value.
+            defer.returnValue([])
+        except DomainError as e:
+            # We failed to resolve the name (other than a DNSNameError);
+            # try something in the cache, else re-raise.
+            cache_entry = self._cache.get(service_name, None)
+            if cache_entry:
+                logger.warn(
+                    "Failed to resolve %r, falling back to cache. %r",
+                    service_name, e
+                )
+                defer.returnValue(list(cache_entry))
+            else:
+                raise e
+
+        if (len(answers) == 1
+                and answers[0].type == dns.SRV
+                and answers[0].payload
+                and answers[0].payload.target == dns.Name(b'.')):
+            raise ConnectError("Service %s unavailable" % service_name)
+
+        servers = []
+
+        for answer in answers:
+            if answer.type != dns.SRV or not answer.payload:
+                continue
+
+            payload = answer.payload
+
+            servers.append(Server(
+                host=payload.target.name,
+                port=payload.port,
+                priority=payload.priority,
+                weight=payload.weight,
+                expires=now + answer.ttl,
+            ))
+
+        self._cache[service_name] = list(servers)
+        defer.returnValue(servers)
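A short usage example for the module above, assuming the Server and pick_server_from_list definitions it introduces (hostnames are illustrative):

    from synapse.http.federation.srv_resolver import Server, pick_server_from_list

    servers = [
        Server(host=b"a.example.com", port=8448, priority=10, weight=60),
        Server(host=b"b.example.com", port=8448, priority=10, weight=40),
        Server(host=b"backup.example.com", port=8448, priority=20, weight=100),
    ]

    # Only the lowest-priority entries (10) are eligible; a/b are picked in
    # roughly a 60:40 ratio, and the backup is never chosen while they exist.
    host, port = pick_server_from_list(servers)
    assert host in (b"a.example.com", b"b.example.com") and port == 8448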
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index 24b6110c20..1682c9af13 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -19,7 +19,7 @@ import random
 import sys
 from io import BytesIO
 
-from six import PY3, string_types
+from six import PY3, raise_from, string_types
 from six.moves import urllib
 
 import attr
@@ -32,7 +32,6 @@ from twisted.internet import defer, protocol
 from twisted.internet.error import DNSLookupError
 from twisted.internet.task import _EPSILON, Cooperator
 from twisted.web._newclient import ResponseDone
-from twisted.web.client import Agent, FileBodyProducer, HTTPConnectionPool
 from twisted.web.http_headers import Headers
 
 import synapse.metrics
@@ -41,9 +40,11 @@ from synapse.api.errors import (
     Codes,
     FederationDeniedError,
     HttpResponseException,
+    RequestSendFailed,
     SynapseError,
 )
-from synapse.http.endpoint import matrix_federation_endpoint
+from synapse.http import QuieterFileBodyProducer
+from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
 from synapse.util.async_helpers import timeout_deferred
 from synapse.util.logcontext import make_deferred_yieldable
 from synapse.util.metrics import Measure
@@ -65,20 +66,6 @@ else:
     MAXINT = sys.maxint
 
 
-class MatrixFederationEndpointFactory(object):
-    def __init__(self, hs):
-        self.reactor = hs.get_reactor()
-        self.tls_client_options_factory = hs.tls_client_options_factory
-
-    def endpointForURI(self, uri):
-        destination = uri.netloc.decode('ascii')
-
-        return matrix_federation_endpoint(
-            self.reactor, destination, timeout=10,
-            tls_client_options_factory=self.tls_client_options_factory
-        )
-
-
 _next_id = 1
 
 
@@ -181,17 +168,15 @@ class MatrixFederationHttpClient(object):
             requests.
     """
 
-    def __init__(self, hs):
+    def __init__(self, hs, tls_client_options_factory):
         self.hs = hs
         self.signing_key = hs.config.signing_key[0]
         self.server_name = hs.hostname
         reactor = hs.get_reactor()
-        pool = HTTPConnectionPool(reactor)
-        pool.retryAutomatically = False
-        pool.maxPersistentPerHost = 5
-        pool.cachedConnectionTimeout = 2 * 60
-        self.agent = Agent.usingEndpointFactory(
-            reactor, MatrixFederationEndpointFactory(hs), pool=pool
+
+        self.agent = MatrixFederationAgent(
+            hs.get_reactor(),
+            tls_client_options_factory,
         )
         self.clock = hs.get_clock()
         self._store = hs.get_datastore()
@@ -228,19 +213,18 @@ class MatrixFederationHttpClient(object):
             backoff_on_404 (bool): Back off if we get a 404
 
         Returns:
-            Deferred: resolves with the http response object on success.
-
-            Fails with ``HttpResponseException``: if we get an HTTP response
-                code >= 300.
-
-            Fails with ``NotRetryingDestination`` if we are not yet ready
-                to retry this server.
-
-            Fails with ``FederationDeniedError`` if this destination
-                is not on our federation whitelist
-
-            (May also fail with plenty of other Exceptions for things like DNS
-                failures, connection failures, SSL failures.)
+            Deferred[twisted.web.client.Response]: resolves with the HTTP
+            response object on success.
+
+        Raises:
+            HttpResponseException: If we get an HTTP response code >= 300
+                (except 429).
+            NotRetryingDestination: If we are not yet ready to retry this
+                server.
+            FederationDeniedError: If this destination is not on our
+                federation whitelist
+            RequestSendFailed: If there were problems connecting to the
+                remote, due to e.g. DNS failures, connection timeouts etc.
         """
         if timeout:
             _sec_timeout = timeout / 1000
@@ -271,7 +255,6 @@ class MatrixFederationHttpClient(object):
 
         headers_dict = {
             b"User-Agent": [self.version_string_bytes],
-            b"Host": [destination_bytes],
         }
 
         with limiter:
@@ -298,60 +281,111 @@ class MatrixFederationHttpClient(object):
                     json = request.get_json()
                     if json:
                         headers_dict[b"Content-Type"] = [b"application/json"]
-                        self.sign_request(
+                        auth_headers = self.build_auth_headers(
                             destination_bytes, method_bytes, url_to_sign_bytes,
-                            headers_dict, json,
+                            json,
                         )
                         data = encode_canonical_json(json)
-                        producer = FileBodyProducer(
+                        producer = QuieterFileBodyProducer(
                             BytesIO(data),
                             cooperator=self._cooperator,
                         )
                     else:
                         producer = None
-                        self.sign_request(
+                        auth_headers = self.build_auth_headers(
                             destination_bytes, method_bytes, url_to_sign_bytes,
-                            headers_dict,
                         )
 
+                    headers_dict[b"Authorization"] = auth_headers
+
                     logger.info(
-                        "{%s} [%s] Sending request: %s %s",
+                        "{%s} [%s] Sending request: %s %s; timeout %fs",
                         request.txn_id, request.destination, request.method,
-                        url_str,
+                        url_str, _sec_timeout,
                     )
 
-                    # we don't want all the fancy cookie and redirect handling that
-                    # treq.request gives: just use the raw Agent.
-                    request_deferred = self.agent.request(
-                        method_bytes,
-                        url_bytes,
-                        headers=Headers(headers_dict),
-                        bodyProducer=producer,
-                    )
+                    try:
+                        with Measure(self.clock, "outbound_request"):
+                            # we don't want all the fancy cookie and redirect handling
+                            # that treq.request gives: just use the raw Agent.
+                            request_deferred = self.agent.request(
+                                method_bytes,
+                                url_bytes,
+                                headers=Headers(headers_dict),
+                                bodyProducer=producer,
+                            )
+
+                            request_deferred = timeout_deferred(
+                                request_deferred,
+                                timeout=_sec_timeout,
+                                reactor=self.hs.get_reactor(),
+                            )
+
+                            response = yield request_deferred
+                    except DNSLookupError as e:
+                        raise_from(RequestSendFailed(e, can_retry=retry_on_dns_fail), e)
+                    except Exception as e:
+                        logger.info("Failed to send request: %s", e)
+                        raise_from(RequestSendFailed(e, can_retry=True), e)
 
-                    request_deferred = timeout_deferred(
-                        request_deferred,
-                        timeout=_sec_timeout,
-                        reactor=self.hs.get_reactor(),
+                    logger.info(
+                        "{%s} [%s] Got response headers: %d %s",
+                        request.txn_id,
+                        request.destination,
+                        response.code,
+                        response.phrase.decode('ascii', errors='replace'),
                     )
 
-                    with Measure(self.clock, "outbound_request"):
-                        response = yield make_deferred_yieldable(
-                            request_deferred,
+                    if 200 <= response.code < 300:
+                        pass
+                    else:
+                        # :'(
+                        # Update transactions table?
+                        d = treq.content(response)
+                        d = timeout_deferred(
+                            d,
+                            timeout=_sec_timeout,
+                            reactor=self.hs.get_reactor(),
+                        )
+
+                        try:
+                            body = yield make_deferred_yieldable(d)
+                        except Exception as e:
+                            # Eh, we're already going to raise an exception, so let's
+                            # ignore it if this fails.
+                            logger.warn(
+                                "{%s} [%s] Failed to get error response: %s %s: %s",
+                                request.txn_id,
+                                request.destination,
+                                request.method,
+                                url_str,
+                                _flatten_response_never_received(e),
+                            )
+                            body = None
+
+                        e = HttpResponseException(
+                            response.code, response.phrase, body
                         )
 
+                        # Retry if the error is a 429 (Too Many Requests),
+                        # otherwise just raise a standard HttpResponseException
+                        if response.code == 429:
+                            raise_from(RequestSendFailed(e, can_retry=True), e)
+                        else:
+                            raise e
+
                     break
-                except Exception as e:
+                except RequestSendFailed as e:
                     logger.warn(
                         "{%s} [%s] Request failed: %s %s: %s",
                         request.txn_id,
                         request.destination,
                         request.method,
                         url_str,
-                        _flatten_response_never_received(e),
+                        _flatten_response_never_received(e.inner_exception),
                     )
 
-                    if not retry_on_dns_fail and isinstance(e, DNSLookupError):
+                    if not e.can_retry:
                         raise
 
                     if retries_left and not timeout:
@@ -376,50 +410,36 @@ class MatrixFederationHttpClient(object):
                     else:
                         raise
 
-            logger.info(
-                "{%s} [%s] Got response headers: %d %s",
-                request.txn_id,
-                request.destination,
-                response.code,
-                response.phrase.decode('ascii', errors='replace'),
-            )
-
-            if 200 <= response.code < 300:
-                pass
-            else:
-                # :'(
-                # Update transactions table?
-                d = treq.content(response)
-                d = timeout_deferred(
-                    d,
-                    timeout=_sec_timeout,
-                    reactor=self.hs.get_reactor(),
-                )
-                body = yield make_deferred_yieldable(d)
-                raise HttpResponseException(
-                    response.code, response.phrase, body
-                )
+                except Exception as e:
+                    logger.warn(
+                        "{%s} [%s] Request failed: %s %s: %s",
+                        request.txn_id,
+                        request.destination,
+                        request.method,
+                        url_str,
+                        _flatten_response_never_received(e),
+                    )
+                    raise
 
             defer.returnValue(response)
 
-    def sign_request(self, destination, method, url_bytes, headers_dict,
-                     content=None, destination_is=None):
+    def build_auth_headers(
+        self, destination, method, url_bytes, content=None, destination_is=None,
+    ):
         """
-        Signs a request by adding an Authorization header to headers_dict
+        Builds the Authorization headers for a federation request
         Args:
             destination (bytes|None): The destination home server of the request.
                 May be None if the destination is an identity server, in which case
                 destination_is must be non-None.
             method (bytes): The HTTP method of the request
             url_bytes (bytes): The URI path of the request
-            headers_dict (dict[bytes, list[bytes]]): Dictionary of request headers to
-                append to
             content (object): The body of the request
             destination_is (bytes): As 'destination', but if the destination is an
                 identity server
 
         Returns:
-            None
+            list[bytes]: a list of headers to be added as "Authorization:" headers
         """
         request = {
             "method": method,
@@ -446,8 +466,7 @@ class MatrixFederationHttpClient(object):
                     self.server_name, key, sig,
                 )).encode('ascii')
             )
-
-        headers_dict[b"Authorization"] = auth_headers
+        return auth_headers
 
     @defer.inlineCallbacks
     def put_json(self, destination, path, args={}, data={},
@@ -477,17 +496,18 @@ class MatrixFederationHttpClient(object):
                 requests)
 
         Returns:
-            Deferred: Succeeds when we get a 2xx HTTP response. The result
-            will be the decoded JSON body.
-
-            Fails with ``HttpResponseException`` if we get an HTTP response
-            code >= 300.
-
-            Fails with ``NotRetryingDestination`` if we are not yet ready
-            to retry this server.
-
-            Fails with ``FederationDeniedError`` if this destination
-            is not on our federation whitelist
+            Deferred[dict|list]: Succeeds when we get a 2xx HTTP response. The
+            result will be the decoded JSON body.
+
+        Raises:
+            HttpResponseException: If we get an HTTP response code >= 300
+                (except 429).
+            NotRetryingDestination: If we are not yet ready to retry this
+                server.
+            FederationDeniedError: If this destination is not on our
+                federation whitelist.
+            RequestSendFailed: If there were problems connecting to the
+                remote, due to e.g. DNS failures, connection timeouts, etc.
         """
 
         request = MatrixFederationRequest(
@@ -531,17 +551,18 @@ class MatrixFederationHttpClient(object):
                 try the request anyway.
             args (dict): query params
         Returns:
-            Deferred: Succeeds when we get a 2xx HTTP response. The result
-            will be the decoded JSON body.
-
-            Fails with ``HttpResponseException`` if we get an HTTP response
-            code >= 300.
-
-            Fails with ``NotRetryingDestination`` if we are not yet ready
-            to retry this server.
-
-            Fails with ``FederationDeniedError`` if this destination
-            is not on our federation whitelist
+            Deferred[dict|list]: Succeeds when we get a 2xx HTTP response. The
+            result will be the decoded JSON body.
+
+        Raises:
+            HttpResponseException: If we get an HTTP response code >= 300
+                (except 429).
+            NotRetryingDestination: If we are not yet ready to retry this
+                server.
+            FederationDeniedError: If this destination is not on our
+                federation whitelist.
+            RequestSendFailed: If there were problems connecting to the
+                remote, due to e.g. DNS failures, connection timeouts, etc.
         """
 
         request = MatrixFederationRequest(
@@ -586,17 +607,18 @@ class MatrixFederationHttpClient(object):
             ignore_backoff (bool): true to ignore the historical backoff data
                 and try the request anyway.
         Returns:
-            Deferred: Succeeds when we get a 2xx HTTP response. The result
-            will be the decoded JSON body.
-
-            Fails with ``HttpResponseException`` if we get an HTTP response
-            code >= 300.
-
-            Fails with ``NotRetryingDestination`` if we are not yet ready
-            to retry this server.
-
-            Fails with ``FederationDeniedError`` if this destination
-            is not on our federation whitelist
+            Deferred[dict|list]: Succeeds when we get a 2xx HTTP response. The
+            result will be the decoded JSON body.
+
+        Raises:
+            HttpResponseException: If we get an HTTP response code >= 300
+                (except 429).
+            NotRetryingDestination: If we are not yet ready to retry this
+                server.
+            FederationDeniedError: If this destination is not on our
+                federation whitelist.
+            RequestSendFailed: If there were problems connecting to the
+                remote, due to e.g. DNS failures, connection timeouts, etc.
         """
         logger.debug("get_json args: %s", args)
 
@@ -637,17 +659,18 @@ class MatrixFederationHttpClient(object):
             ignore_backoff (bool): true to ignore the historical backoff data and
                 try the request anyway.
         Returns:
-            Deferred: Succeeds when we get a 2xx HTTP response. The result
-            will be the decoded JSON body.
-
-            Fails with ``HttpResponseException`` if we get an HTTP response
-            code >= 300.
-
-            Fails with ``NotRetryingDestination`` if we are not yet ready
-            to retry this server.
-
-            Fails with ``FederationDeniedError`` if this destination
-            is not on our federation whitelist
+            Deferred[dict|list]: Succeeds when we get a 2xx HTTP response. The
+            result will be the decoded JSON body.
+
+        Raises:
+            HttpResponseException: If we get an HTTP response code >= 300
+                (except 429).
+            NotRetryingDestination: If we are not yet ready to retry this
+                server.
+            FederationDeniedError: If this destination is not on our
+                federation whitelist.
+            RequestSendFailed: If there were problems connecting to the
+                remote, due to e.g. DNS failures, connection timeouts, etc.
         """
         request = MatrixFederationRequest(
             method="DELETE",
@@ -680,18 +703,20 @@ class MatrixFederationHttpClient(object):
             args (dict): Optional dictionary used to create the query string.
             ignore_backoff (bool): true to ignore the historical backoff data
                 and try the request anyway.
-        Returns:
-            Deferred: resolves with an (int,dict) tuple of the file length and
-            a dict of the response headers.
-
-            Fails with ``HttpResponseException`` if we get an HTTP response code
-            >= 300
-
-            Fails with ``NotRetryingDestination`` if we are not yet ready
-            to retry this server.
 
-            Fails with ``FederationDeniedError`` if this destination
-            is not on our federation whitelist
+        Returns:
+            Deferred[tuple[int, dict]]: Resolves with an (int,dict) tuple of
+            the file length and a dict of the response headers.
+
+        Raises:
+            HttpResponseException: If we get an HTTP response code >= 300
+                (except 429).
+            NotRetryingDestination: If we are not yet ready to retry this
+                server.
+            FederationDeniedError: If this destination is not on our
+                federation whitelist.
+            RequestSendFailed: If there were problems connecting to the
+                remote, due to e.g. DNS failures, connection timeouts, etc.
         """
         request = MatrixFederationRequest(
             method="GET",
@@ -784,21 +809,21 @@ def check_content_type_is_json(headers):
         headers (twisted.web.http_headers.Headers): headers to check
 
     Raises:
-        RuntimeError if the
+        RequestSendFailed: if the Content-Type header is missing or isn't JSON
 
     """
     c_type = headers.getRawHeaders(b"Content-Type")
     if c_type is None:
-        raise RuntimeError(
+        raise RequestSendFailed(RuntimeError(
             "No Content-Type header"
-        )
+        ), can_retry=False)
 
     c_type = c_type[0].decode('ascii')  # only the first header
     val, options = cgi.parse_header(c_type)
     if val != "application/json":
-        raise RuntimeError(
+        raise RequestSendFailed(RuntimeError(
             "Content-Type not application/json: was '%s'" % c_type
-        )
+        ), can_retry=False)
 
 
 def encode_query_args(args):
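
For context, a minimal sketch of how a caller might consume the error contract documented in the hunks above; the destination, path, and client here are placeholders, not code from this change:

# A minimal sketch, not part of this change: consuming the documented
# failure modes of the federation client. `client` stands for an existing
# MatrixFederationHttpClient; the destination and path are hypothetical.
from twisted.internet import defer

from synapse.api.errors import (
    FederationDeniedError,
    HttpResponseException,
    RequestSendFailed,
)
from synapse.util.retryutils import NotRetryingDestination


@defer.inlineCallbacks
def fetch_profile(client):
    try:
        result = yield client.get_json(
            destination="remote.example.com",
            path="/_matrix/federation/v1/query/profile",
            args={"user_id": "@alice:remote.example.com"},
        )
    except RequestSendFailed:
        # Connection-level problem (DNS, timeout, ...); its can_retry flag
        # says whether backing off and retrying later is sensible.
        result = None
    except (HttpResponseException, NotRetryingDestination, FederationDeniedError):
        # The remote returned >= 300 (other than 429), or we are backing
        # off, or the destination is not on the federation whitelist.
        result = None
    defer.returnValue(result)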
diff --git a/synapse/http/server.py b/synapse/http/server.py
index b4b25cab19..16fb7935da 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -106,10 +106,10 @@ def wrap_json_request_handler(h):
             # trace.
             f = failure.Failure()
             logger.error(
-                "Failed handle request via %r: %r: %s",
-                h,
+                "Failed handle request via %r: %r",
+                request.request_metrics.name,
                 request,
-                f.getTraceback().rstrip(),
+                exc_info=(f.type, f.value, f.getTracebackObject()),
             )
             # Only respond with an error response if we haven't already started
             # writing, otherwise lets just kill the connection
@@ -169,18 +169,18 @@ def _return_html_error(f, request):
             )
         else:
             logger.error(
-                "Failed handle request %r: %s",
+                "Failed handle request %r",
                 request,
-                f.getTraceback().rstrip(),
+                exc_info=(f.type, f.value, f.getTracebackObject()),
             )
     else:
         code = http_client.INTERNAL_SERVER_ERROR
         msg = "Internal server error"
 
         logger.error(
-            "Failed handle request %r: %s",
+            "Failed handle request %r",
             request,
-            f.getTraceback().rstrip(),
+            exc_info=(f.type, f.value, f.getTracebackObject()),
         )
 
     body = HTML_ERROR_TEMPLATE.format(
@@ -468,13 +468,13 @@ def set_cors_headers(request):
     Args:
         request (twisted.web.http.Request): The http request to add CORS headers to.
     """
-    request.setHeader("Access-Control-Allow-Origin", "*")
+    request.setHeader(b"Access-Control-Allow-Origin", b"*")
     request.setHeader(
-        "Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, OPTIONS"
+        b"Access-Control-Allow-Methods", b"GET, POST, PUT, DELETE, OPTIONS"
     )
     request.setHeader(
-        "Access-Control-Allow-Headers",
-        "Origin, X-Requested-With, Content-Type, Accept, Authorization"
+        b"Access-Control-Allow-Headers",
+        b"Origin, X-Requested-With, Content-Type, Accept, Authorization"
     )
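
The exc_info change above hands the logging framework a real exception triple instead of a pre-rendered traceback string; a small sketch of the pattern, independent of this codebase:

import logging

from twisted.python import failure

logger = logging.getLogger(__name__)

try:
    1 / 0
except ZeroDivisionError:
    f = failure.Failure()  # captures the in-flight exception
    # Handlers (and integrations such as Sentry) now receive the actual
    # (type, value, traceback) tuple rather than a flattened string.
    logger.error(
        "Failed to handle request %r",
        "<request>",
        exc_info=(f.type, f.value, f.getTracebackObject()),
    )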
 
 
diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py
index a1e4b88e6d..528125e737 100644
--- a/synapse/http/servlet.py
+++ b/synapse/http/servlet.py
@@ -121,16 +121,15 @@ def parse_string(request, name, default=None, required=False,
 
     Args:
         request: the twisted HTTP request.
-        name (bytes/unicode): the name of the query parameter.
-        default (bytes/unicode|None): value to use if the parameter is absent,
+        name (bytes|unicode): the name of the query parameter.
+        default (bytes|unicode|None): value to use if the parameter is absent,
             defaults to None. Must be bytes if encoding is None.
         required (bool): whether to raise a 400 SynapseError if the
             parameter is absent, defaults to False.
-        allowed_values (list[bytes/unicode]): List of allowed values for the
+        allowed_values (list[bytes|unicode]): List of allowed values for the
             string, or None if any value is allowed, defaults to None. Must be
             the same type as name, if given.
-        encoding: The encoding to decode the name to, and decode the string
-            content with.
+        encoding (str|None): The encoding to decode the string content with.
 
     Returns:
         bytes/unicode|None: A string value or the default. Unicode if encoding
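
A short usage sketch for parse_string, assuming the signature documented in this hunk; the servlet and the "dir" parameter are illustrative only:

from synapse.http.servlet import parse_string


class ExampleServlet(object):
    def on_GET(self, request):
        # Returns unicode when an encoding is given; raises a 400
        # SynapseError if "dir" is present but not an allowed value.
        direction = parse_string(
            request, "dir", default="forward",
            allowed_values=["forward", "backward"], encoding="ascii",
        )
        return 200, {"dir": direction}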
diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py
index 59900aa5d1..ef48984fdd 100644
--- a/synapse/metrics/__init__.py
+++ b/synapse/metrics/__init__.py
@@ -274,8 +274,6 @@ pending_calls_metric = Histogram(
 # Federation Metrics
 #
 
-sent_edus_counter = Counter("synapse_federation_client_sent_edus", "")
-
 sent_transactions_counter = Counter("synapse_federation_client_sent_transactions", "")
 
 events_processed_counter = Counter("synapse_federation_client_events_processed", "")
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
index 097c844d31..fc9a20ff59 100644
--- a/synapse/module_api/__init__.py
+++ b/synapse/module_api/__init__.py
@@ -79,7 +79,7 @@ class ModuleApi(object):
         Returns:
             Deferred: a 2-tuple of (user_id, access_token)
         """
-        reg = self.hs.get_handlers().registration_handler
+        reg = self.hs.get_registration_handler()
         return reg.register(localpart=localpart)
 
     @defer.inlineCallbacks
diff --git a/synapse/push/clientformat.py b/synapse/push/clientformat.py
index ecbf364a5e..8bd96b1178 100644
--- a/synapse/push/clientformat.py
+++ b/synapse/push/clientformat.py
@@ -84,7 +84,7 @@ def _rule_to_template(rule):
         templaterule["pattern"] = thecond["pattern"]
 
     if unscoped_rule_id:
-            templaterule['rule_id'] = unscoped_rule_id
+        templaterule['rule_id'] = unscoped_rule_id
     if 'default' in rule:
         templaterule['default'] = rule['default']
     return templaterule
diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py
index f369124258..50e1007d84 100644
--- a/synapse/push/emailpusher.py
+++ b/synapse/push/emailpusher.py
@@ -85,7 +85,10 @@ class EmailPusher(object):
             self.timed_call = None
 
     def on_new_notifications(self, min_stream_ordering, max_stream_ordering):
-        self.max_stream_ordering = max(max_stream_ordering, self.max_stream_ordering)
+        if self.max_stream_ordering:
+            self.max_stream_ordering = max(max_stream_ordering, self.max_stream_ordering)
+        else:
+            self.max_stream_ordering = max_stream_ordering
         self._start_processing()
 
     def on_new_receipts(self, min_stream_id, max_stream_id):
diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py
index 6bd703632d..e65f8c63d3 100644
--- a/synapse/push/httppusher.py
+++ b/synapse/push/httppusher.py
@@ -32,9 +32,25 @@ if six.PY3:
 
 logger = logging.getLogger(__name__)
 
-http_push_processed_counter = Counter("synapse_http_httppusher_http_pushes_processed", "")
+http_push_processed_counter = Counter(
+    "synapse_http_httppusher_http_pushes_processed",
+    "Number of push notifications successfully sent",
+)
 
-http_push_failed_counter = Counter("synapse_http_httppusher_http_pushes_failed", "")
+http_push_failed_counter = Counter(
+    "synapse_http_httppusher_http_pushes_failed",
+    "Number of push notifications which failed",
+)
+
+http_badges_processed_counter = Counter(
+    "synapse_http_httppusher_badge_updates_processed",
+    "Number of badge updates successfully sent",
+)
+
+http_badges_failed_counter = Counter(
+    "synapse_http_httppusher_badge_updates_failed",
+    "Number of badge updates which failed",
+)
 
 
 class HttpPusher(object):
@@ -81,6 +97,11 @@ class HttpPusher(object):
             pusherdict['pushkey'],
         )
 
+        if self.data is None:
+            raise PusherConfigException(
+                "data can not be null for HTTP pusher"
+            )
+
         if 'url' not in self.data:
             raise PusherConfigException(
                 "'url' required in data for HTTP pusher"
@@ -311,10 +332,10 @@ class HttpPusher(object):
                 ]
             }
         }
-        if event.type == 'm.room.member':
+        if event.type == 'm.room.member' and event.is_state():
             d['notification']['membership'] = event.content['membership']
             d['notification']['user_is_target'] = event.state_key == self.user_id
-        if self.hs.config.push_include_content and 'content' in event:
+        if self.hs.config.push_include_content and event.content:
             d['notification']['content'] = event.content
 
         # We no longer send aliases separately, instead, we send the human
@@ -333,10 +354,10 @@ class HttpPusher(object):
             defer.returnValue([])
         try:
             resp = yield self.http_client.post_json_get_json(self.url, notification_dict)
-        except Exception:
-            logger.warn(
-                "Failed to push event %s to %s",
-                event.event_id, self.name, exc_info=True,
+        except Exception as e:
+            logger.warning(
+                "Failed to push event %s to %s: %s %s",
+                event.event_id, self.name, type(e), e,
             )
             defer.returnValue(False)
         rejected = []
@@ -346,6 +367,10 @@ class HttpPusher(object):
 
     @defer.inlineCallbacks
     def _send_badge(self, badge):
+        """
+        Args:
+            badge (int): number of unread messages
+        """
         logger.info("Sending updated badge count %d to %s", badge, self.name)
         d = {
             'notification': {
@@ -366,14 +391,11 @@ class HttpPusher(object):
             }
         }
         try:
-            resp = yield self.http_client.post_json_get_json(self.url, d)
-        except Exception:
-            logger.warn(
-                "Failed to send badge count to %s",
-                self.name, exc_info=True,
+            yield self.http_client.post_json_get_json(self.url, d)
+            http_badges_processed_counter.inc()
+        except Exception as e:
+            logger.warning(
+                "Failed to send badge count to %s: %s %s",
+                self.name, type(e), e,
             )
-            defer.returnValue(False)
-        rejected = []
-        if 'rejected' in resp:
-            rejected = resp['rejected']
-        defer.returnValue(rejected)
+            http_badges_failed_counter.inc()
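
The counter documentation strings added above surface directly in the Prometheus exposition; a standalone sketch (the metric name here is made up):

from prometheus_client import Counter, generate_latest

demo_counter = Counter(
    "synapse_demo_pushes_processed",
    "Number of push notifications successfully sent",
)
demo_counter.inc()

# With the prometheus_client version pinned by this tree (<0.4.0), the
# scrape output contains:
#   # HELP synapse_demo_pushes_processed Number of push notifications successfully sent
#   # TYPE synapse_demo_pushes_processed counter
#   synapse_demo_pushes_processed 1.0
print(generate_latest().decode("ascii"))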
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index 16fb5e8471..1eb5be0957 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -26,7 +26,6 @@ import bleach
 import jinja2
 
 from twisted.internet import defer
-from twisted.mail.smtp import sendmail
 
 from synapse.api.constants import EventTypes
 from synapse.api.errors import StoreError
@@ -37,6 +36,7 @@ from synapse.push.presentable_names import (
 )
 from synapse.types import UserID
 from synapse.util.async_helpers import concurrently_execute
+from synapse.util.logcontext import make_deferred_yieldable
 from synapse.visibility import filter_events_for_client
 
 logger = logging.getLogger(__name__)
@@ -85,6 +85,7 @@ class Mailer(object):
         self.notif_template_html = notif_template_html
         self.notif_template_text = notif_template_text
 
+        self.sendmail = self.hs.get_sendmail()
         self.store = self.hs.get_datastore()
         self.macaroon_gen = self.hs.get_macaroon_generator()
         self.state_handler = self.hs.get_state_handler()
@@ -191,17 +192,17 @@ class Mailer(object):
         multipart_msg.attach(html_part)
 
         logger.info("Sending email push notification to %s" % email_address)
-        # logger.debug(html_text)
 
-        yield sendmail(
+        yield make_deferred_yieldable(self.sendmail(
             self.hs.config.email_smtp_host,
-            raw_from, raw_to, multipart_msg.as_string(),
+            raw_from, raw_to, multipart_msg.as_string().encode('utf8'),
+            reactor=self.hs.get_reactor(),
             port=self.hs.config.email_smtp_port,
             requireAuthentication=self.hs.config.email_smtp_user is not None,
             username=self.hs.config.email_smtp_user,
             password=self.hs.config.email_smtp_pass,
             requireTransportSecurity=self.hs.config.require_transport_security
-        )
+        ))
 
     @defer.inlineCallbacks
     def get_room_vars(self, room_id, user_id, notifs, notif_events, room_state_ids):
@@ -333,7 +334,7 @@ class Mailer(object):
                           notif_events, user_id, reason):
         if len(notifs_by_room) == 1:
             # Only one room has new stuff
-            room_id = notifs_by_room.keys()[0]
+            room_id = list(notifs_by_room.keys())[0]
 
             # If the room has some kind of name, use it, but we don't
             # want the generated-from-names one here otherwise we'll
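
Fetching sendmail via hs.get_sendmail() makes the SMTP transport injectable; a sketch of how a test might take advantage, assuming a test-homeserver helper that accepts overrides:

from twisted.internet import defer

sent_emails = []


def fake_sendmail(smtphost, from_addr, to_addr, msg_bytes, **kwargs):
    # Record the message instead of speaking SMTP; resolve like the real
    # twisted.mail.smtp.sendmail would on success.
    sent_emails.append((from_addr, to_addr, msg_bytes))
    return defer.succeed(None)


# hs = setup_test_homeserver(sendmail=fake_sendmail)  # hypothetical setup
# ... trigger a notification email, then assert on sent_emails ...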
diff --git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py
index 2bd321d530..cf6c8b875e 100644
--- a/synapse/push/push_rule_evaluator.py
+++ b/synapse/push/push_rule_evaluator.py
@@ -124,7 +124,7 @@ class PushRuleEvaluatorForEvent(object):
 
         # XXX: optimisation: cache our pattern regexps
         if condition['key'] == 'content.body':
-            body = self._event["content"].get("body", None)
+            body = self._event.content.get("body", None)
             if not body:
                 return False
 
@@ -140,7 +140,7 @@ class PushRuleEvaluatorForEvent(object):
         if not display_name:
             return False
 
-        body = self._event["content"].get("body", None)
+        body = self._event.content.get("body", None)
         if not body:
             return False
 
diff --git a/synapse/push/pusher.py b/synapse/push/pusher.py
index fcee6d9d7e..b33f2a357b 100644
--- a/synapse/push/pusher.py
+++ b/synapse/push/pusher.py
@@ -52,11 +52,12 @@ class PusherFactory(object):
             logger.info("defined email pusher type")
 
     def create_pusher(self, pusherdict):
-        logger.info("trying to create_pusher for %r", pusherdict)
-
-        if pusherdict['kind'] in self.pusher_types:
-            logger.info("found pusher")
-            return self.pusher_types[pusherdict['kind']](self.hs, pusherdict)
+        kind = pusherdict['kind']
+        f = self.pusher_types.get(kind, None)
+        if not f:
+            return None
+        logger.debug("creating %s pusher for %r", kind, pusherdict)
+        return f(self.hs, pusherdict)
 
     def _create_email_pusher(self, _hs, pusherdict):
         app_name = self._app_name_from_pusherdict(pusherdict)
diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py
index 5a4e73ccd6..abf1a1a9c1 100644
--- a/synapse/push/pusherpool.py
+++ b/synapse/push/pusherpool.py
@@ -19,6 +19,7 @@ import logging
 from twisted.internet import defer
 
 from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.push import PusherConfigException
 from synapse.push.pusher import PusherFactory
 
 logger = logging.getLogger(__name__)
@@ -140,6 +141,10 @@ class PusherPool:
 
     @defer.inlineCallbacks
     def on_new_notifications(self, min_stream_id, max_stream_id):
+        if not self.pushers:
+            # nothing to do here.
+            return
+
         try:
             users_affected = yield self.store.get_push_action_users_in_range(
                 min_stream_id, max_stream_id
@@ -155,6 +160,10 @@ class PusherPool:
 
     @defer.inlineCallbacks
     def on_new_receipts(self, min_stream_id, max_stream_id, affected_room_ids):
+        if not self.pushers:
+            # nothing to do here.
+            return
+
         try:
             # Need to subtract 1 from the minimum because the lower bound here
             # is not inclusive
@@ -214,6 +223,15 @@ class PusherPool:
         """
         try:
             p = self.pusher_factory.create_pusher(pusherdict)
+        except PusherConfigException as e:
+            logger.warning(
+                "Pusher incorrectly configured user=%s, appid=%s, pushkey=%s: %s",
+                pusherdict.get('user_name'),
+                pusherdict.get('app_id'),
+                pusherdict.get('pushkey'),
+                e,
+            )
+            return
         except Exception:
             logger.exception("Couldn't start a pusher: caught Exception")
             return
diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py
index 943876456b..f71e21ff4d 100644
--- a/synapse/python_dependencies.py
+++ b/synapse/python_dependencies.py
@@ -15,177 +15,153 @@
 # limitations under the License.
 
 import logging
-from distutils.version import LooseVersion
+
+from pkg_resources import DistributionNotFound, VersionConflict, get_distribution
 
 logger = logging.getLogger(__name__)
 
-# this dict maps from python package name to a list of modules we expect it to
-# provide.
-#
-# the key is a "requirement specifier", as used as a parameter to `pip
-# install`[1], or an `install_requires` argument to `setuptools.setup` [2].
+
+# REQUIREMENTS is a simple list of requirement specifiers[1], and must be
+# installed. It is passed to setup() as install_requires in setup.py.
 #
-# the value is a sequence of strings; each entry should be the name of the
-# python module, optionally followed by a version assertion which can be either
-# ">=<ver>" or "==<ver>".
+# CONDITIONAL_REQUIREMENTS is the optional dependencies, represented as a dict
+# of lists. The dict key is the optional dependency name and can be passed to
+# pip when installing. The list is a series of requirement specifiers[1] to be
+# installed when that optional dependency requirement is specified. It is passed
+# to setup() as extras_require in setup.py
 #
 # [1] https://pip.pypa.io/en/stable/reference/pip_install/#requirement-specifiers.
-# [2] https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-dependencies
-REQUIREMENTS = {
-    "jsonschema>=2.5.1": ["jsonschema>=2.5.1"],
-    "frozendict>=1": ["frozendict"],
-    "unpaddedbase64>=1.1.0": ["unpaddedbase64>=1.1.0"],
-    "canonicaljson>=1.1.3": ["canonicaljson>=1.1.3"],
-    "signedjson>=1.0.0": ["signedjson>=1.0.0"],
-    "pynacl>=1.2.1": ["nacl>=1.2.1", "nacl.bindings"],
-    "service_identity>=16.0.0": ["service_identity>=16.0.0"],
-    "Twisted>=17.1.0": ["twisted>=17.1.0"],
-    "treq>=15.1": ["treq>=15.1"],
 
-    # Twisted has required pyopenssl 16.0 since about Twisted 16.6.
-    "pyopenssl>=16.0.0": ["OpenSSL>=16.0.0"],
-
-    "pyyaml>=3.11": ["yaml"],
-    "pyasn1>=0.1.9": ["pyasn1"],
-    "pyasn1-modules>=0.0.7": ["pyasn1_modules"],
-    "daemonize>=2.3.1": ["daemonize"],
-    "bcrypt>=3.1.0": ["bcrypt>=3.1.0"],
-    "pillow>=3.1.2": ["PIL"],
-    "pydenticon>=0.2": ["pydenticon"],
-    "sortedcontainers>=1.4.4": ["sortedcontainers"],
-    "psutil>=2.0.0": ["psutil>=2.0.0"],
-    "pysaml2>=3.0.0": ["saml2"],
-    "pymacaroons-pynacl>=0.9.3": ["pymacaroons"],
-    "msgpack-python>=0.4.2": ["msgpack"],
-    "phonenumbers>=8.2.0": ["phonenumbers"],
-    "six>=1.10": ["six"],
+REQUIREMENTS = [
+    "jsonschema>=2.5.1",
+    "frozendict>=1",
+    "unpaddedbase64>=1.1.0",
+    "canonicaljson>=1.1.3",
+    "signedjson>=1.0.0",
+    "pynacl>=1.2.1",
+    "service_identity>=16.0.0",
+
+    # our logcontext handling relies on the ability to cancel inlineCallbacks
+    # (https://twistedmatrix.com/trac/ticket/4632) which landed in Twisted 18.7.
+    "Twisted>=18.7.0",
 
+    "treq>=15.1",
+    # Twisted has required pyopenssl 16.0 since about Twisted 16.6.
+    "pyopenssl>=16.0.0",
+    "pyyaml>=3.11",
+    "pyasn1>=0.1.9",
+    "pyasn1-modules>=0.0.7",
+    "daemonize>=2.3.1",
+    "bcrypt>=3.1.0",
+    "pillow>=3.1.2",
+    "sortedcontainers>=1.4.4",
+    "psutil>=2.0.0",
+    "pymacaroons>=0.13.0",
+    "msgpack>=0.5.0",
+    "phonenumbers>=8.2.0",
+    "six>=1.10",
     # prometheus_client 0.4.0 changed the format of counter metrics
     # (cf https://github.com/matrix-org/synapse/issues/4001)
-    "prometheus_client>=0.0.18,<0.4.0": ["prometheus_client"],
+    "prometheus_client>=0.0.18,<0.4.0",
 
     # we use attr.s(slots), which arrived in 16.0.0
-    "attrs>=16.0.0": ["attr>=16.0.0"],
-    "netaddr>=0.7.18": ["netaddr"],
-}
-
-CONDITIONAL_REQUIREMENTS = {
-    "web_client": {
-        "matrix_angular_sdk>=0.6.8": ["syweb>=0.6.8"],
-    },
-    "email.enable_notifs": {
-        "Jinja2>=2.8": ["Jinja2>=2.8"],
-        "bleach>=1.4.2": ["bleach>=1.4.2"],
-    },
-    "matrix-synapse-ldap3": {
-        "matrix-synapse-ldap3>=0.1": ["ldap_auth_provider"],
-    },
-    "postgres": {
-        "psycopg2>=2.6": ["psycopg2"]
-    }
-}
+    # Twisted 18.7.0 requires attrs>=17.4.0
+    "attrs>=17.4.0",
 
+    "netaddr>=0.7.18",
+]
 
-def requirements(config=None, include_conditional=False):
-    reqs = REQUIREMENTS.copy()
-    if include_conditional:
-        for _, req in CONDITIONAL_REQUIREMENTS.items():
-            reqs.update(req)
-    return reqs
-
+CONDITIONAL_REQUIREMENTS = {
+    "email.enable_notifs": ["Jinja2>=2.9", "bleach>=1.4.2"],
+    "matrix-synapse-ldap3": ["matrix-synapse-ldap3>=0.1"],
+    "postgres": ["psycopg2>=2.6"],
 
-def github_link(project, version, egg):
-    return "https://github.com/%s/tarball/%s/#egg=%s" % (project, version, egg)
+    # ConsentResource uses select_autoescape, which arrived in jinja 2.9
+    "resources.consent": ["Jinja2>=2.9"],
 
+    # ACME support is required to provision TLS certificates from authorities
+    # that use the protocol, such as Let's Encrypt.
+    "acme": ["txacme>=0.9.2"],
 
-DEPENDENCY_LINKS = {
+    "saml2": ["pysaml2>=4.5.0"],
+    "url_preview": ["lxml>=3.5.0"],
+    "test": ["mock>=2.0", "parameterized"],
+    "sentry": ["sentry-sdk>=0.7.2"],
 }
 
 
-class MissingRequirementError(Exception):
-    def __init__(self, message, module_name, dependency):
-        super(MissingRequirementError, self).__init__(message)
-        self.module_name = module_name
-        self.dependency = dependency
-
-
-def check_requirements(config=None):
-    """Checks that all the modules needed by synapse have been correctly
-    installed and are at the correct version"""
-    for dependency, module_requirements in (
-            requirements(config, include_conditional=False).items()):
-        for module_requirement in module_requirements:
-            if ">=" in module_requirement:
-                module_name, required_version = module_requirement.split(">=")
-                version_test = ">="
-            elif "==" in module_requirement:
-                module_name, required_version = module_requirement.split("==")
-                version_test = "=="
-            else:
-                module_name = module_requirement
-                version_test = None
+def list_requirements():
+    deps = set(REQUIREMENTS)
+    for opt in CONDITIONAL_REQUIREMENTS.values():
+        deps = set(opt) | deps
+
+    return list(deps)
+
+
+class DependencyException(Exception):
+    @property
+    def message(self):
+        return "\n".join([
+            "Missing Requirements: %s" % (", ".join(self.dependencies),),
+            "To install run:",
+            "    pip install --upgrade --force %s" % (" ".join(self.dependencies),),
+            "",
+        ])
+
+    @property
+    def dependencies(self):
+        for i in self.args[0]:
+            yield '"' + i + '"'
+
+
+def check_requirements(for_feature=None, _get_distribution=get_distribution):
+    deps_needed = []
+    errors = []
+
+    if for_feature:
+        reqs = CONDITIONAL_REQUIREMENTS[for_feature]
+    else:
+        reqs = REQUIREMENTS
+
+    for dependency in reqs:
+        try:
+            _get_distribution(dependency)
+        except VersionConflict as e:
+            deps_needed.append(dependency)
+            errors.append(
+                "Needed %s, got %s==%s"
+                % (dependency, e.dist.project_name, e.dist.version)
+            )
+        except DistributionNotFound:
+            deps_needed.append(dependency)
+            errors.append("Needed %s but it was not installed" % (dependency,))
+
+    if not for_feature:
+        # Check the optional dependencies are up to date. We allow them to not be
+        # installed.
+        OPTS = sum(CONDITIONAL_REQUIREMENTS.values(), [])
 
+        for dependency in OPTS:
             try:
-                module = __import__(module_name)
-            except ImportError:
-                logging.exception(
-                    "Can't import %r which is part of %r",
-                    module_name, dependency
+                _get_distribution(dependency)
+            except VersionConflict as e:
+                deps_needed.append(dependency)
+                errors.append(
+                    "Needed optional %s, got %s==%s"
+                    % (dependency, e.dist.project_name, e.dist.version)
                 )
-                raise MissingRequirementError(
-                    "Can't import %r which is part of %r"
-                    % (module_name, dependency), module_name, dependency
-                )
-            version = getattr(module, "__version__", None)
-            file_path = getattr(module, "__file__", None)
-            logger.info(
-                "Using %r version %r from %r to satisfy %r",
-                module_name, version, file_path, dependency
-            )
+            except DistributionNotFound:
+                # If it's not found, we don't care
+                pass
 
-            if version_test == ">=":
-                if version is None:
-                    raise MissingRequirementError(
-                        "Version of %r isn't set as __version__ of module %r"
-                        % (dependency, module_name), module_name, dependency
-                    )
-                if LooseVersion(version) < LooseVersion(required_version):
-                    raise MissingRequirementError(
-                        "Version of %r in %r is too old. %r < %r"
-                        % (dependency, file_path, version, required_version),
-                        module_name, dependency
-                    )
-            elif version_test == "==":
-                if version is None:
-                    raise MissingRequirementError(
-                        "Version of %r isn't set as __version__ of module %r"
-                        % (dependency, module_name), module_name, dependency
-                    )
-                if LooseVersion(version) != LooseVersion(required_version):
-                    raise MissingRequirementError(
-                        "Unexpected version of %r in %r. %r != %r"
-                        % (dependency, file_path, version, required_version),
-                        module_name, dependency
-                    )
+    if deps_needed:
+        for e in errors:
+            logging.error(e)
 
-
-def list_requirements():
-    result = []
-    linked = []
-    for link in DEPENDENCY_LINKS.values():
-        egg = link.split("#egg=")[1]
-        linked.append(egg.split('-')[0])
-        result.append(link)
-    for requirement in requirements(include_conditional=True):
-        is_linked = False
-        for link in linked:
-            if requirement.replace('-', '_').startswith(link):
-                is_linked = True
-        if not is_linked:
-            result.append(requirement)
-    return result
+        raise DependencyException(deps_needed)
 
 
 if __name__ == "__main__":
     import sys
+
     sys.stdout.writelines(req + "\n" for req in list_requirements())
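
The rewritten check is essentially a loop over pkg_resources; the same idea in isolation:

# Sketch of the core of check_requirements(): get_distribution() accepts a
# full requirement specifier and raises on mismatch or absence.
from pkg_resources import DistributionNotFound, VersionConflict, get_distribution

for spec in ["Twisted>=18.7.0", "no-such-package>=1.0"]:
    try:
        get_distribution(spec)
    except VersionConflict as e:
        print("Needed %s, got %s==%s" % (spec, e.dist.project_name, e.dist.version))
    except DistributionNotFound:
        print("Needed %s but it was not installed" % (spec,))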
diff --git a/synapse/replication/http/__init__.py b/synapse/replication/http/__init__.py
index 19f214281e..81b85352b1 100644
--- a/synapse/replication/http/__init__.py
+++ b/synapse/replication/http/__init__.py
@@ -14,7 +14,7 @@
 # limitations under the License.
 
 from synapse.http.server import JsonResource
-from synapse.replication.http import federation, membership, send_event
+from synapse.replication.http import federation, login, membership, register, send_event
 
 REPLICATION_PREFIX = "/_synapse/replication"
 
@@ -28,3 +28,5 @@ class ReplicationRestResource(JsonResource):
         send_event.register_servlets(hs, self)
         membership.register_servlets(hs, self)
         federation.register_servlets(hs, self)
+        login.register_servlets(hs, self)
+        register.register_servlets(hs, self)
diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py
index 5e5376cf58..e81456ab2b 100644
--- a/synapse/replication/http/_base.py
+++ b/synapse/replication/http/_base.py
@@ -127,7 +127,10 @@ class ReplicationEndpoint(object):
         def send_request(**kwargs):
             data = yield cls._serialize_payload(**kwargs)
 
-            url_args = [urllib.parse.quote(kwargs[name]) for name in cls.PATH_ARGS]
+            url_args = [
+                urllib.parse.quote(kwargs[name], safe='')
+                for name in cls.PATH_ARGS
+            ]
 
             if cls.CACHE:
                 txn_id = random_string(10)
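
The safe='' argument matters because a path argument may itself contain a slash, which quote() would otherwise leave unescaped; a quick illustration:

from urllib.parse import quote

user_id = "@alice:example.com/odd"
print(quote(user_id))           # '%40alice%3Aexample.com/odd' ("/" kept by default)
print(quote(user_id, safe=""))  # '%40alice%3Aexample.com%2Fodd'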
diff --git a/synapse/replication/http/federation.py b/synapse/replication/http/federation.py
index 64a79da162..0f0a07c422 100644
--- a/synapse/replication/http/federation.py
+++ b/synapse/replication/http/federation.py
@@ -17,7 +17,7 @@ import logging
 
 from twisted.internet import defer
 
-from synapse.events import FrozenEvent
+from synapse.events import event_type_from_format_version
 from synapse.events.snapshot import EventContext
 from synapse.http.servlet import parse_json_object_from_request
 from synapse.replication.http._base import ReplicationEndpoint
@@ -70,6 +70,7 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint):
 
             event_payloads.append({
                 "event": event.get_pdu_json(),
+                "event_format_version": event.format_version,
                 "internal_metadata": event.internal_metadata.get_dict(),
                 "rejected_reason": event.rejected_reason,
                 "context": serialized_context,
@@ -94,9 +95,12 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint):
             event_and_contexts = []
             for event_payload in event_payloads:
                 event_dict = event_payload["event"]
+                format_ver = event_payload["event_format_version"]
                 internal_metadata = event_payload["internal_metadata"]
                 rejected_reason = event_payload["rejected_reason"]
-                event = FrozenEvent(event_dict, internal_metadata, rejected_reason)
+
+                EventType = event_type_from_format_version(format_ver)
+                event = EventType(event_dict, internal_metadata, rejected_reason)
 
                 context = yield EventContext.deserialize(
                     self.store, event_payload["context"],
diff --git a/synapse/replication/http/login.py b/synapse/replication/http/login.py
new file mode 100644
index 0000000000..63bc0405ea
--- /dev/null
+++ b/synapse/replication/http/login.py
@@ -0,0 +1,74 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from twisted.internet import defer
+
+from synapse.http.servlet import parse_json_object_from_request
+from synapse.replication.http._base import ReplicationEndpoint
+
+logger = logging.getLogger(__name__)
+
+
+class RegisterDeviceReplicationServlet(ReplicationEndpoint):
+    """Ensure a device is registered, generating a new access token for the
+    device.
+
+    Used during registration and login.
+    """
+
+    NAME = "device_check_registered"
+    PATH_ARGS = ("user_id",)
+
+    def __init__(self, hs):
+        super(RegisterDeviceReplicationServlet, self).__init__(hs)
+        self.registration_handler = hs.get_registration_handler()
+
+    @staticmethod
+    def _serialize_payload(user_id, device_id, initial_display_name, is_guest):
+        """
+        Args:
+            device_id (str|None): Device ID to use, if None a new one is
+                generated.
+            initial_display_name (str|None): Optional display name for the
+                new device.
+            is_guest (bool): Whether the user is a guest.
+        """
+        return {
+            "device_id": device_id,
+            "initial_display_name": initial_display_name,
+            "is_guest": is_guest,
+        }
+
+    @defer.inlineCallbacks
+    def _handle_request(self, request, user_id):
+        content = parse_json_object_from_request(request)
+
+        device_id = content["device_id"]
+        initial_display_name = content["initial_display_name"]
+        is_guest = content["is_guest"]
+
+        device_id, access_token = yield self.registration_handler.register_device(
+            user_id, device_id, initial_display_name, is_guest,
+        )
+
+        defer.returnValue((200, {
+            "device_id": device_id,
+            "access_token": access_token,
+        }))
+
+
+def register_servlets(hs, http_server):
+    RegisterDeviceReplicationServlet(hs).register(http_server)
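
Worker processes reach this servlet through the ReplicationEndpoint client factory; a sketch of the calling side, assuming the make_client helper used elsewhere in this tree:

from twisted.internet import defer

from synapse.replication.http.login import RegisterDeviceReplicationServlet


class WorkerRegistrationHandler(object):
    def __init__(self, hs):
        # make_client builds a callable that POSTs to the master's
        # /_synapse/replication/device_check_registered/<user_id> endpoint.
        self._register_device_client = (
            RegisterDeviceReplicationServlet.make_client(hs)
        )

    @defer.inlineCallbacks
    def register_device(self, user_id, device_id, initial_display_name, is_guest):
        result = yield self._register_device_client(
            user_id=user_id,
            device_id=device_id,
            initial_display_name=initial_display_name,
            is_guest=is_guest,
        )
        defer.returnValue((result["device_id"], result["access_token"]))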
diff --git a/synapse/replication/http/membership.py b/synapse/replication/http/membership.py
index e58bebf12a..81a2b204c7 100644
--- a/synapse/replication/http/membership.py
+++ b/synapse/replication/http/membership.py
@@ -191,7 +191,7 @@ class ReplicationRegister3PIDGuestRestServlet(ReplicationEndpoint):
     def __init__(self, hs):
         super(ReplicationRegister3PIDGuestRestServlet, self).__init__(hs)
 
-        self.registeration_handler = hs.get_handlers().registration_handler
+        self.registeration_handler = hs.get_registration_handler()
         self.store = hs.get_datastore()
         self.clock = hs.get_clock()
 
@@ -251,7 +251,7 @@ class ReplicationUserJoinedLeftRoomRestServlet(ReplicationEndpoint):
     def __init__(self, hs):
         super(ReplicationUserJoinedLeftRoomRestServlet, self).__init__(hs)
 
-        self.registeration_handler = hs.get_handlers().registration_handler
+        self.registeration_handler = hs.get_registration_handler()
         self.store = hs.get_datastore()
         self.clock = hs.get_clock()
         self.distributor = hs.get_distributor()
diff --git a/synapse/replication/http/register.py b/synapse/replication/http/register.py
new file mode 100644
index 0000000000..1d27c9221f
--- /dev/null
+++ b/synapse/replication/http/register.py
@@ -0,0 +1,146 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from twisted.internet import defer
+
+from synapse.http.servlet import parse_json_object_from_request
+from synapse.replication.http._base import ReplicationEndpoint
+
+logger = logging.getLogger(__name__)
+
+
+class ReplicationRegisterServlet(ReplicationEndpoint):
+    """Register a new user
+    """
+
+    NAME = "register_user"
+    PATH_ARGS = ("user_id",)
+
+    def __init__(self, hs):
+        super(ReplicationRegisterServlet, self).__init__(hs)
+        self.store = hs.get_datastore()
+
+    @staticmethod
+    def _serialize_payload(
+        user_id, token, password_hash, was_guest, make_guest, appservice_id,
+        create_profile_with_displayname, admin, user_type,
+    ):
+        """
+        Args:
+            user_id (str): The desired user ID to register.
+            token (str): The desired access token to use for this user. If this
+                is not None, the given access token is associated with the user
+                id.
+            password_hash (str|None): Optional. The password hash for this user.
+            was_guest (bool): Optional. Whether this is a guest account being
+                upgraded to a non-guest account.
+            make_guest (boolean): True if the new user should be a guest,
+                false to add a regular user account.
+            appservice_id (str|None): The ID of the appservice registering the user.
+            create_profile_with_displayname (unicode|None): Optionally create a
+                profile for the user, setting their displayname to the given value.
+            admin (boolean): Whether the new user should be registered as an admin.
+            user_type (str|None): type of user. One of the values from
+                api.constants.UserTypes, or None for a normal user.
+        """
+        return {
+            "token": token,
+            "password_hash": password_hash,
+            "was_guest": was_guest,
+            "make_guest": make_guest,
+            "appservice_id": appservice_id,
+            "create_profile_with_displayname": create_profile_with_displayname,
+            "admin": admin,
+            "user_type": user_type,
+        }
+
+    @defer.inlineCallbacks
+    def _handle_request(self, request, user_id):
+        content = parse_json_object_from_request(request)
+
+        yield self.store.register(
+            user_id=user_id,
+            token=content["token"],
+            password_hash=content["password_hash"],
+            was_guest=content["was_guest"],
+            make_guest=content["make_guest"],
+            appservice_id=content["appservice_id"],
+            create_profile_with_displayname=content["create_profile_with_displayname"],
+            admin=content["admin"],
+            user_type=content["user_type"],
+        )
+
+        defer.returnValue((200, {}))
+
+
+class ReplicationPostRegisterActionsServlet(ReplicationEndpoint):
+    """Run any post registration actions
+    """
+
+    NAME = "post_register"
+    PATH_ARGS = ("user_id",)
+
+    def __init__(self, hs):
+        super(ReplicationPostRegisterActionsServlet, self).__init__(hs)
+        self.store = hs.get_datastore()
+        self.registration_handler = hs.get_registration_handler()
+
+    @staticmethod
+    def _serialize_payload(user_id, auth_result, access_token, bind_email,
+                           bind_msisdn):
+        """
+        Args:
+            user_id (str): The user ID that consented
+            auth_result (dict): The authenticated credentials of the newly
+                registered user.
+            access_token (str|None): The access token of the newly logged in
+                device, or None if `inhibit_login` enabled.
+            bind_email (bool): Whether to bind the email with the identity
+                server
+            bind_msisdn (bool): Whether to bind the msisdn with the identity
+                server
+        """
+        return {
+            "auth_result": auth_result,
+            "access_token": access_token,
+            "bind_email": bind_email,
+            "bind_msisdn": bind_msisdn,
+        }
+
+    @defer.inlineCallbacks
+    def _handle_request(self, request, user_id):
+        content = parse_json_object_from_request(request)
+
+        auth_result = content["auth_result"]
+        access_token = content["access_token"]
+        bind_email = content["bind_email"]
+        bind_msisdn = content["bind_msisdn"]
+
+        yield self.registration_handler.post_registration_actions(
+            user_id=user_id,
+            auth_result=auth_result,
+            access_token=access_token,
+            bind_email=bind_email,
+            bind_msisdn=bind_msisdn,
+        )
+
+        defer.returnValue((200, {}))
+
+
+def register_servlets(hs, http_server):
+    ReplicationRegisterServlet(hs).register(http_server)
+    ReplicationPostRegisterActionsServlet(hs).register(http_server)
diff --git a/synapse/replication/http/send_event.py b/synapse/replication/http/send_event.py
index 5b52c91650..3635015eda 100644
--- a/synapse/replication/http/send_event.py
+++ b/synapse/replication/http/send_event.py
@@ -17,7 +17,7 @@ import logging
 
 from twisted.internet import defer
 
-from synapse.events import FrozenEvent
+from synapse.events import event_type_from_format_version
 from synapse.events.snapshot import EventContext
 from synapse.http.servlet import parse_json_object_from_request
 from synapse.replication.http._base import ReplicationEndpoint
@@ -74,6 +74,7 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
 
         payload = {
             "event": event.get_pdu_json(),
+            "event_format_version": event.format_version,
             "internal_metadata": event.internal_metadata.get_dict(),
             "rejected_reason": event.rejected_reason,
             "context": serialized_context,
@@ -90,9 +91,12 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
             content = parse_json_object_from_request(request)
 
             event_dict = content["event"]
+            format_ver = content["event_format_version"]
             internal_metadata = content["internal_metadata"]
             rejected_reason = content["rejected_reason"]
-            event = FrozenEvent(event_dict, internal_metadata, rejected_reason)
+
+            EventType = event_type_from_format_version(format_ver)
+            event = EventType(event_dict, internal_metadata, rejected_reason)
 
             requester = Requester.deserialize(self.store, content["requester"])
             context = yield EventContext.deserialize(self.store, content["context"])
diff --git a/synapse/replication/slave/storage/_base.py b/synapse/replication/slave/storage/_base.py
index 2d81d49e9a..817d1f67f9 100644
--- a/synapse/replication/slave/storage/_base.py
+++ b/synapse/replication/slave/storage/_base.py
@@ -17,7 +17,7 @@ import logging
 
 import six
 
-from synapse.storage._base import SQLBaseStore
+from synapse.storage._base import _CURRENT_STATE_CACHE_NAME, SQLBaseStore
 from synapse.storage.engines import PostgresEngine
 
 from ._slaved_id_tracker import SlavedIdTracker
@@ -54,12 +54,12 @@ class BaseSlavedStore(SQLBaseStore):
         if stream_name == "caches":
             self._cache_id_gen.advance(token)
             for row in rows:
-                try:
-                    getattr(self, row.cache_func).invalidate(tuple(row.keys))
-                except AttributeError:
-                    # We probably haven't pulled in the cache in this worker,
-                    # which is fine.
-                    pass
+                if row.cache_func == _CURRENT_STATE_CACHE_NAME:
+                    room_id = row.keys[0]
+                    members_changed = set(row.keys[1:])
+                    self._invalidate_state_caches(room_id, members_changed)
+                else:
+                    self._attempt_to_invalidate_cache(row.cache_func, tuple(row.keys))
 
     def _invalidate_cache_and_stream(self, txn, cache_func, keys):
         txn.call_after(cache_func.invalidate, keys)
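
The branch on _CURRENT_STATE_CACHE_NAME exists because the current-state caches are not a single named @cached function that can be invalidated by attribute lookup; their replication rows pack the room id and the changed members into the keys list instead. A sketch of the two row shapes, where CacheRow and the sentinel string are assumptions for illustration:

    import collections

    CacheRow = collections.namedtuple("CacheRow", ("cache_func", "keys"))

    # Ordinary row: invalidate get_profile("@alice:example.org") by name.
    plain_row = CacheRow("get_profile", ["@alice:example.org"])

    # Current-state row: keys = [room_id, changed_member, changed_member, ...]
    state_row = CacheRow(
        "cs_cache_fake",  # assumed value of _CURRENT_STATE_CACHE_NAME
        ["!room:example.org", "@alice:example.org", "@bob:example.org"],
    )
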
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index cbe9645817..586dddb40b 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -106,7 +106,7 @@ class ReplicationClientHandler(object):
 
         Can be overridden in subclasses to handle more.
         """
-        logger.info("Received rdata %s -> %s", stream_name, token)
+        logger.debug("Received rdata %s -> %s", stream_name, token)
         return self.store.process_replication_rows(stream_name, token, rows)
 
     def on_position(self, stream_name, token):
diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py
index 5dc7b3fffc..429471c345 100644
--- a/synapse/replication/tcp/protocol.py
+++ b/synapse/replication/tcp/protocol.py
@@ -268,7 +268,17 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
         if "\n" in string:
             raise Exception("Unexpected newline in command: %r" % (string,))
 
-        self.sendLine(string.encode("utf-8"))
+        encoded_string = string.encode("utf-8")
+
+        if len(encoded_string) > self.MAX_LENGTH:
+            raise Exception(
+                "Failed to send command %s as too long (%d > %d)" % (
+                    cmd.NAME,
+                    len(encoded_string), self.MAX_LENGTH,
+                )
+            )
+
+        self.sendLine(encoded_string)
 
         self.last_sent_command = self.clock.time_msec()
 
@@ -361,6 +371,11 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
     def id(self):
         return "%s-%s" % (self.name, self.conn_id)
 
+    def lineLengthExceeded(self, line):
+        """Called when we receive a line that is above the maximum line length
+        """
+        self.send_error("Line length exceeded")
+
 
 class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol):
     VALID_INBOUND_COMMANDS = VALID_CLIENT_COMMANDS
@@ -656,7 +671,7 @@ tcp_inbound_commands = LaterGauge(
     "",
     ["command", "name"],
     lambda: {
-        (k[0], p.name,): count
+        (k, p.name,): count
         for p in connected_connections
         for k, count in iteritems(p.inbound_commands_counter)
     },
@@ -667,7 +682,7 @@ tcp_outbound_commands = LaterGauge(
     "",
     ["command", "name"],
     lambda: {
-        (k[0], p.name,): count
+        (k, p.name,): count
         for p in connected_connections
         for k, count in iteritems(p.outbound_commands_counter)
     },
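
Both halves of the length guard hang off LineOnlyReceiver's MAX_LENGTH: oversized outbound commands now fail loudly at the sender, and an oversized inbound line triggers send_error rather than Twisted's default of silently dropping the connection. A quick illustration of the sender-side check, assuming the Twisted default limit:

    MAX_LENGTH = 16384  # LineOnlyReceiver default; the real protocol may override it

    command = "RDATA events 12345 " + "x" * 20000
    encoded = command.encode("utf-8")
    too_long = len(encoded) > MAX_LENGTH  # True: this command would now raise
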
diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py
index 4856822a5d..91f5247d52 100644
--- a/synapse/rest/__init__.py
+++ b/synapse/rest/__init__.py
@@ -14,8 +14,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from six import PY3
-
 from synapse.http.server import JsonResource
 from synapse.rest.client import versions
 from synapse.rest.client.v1 import (
@@ -36,6 +34,7 @@ from synapse.rest.client.v2_alpha import (
     account,
     account_data,
     auth,
+    capabilities,
     devices,
     filter,
     groups,
@@ -47,6 +46,7 @@ from synapse.rest.client.v2_alpha import (
     register,
     report_event,
     room_keys,
+    room_upgrade_rest_servlet,
     sendtodevice,
     sync,
     tags,
@@ -55,11 +55,6 @@ from synapse.rest.client.v2_alpha import (
     user_directory,
 )
 
-if not PY3:
-    from synapse.rest.client.v1_only import (
-        register as v1_register,
-    )
-
 
 class ClientRestResource(JsonResource):
     """A resource for version 1 of the matrix client API."""
@@ -72,10 +67,6 @@ class ClientRestResource(JsonResource):
     def register_servlets(client_resource, hs):
         versions.register_servlets(client_resource)
 
-        if not PY3:
-            # "v1" (Python 2 only)
-            v1_register.register_servlets(hs, client_resource)
-
         # Deprecated in r0
         initial_sync.register_servlets(hs, client_resource)
         room.register_deprecated_servlets(hs, client_resource)
@@ -116,3 +107,5 @@ class ClientRestResource(JsonResource):
         sendtodevice.register_servlets(hs, client_resource)
         user_directory.register_servlets(hs, client_resource)
         groups.register_servlets(hs, client_resource)
+        room_upgrade_rest_servlet.register_servlets(hs, client_resource)
+        capabilities.register_servlets(hs, client_resource)
diff --git a/synapse/rest/client/v1/admin.py b/synapse/rest/client/v1/admin.py
index 41534b8c2a..82433a2aa9 100644
--- a/synapse/rest/client/v1/admin.py
+++ b/synapse/rest/client/v1/admin.py
@@ -23,7 +23,7 @@ from six.moves import http_client
 
 from twisted.internet import defer
 
-from synapse.api.constants import Membership
+from synapse.api.constants import Membership, UserTypes
 from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError
 from synapse.http.servlet import (
     assert_params_in_dict,
@@ -158,6 +158,11 @@ class UserRegisterServlet(ClientV1RestServlet):
                 raise SynapseError(400, "Invalid password")
 
         admin = body.get("admin", None)
+        user_type = body.get("user_type", None)
+
+        if user_type is not None and user_type not in UserTypes.ALL_USER_TYPES:
+            raise SynapseError(400, "Invalid user type")
+
         got_mac = body["mac"]
 
         want_mac = hmac.new(
@@ -171,6 +176,9 @@ class UserRegisterServlet(ClientV1RestServlet):
         want_mac.update(password)
         want_mac.update(b"\x00")
         want_mac.update(b"admin" if admin else b"notadmin")
+        if user_type:
+            want_mac.update(b"\x00")
+            want_mac.update(user_type.encode('utf8'))
         want_mac = want_mac.hexdigest()
 
         if not hmac.compare_digest(
@@ -189,6 +197,7 @@ class UserRegisterServlet(ClientV1RestServlet):
             password=body["password"],
             admin=bool(admin),
             generate_token=False,
+            user_type=user_type,
         )
 
         result = yield register._create_registration_details(user_id, body)
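
A client driving this endpoint must reproduce the same MAC, now with the optional user_type segment appended. A client-side sketch, assuming the customary nonce-then-username prefix that precedes the fragment shown above (all arguments are bytes; the key and nonce are placeholders):

    import hmac
    from hashlib import sha1

    def build_register_mac(shared_secret, nonce, user, password,
                           admin=False, user_type=None):
        mac = hmac.new(key=shared_secret, digestmod=sha1)
        mac.update(nonce)
        mac.update(b"\x00")
        mac.update(user)
        mac.update(b"\x00")
        mac.update(password)
        mac.update(b"\x00")
        mac.update(b"admin" if admin else b"notadmin")
        if user_type:
            mac.update(b"\x00")
            mac.update(user_type.encode("utf8"))
        return mac.hexdigest()
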
diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py
index 0010699d31..6121c5b6df 100644
--- a/synapse/rest/client/v1/login.py
+++ b/synapse/rest/client/v1/login.py
@@ -18,17 +18,18 @@ import xml.etree.ElementTree as ET
 
 from six.moves import urllib
 
-from canonicaljson import json
-from saml2 import BINDING_HTTP_POST, config
-from saml2.client import Saml2Client
-
 from twisted.internet import defer
 from twisted.web.client import PartialDownloadError
 
 from synapse.api.errors import Codes, LoginError, SynapseError
 from synapse.http.server import finish_request
-from synapse.http.servlet import parse_json_object_from_request
-from synapse.types import UserID
+from synapse.http.servlet import (
+    RestServlet,
+    parse_json_object_from_request,
+    parse_string,
+)
+from synapse.rest.well_known import WellKnownBuilder
+from synapse.types import UserID, map_username_to_mxid_localpart
 from synapse.util.msisdn import phone_number_to_msisdn
 
 from .base import ClientV1RestServlet, client_path_patterns
@@ -81,30 +82,31 @@ def login_id_thirdparty_from_phone(identifier):
 
 class LoginRestServlet(ClientV1RestServlet):
     PATTERNS = client_path_patterns("/login$")
-    SAML2_TYPE = "m.login.saml2"
     CAS_TYPE = "m.login.cas"
+    SSO_TYPE = "m.login.sso"
     TOKEN_TYPE = "m.login.token"
     JWT_TYPE = "m.login.jwt"
 
     def __init__(self, hs):
         super(LoginRestServlet, self).__init__(hs)
-        self.idp_redirect_url = hs.config.saml2_idp_redirect_url
-        self.saml2_enabled = hs.config.saml2_enabled
         self.jwt_enabled = hs.config.jwt_enabled
         self.jwt_secret = hs.config.jwt_secret
         self.jwt_algorithm = hs.config.jwt_algorithm
         self.cas_enabled = hs.config.cas_enabled
         self.auth_handler = self.hs.get_auth_handler()
-        self.device_handler = self.hs.get_device_handler()
+        self.registration_handler = hs.get_registration_handler()
         self.handlers = hs.get_handlers()
+        self._well_known_builder = WellKnownBuilder(hs)
 
     def on_GET(self, request):
         flows = []
         if self.jwt_enabled:
             flows.append({"type": LoginRestServlet.JWT_TYPE})
-        if self.saml2_enabled:
-            flows.append({"type": LoginRestServlet.SAML2_TYPE})
         if self.cas_enabled:
+            flows.append({"type": LoginRestServlet.SSO_TYPE})
+
+            # we advertise CAS for backwards compat, though MSC1721 renamed it
+            # to SSO.
             flows.append({"type": LoginRestServlet.CAS_TYPE})
 
             # While it's valid for us to advertise this login type generally,
@@ -129,29 +131,21 @@ class LoginRestServlet(ClientV1RestServlet):
     def on_POST(self, request):
         login_submission = parse_json_object_from_request(request)
         try:
-            if self.saml2_enabled and (login_submission["type"] ==
-                                       LoginRestServlet.SAML2_TYPE):
-                relay_state = ""
-                if "relay_state" in login_submission:
-                    relay_state = "&RelayState=" + urllib.parse.quote(
-                                  login_submission["relay_state"])
-                result = {
-                    "uri": "%s%s" % (self.idp_redirect_url, relay_state)
-                }
-                defer.returnValue((200, result))
-            elif self.jwt_enabled and (login_submission["type"] ==
-                                       LoginRestServlet.JWT_TYPE):
+            if self.jwt_enabled and (login_submission["type"] ==
+                                     LoginRestServlet.JWT_TYPE):
                 result = yield self.do_jwt_login(login_submission)
-                defer.returnValue(result)
             elif login_submission["type"] == LoginRestServlet.TOKEN_TYPE:
                 result = yield self.do_token_login(login_submission)
-                defer.returnValue(result)
             else:
                 result = yield self._do_other_login(login_submission)
-                defer.returnValue(result)
         except KeyError:
             raise SynapseError(400, "Missing JSON keys.")
 
+        well_known_data = self._well_known_builder.get_well_known()
+        if well_known_data:
+            result["well_known"] = well_known_data
+        defer.returnValue((200, result))
+
     @defer.inlineCallbacks
     def _do_other_login(self, login_submission):
         """Handle non-token/saml/jwt logins
@@ -160,7 +154,7 @@ class LoginRestServlet(ClientV1RestServlet):
             login_submission:
 
         Returns:
-            (int, object): HTTP code/response
+            dict: HTTP response
         """
         # Log the request we got, but only certain fields to minimise the chance of
         # logging someone's password (even if they accidentally put it in the wrong
@@ -226,11 +220,10 @@ class LoginRestServlet(ClientV1RestServlet):
             login_submission,
         )
 
-        device_id = yield self._register_device(
-            canonical_user_id, login_submission,
-        )
-        access_token = yield auth_handler.get_access_token_for_user_id(
-            canonical_user_id, device_id,
+        device_id = login_submission.get("device_id")
+        initial_display_name = login_submission.get("initial_device_display_name")
+        device_id, access_token = yield self.registration_handler.register_device(
+            canonical_user_id, device_id, initial_display_name,
         )
 
         result = {
@@ -243,7 +236,7 @@ class LoginRestServlet(ClientV1RestServlet):
         if callback is not None:
             yield callback(result)
 
-        defer.returnValue((200, result))
+        defer.returnValue(result)
 
     @defer.inlineCallbacks
     def do_token_login(self, login_submission):
@@ -252,10 +245,13 @@ class LoginRestServlet(ClientV1RestServlet):
         user_id = (
             yield auth_handler.validate_short_term_login_token_and_get_user_id(token)
         )
-        device_id = yield self._register_device(user_id, login_submission)
-        access_token = yield auth_handler.get_access_token_for_user_id(
-            user_id, device_id,
+
+        device_id = login_submission.get("device_id")
+        initial_display_name = login_submission.get("initial_device_display_name")
+        device_id, access_token = yield self.registration_handler.register_device(
+            user_id, device_id, initial_display_name,
         )
+
         result = {
             "user_id": user_id,  # may have changed
             "access_token": access_token,
@@ -263,7 +259,7 @@ class LoginRestServlet(ClientV1RestServlet):
             "device_id": device_id,
         }
 
-        defer.returnValue((200, result))
+        defer.returnValue(result)
 
     @defer.inlineCallbacks
     def do_jwt_login(self, login_submission):
@@ -292,11 +288,10 @@ class LoginRestServlet(ClientV1RestServlet):
         auth_handler = self.auth_handler
         registered_user_id = yield auth_handler.check_user_exists(user_id)
         if registered_user_id:
-            device_id = yield self._register_device(
-                registered_user_id, login_submission
-            )
-            access_token = yield auth_handler.get_access_token_for_user_id(
-                registered_user_id, device_id,
+            device_id = login_submission.get("device_id")
+            initial_display_name = login_submission.get("initial_device_display_name")
+            device_id, access_token = yield self.registration_handler.register_device(
+                registered_user_id, device_id, initial_display_name,
             )
 
             result = {
@@ -305,90 +300,30 @@ class LoginRestServlet(ClientV1RestServlet):
                 "home_server": self.hs.hostname,
             }
         else:
-            # TODO: we should probably check that the register isn't going
-            # to fonx/change our user_id before registering the device
-            device_id = yield self._register_device(user_id, login_submission)
             user_id, access_token = (
                 yield self.handlers.registration_handler.register(localpart=user)
             )
+
+            device_id = login_submission.get("device_id")
+            initial_display_name = login_submission.get("initial_device_display_name")
+            device_id, access_token = yield self.registration_handler.register_device(
+                user_id, device_id, initial_display_name,
+            )
+
             result = {
                 "user_id": user_id,  # may have changed
                 "access_token": access_token,
                 "home_server": self.hs.hostname,
             }
 
-        defer.returnValue((200, result))
-
-    def _register_device(self, user_id, login_submission):
-        """Register a device for a user.
-
-        This is called after the user's credentials have been validated, but
-        before the access token has been issued.
-
-        Args:
-            (str) user_id: full canonical @user:id
-            (object) login_submission: dictionary supplied to /login call, from
-               which we pull device_id and initial_device_name
-        Returns:
-            defer.Deferred: (str) device_id
-        """
-        device_id = login_submission.get("device_id")
-        initial_display_name = login_submission.get(
-            "initial_device_display_name")
-        return self.device_handler.check_device_registered(
-            user_id, device_id, initial_display_name
-        )
+        defer.returnValue(result)
 
 
-class SAML2RestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/login/saml2", releases=())
+class CasRedirectServlet(RestServlet):
+    PATTERNS = client_path_patterns("/login/(cas|sso)/redirect")
 
     def __init__(self, hs):
-        super(SAML2RestServlet, self).__init__(hs)
-        self.sp_config = hs.config.saml2_config_path
-        self.handlers = hs.get_handlers()
-
-    @defer.inlineCallbacks
-    def on_POST(self, request):
-        saml2_auth = None
-        try:
-            conf = config.SPConfig()
-            conf.load_file(self.sp_config)
-            SP = Saml2Client(conf)
-            saml2_auth = SP.parse_authn_request_response(
-                request.args['SAMLResponse'][0], BINDING_HTTP_POST)
-        except Exception as e:        # Not authenticated
-            logger.exception(e)
-        if saml2_auth and saml2_auth.status_ok() and not saml2_auth.not_signed:
-            username = saml2_auth.name_id.text
-            handler = self.handlers.registration_handler
-            (user_id, token) = yield handler.register_saml2(username)
-            # Forward to the RelayState callback along with ava
-            if 'RelayState' in request.args:
-                request.redirect(urllib.parse.unquote(
-                                 request.args['RelayState'][0]) +
-                                 '?status=authenticated&access_token=' +
-                                 token + '&user_id=' + user_id + '&ava=' +
-                                 urllib.quote(json.dumps(saml2_auth.ava)))
-                finish_request(request)
-                defer.returnValue(None)
-            defer.returnValue((200, {"status": "authenticated",
-                                     "user_id": user_id, "token": token,
-                                     "ava": saml2_auth.ava}))
-        elif 'RelayState' in request.args:
-            request.redirect(urllib.parse.unquote(
-                             request.args['RelayState'][0]) +
-                             '?status=not_authenticated')
-            finish_request(request)
-            defer.returnValue(None)
-        defer.returnValue((200, {"status": "not_authenticated"}))
-
-
-class CasRedirectServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/login/cas/redirect", releases=())
-
-    def __init__(self, hs):
-        super(CasRedirectServlet, self).__init__(hs)
+        super(CasRedirectServlet, self).__init__()
         self.cas_server_url = hs.config.cas_server_url.encode('ascii')
         self.cas_service_url = hs.config.cas_service_url.encode('ascii')
 
@@ -416,17 +351,15 @@ class CasTicketServlet(ClientV1RestServlet):
         self.cas_server_url = hs.config.cas_server_url
         self.cas_service_url = hs.config.cas_service_url
         self.cas_required_attributes = hs.config.cas_required_attributes
-        self.auth_handler = hs.get_auth_handler()
-        self.handlers = hs.get_handlers()
-        self.macaroon_gen = hs.get_macaroon_generator()
+        self._sso_auth_handler = SSOAuthHandler(hs)
 
     @defer.inlineCallbacks
     def on_GET(self, request):
-        client_redirect_url = request.args[b"redirectUrl"][0]
+        client_redirect_url = parse_string(request, "redirectUrl", required=True)
         http_client = self.hs.get_simple_http_client()
         uri = self.cas_server_url + "/proxyValidate"
         args = {
-            "ticket": request.args[b"ticket"][0].decode('ascii'),
+            "ticket": parse_string(request, "ticket", required=True),
             "service": self.cas_service_url
         }
         try:
@@ -438,7 +371,6 @@ class CasTicketServlet(ClientV1RestServlet):
         result = yield self.handle_cas_response(request, body, client_redirect_url)
         defer.returnValue(result)
 
-    @defer.inlineCallbacks
     def handle_cas_response(self, request, cas_response_body, client_redirect_url):
         user, attributes = self.parse_cas_response(cas_response_body)
 
@@ -454,28 +386,9 @@ class CasTicketServlet(ClientV1RestServlet):
                 if required_value != actual_value:
                     raise LoginError(401, "Unauthorized", errcode=Codes.UNAUTHORIZED)
 
-        user_id = UserID(user, self.hs.hostname).to_string()
-        auth_handler = self.auth_handler
-        registered_user_id = yield auth_handler.check_user_exists(user_id)
-        if not registered_user_id:
-            registered_user_id, _ = (
-                yield self.handlers.registration_handler.register(localpart=user)
-            )
-
-        login_token = self.macaroon_gen.generate_short_term_login_token(
-            registered_user_id
+        return self._sso_auth_handler.on_successful_auth(
+            user, request, client_redirect_url,
         )
-        redirect_url = self.add_login_token_to_redirect_url(client_redirect_url,
-                                                            login_token)
-        request.redirect(redirect_url)
-        finish_request(request)
-
-    def add_login_token_to_redirect_url(self, url, token):
-        url_parts = list(urllib.parse.urlparse(url))
-        query = dict(urllib.parse.parse_qsl(url_parts[4]))
-        query.update({"loginToken": token})
-        url_parts[4] = urllib.parse.urlencode(query).encode('ascii')
-        return urllib.parse.urlunparse(url_parts)
 
     def parse_cas_response(self, cas_response_body):
         user = None
@@ -510,10 +423,78 @@ class CasTicketServlet(ClientV1RestServlet):
         return user, attributes
 
 
+class SSOAuthHandler(object):
+    """
+    Utility class for Resources and Servlets which handle the response from an
+    SSO service.
+
+    Args:
+        hs (synapse.server.HomeServer)
+    """
+    def __init__(self, hs):
+        self._hostname = hs.hostname
+        self._auth_handler = hs.get_auth_handler()
+        self._registration_handler = hs.get_registration_handler()
+        self._macaroon_gen = hs.get_macaroon_generator()
+
+    @defer.inlineCallbacks
+    def on_successful_auth(
+        self, username, request, client_redirect_url,
+        user_display_name=None,
+    ):
+        """Called once the user has successfully authenticated with the SSO.
+
+        Registers the user if necessary, and then returns a redirect (with
+        a login token) to the client.
+
+        Args:
+            username (unicode|bytes): the remote user id. We'll map this onto
+                something sane for a MXID localpath.
+
+            request (SynapseRequest): the incoming request from the browser. We'll
+                respond to it with a redirect.
+
+            client_redirect_url (unicode): the redirect_url the client gave us when
+                it first started the process.
+
+            user_display_name (unicode|None): if set, and we have to register a new user,
+                we will set their displayname to this.
+
+        Returns:
+            Deferred[None]: Completes once we have handled the request.
+        """
+        localpart = map_username_to_mxid_localpart(username)
+        user_id = UserID(localpart, self._hostname).to_string()
+        registered_user_id = yield self._auth_handler.check_user_exists(user_id)
+        if not registered_user_id:
+            registered_user_id, _ = (
+                yield self._registration_handler.register(
+                    localpart=localpart,
+                    generate_token=False,
+                    default_display_name=user_display_name,
+                )
+            )
+
+        login_token = self._macaroon_gen.generate_short_term_login_token(
+            registered_user_id
+        )
+        redirect_url = self._add_login_token_to_redirect_url(
+            client_redirect_url, login_token
+        )
+        request.redirect(redirect_url)
+        finish_request(request)
+
+    @staticmethod
+    def _add_login_token_to_redirect_url(url, token):
+        url_parts = list(urllib.parse.urlparse(url))
+        query = dict(urllib.parse.parse_qsl(url_parts[4]))
+        query.update({"loginToken": token})
+        url_parts[4] = urllib.parse.urlencode(query)
+        return urllib.parse.urlunparse(url_parts)
+
+
 def register_servlets(hs, http_server):
     LoginRestServlet(hs).register(http_server)
-    if hs.config.saml2_enabled:
-        SAML2RestServlet(hs).register(http_server)
     if hs.config.cas_enabled:
         CasRedirectServlet(hs).register(http_server)
         CasTicketServlet(hs).register(http_server)
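
The tail end of the SSO flow is the redirect built by _add_login_token_to_redirect_url; since it is a staticmethod it can be exercised directly. The URL and token below are made up:

    url = SSOAuthHandler._add_login_token_to_redirect_url(
        "https://client.example/?foo=bar", "made_up_login_token"
    )
    # -> "https://client.example/?foo=bar&loginToken=made_up_login_token"
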
diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py
index 9382b1f124..c654f9b5f0 100644
--- a/synapse/rest/client/v1/push_rule.py
+++ b/synapse/rest/client/v1/push_rule.py
@@ -42,7 +42,7 @@ class PushRuleRestServlet(ClientV1RestServlet):
 
     @defer.inlineCallbacks
     def on_PUT(self, request):
-        spec = _rule_spec_from_path(request.postpath)
+        spec = _rule_spec_from_path([x.decode('utf8') for x in request.postpath])
         try:
             priority_class = _priority_class_from_spec(spec)
         except InvalidRuleException as e:
@@ -103,7 +103,7 @@ class PushRuleRestServlet(ClientV1RestServlet):
 
     @defer.inlineCallbacks
     def on_DELETE(self, request):
-        spec = _rule_spec_from_path(request.postpath)
+        spec = _rule_spec_from_path([x.decode('utf8') for x in request.postpath])
 
         requester = yield self.auth.get_user_by_req(request)
         user_id = requester.user.to_string()
@@ -134,7 +134,7 @@ class PushRuleRestServlet(ClientV1RestServlet):
 
         rules = format_push_rules_for_user(requester.user, rules)
 
-        path = request.postpath[1:]
+        path = [x.decode('utf8') for x in request.postpath][1:]
 
         if path == []:
             # we're a reference impl: pedantry is our job.
@@ -142,11 +142,10 @@ class PushRuleRestServlet(ClientV1RestServlet):
                 PushRuleRestServlet.SLIGHTLY_PEDANTIC_TRAILING_SLASH_ERROR
             )
 
-        if path[0] == b'':
+        if path[0] == '':
             defer.returnValue((200, rules))
-        elif path[0] == b'global':
-            path = [x.decode('ascii') for x in path[1:]]
-            result = _filter_ruleset_with_path(rules['global'], path)
+        elif path[0] == 'global':
+            result = _filter_ruleset_with_path(rules['global'], path[1:])
             defer.returnValue((200, result))
         else:
             raise UnrecognizedRequestError()
@@ -190,12 +189,24 @@ class PushRuleRestServlet(ClientV1RestServlet):
 
 
 def _rule_spec_from_path(path):
+    """Turn a sequence of path components into a rule spec
+
+    Args:
+        path (sequence[unicode]): the URL path components.
+
+    Returns:
+        dict: rule spec dict, containing scope/template/rule_id entries,
+            and possibly attr.
+
+    Raises:
+        UnrecognizedRequestError if the path components cannot be parsed.
+    """
     if len(path) < 2:
         raise UnrecognizedRequestError()
-    if path[0] != b'pushrules':
+    if path[0] != 'pushrules':
         raise UnrecognizedRequestError()
 
-    scope = path[1].decode('ascii')
+    scope = path[1]
     path = path[2:]
     if scope != 'global':
         raise UnrecognizedRequestError()
@@ -203,13 +214,13 @@ def _rule_spec_from_path(path):
     if len(path) == 0:
         raise UnrecognizedRequestError()
 
-    template = path[0].decode('ascii')
+    template = path[0]
     path = path[1:]
 
     if len(path) == 0 or len(path[0]) == 0:
         raise UnrecognizedRequestError()
 
-    rule_id = path[0].decode('ascii')
+    rule_id = path[0]
 
     spec = {
         'scope': scope,
@@ -220,7 +231,7 @@ def _rule_spec_from_path(path):
     path = path[1:]
 
     if len(path) > 0 and len(path[0]) > 0:
-        spec['attr'] = path[0].decode('ascii')
+        spec['attr'] = path[0]
 
     return spec
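
With request.postpath decoded up front, _rule_spec_from_path now operates purely on unicode. A worked example of the parse, using a made-up rule path:

    # PUT /_matrix/client/r0/pushrules/global/override/.m.rule.example/enabled
    path = ["pushrules", "global", "override", ".m.rule.example", "enabled"]
    spec = _rule_spec_from_path(path)
    # spec == {
    #     "scope": "global",
    #     "template": "override",
    #     "rule_id": ".m.rule.example",
    #     "attr": "enabled",
    # }
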
 
diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py
index b84f0260f2..4c07ae7f45 100644
--- a/synapse/rest/client/v1/pusher.py
+++ b/synapse/rest/client/v1/pusher.py
@@ -142,7 +142,7 @@ class PushersRemoveRestServlet(RestServlet):
     To allow a pusher to be deleted by clicking a link (i.e. a GET request)
     """
     PATTERNS = client_path_patterns("/pushers/remove$")
-    SUCCESS_HTML = "<html><body>You have been unsubscribed</body><html>"
+    SUCCESS_HTML = b"<html><body>You have been unsubscribed</body></html>"
 
     def __init__(self, hs):
         super(PushersRemoveRestServlet, self).__init__()
diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
index fcfe7857f6..48da4d557f 100644
--- a/synapse/rest/client/v1/room.py
+++ b/synapse/rest/client/v1/room.py
@@ -89,7 +89,7 @@ class RoomStateEventRestServlet(ClientV1RestServlet):
     def __init__(self, hs):
         super(RoomStateEventRestServlet, self).__init__(hs)
         self.handlers = hs.get_handlers()
-        self.event_creation_hander = hs.get_event_creation_handler()
+        self.event_creation_handler = hs.get_event_creation_handler()
         self.room_member_handler = hs.get_room_member_handler()
         self.message_handler = hs.get_message_handler()
 
@@ -172,7 +172,7 @@ class RoomStateEventRestServlet(ClientV1RestServlet):
                 content=content,
             )
         else:
-            event = yield self.event_creation_hander.create_and_send_nonmember_event(
+            event = yield self.event_creation_handler.create_and_send_nonmember_event(
                 requester,
                 event_dict,
                 txn_id=txn_id,
@@ -189,7 +189,7 @@ class RoomSendEventRestServlet(ClientV1RestServlet):
 
     def __init__(self, hs):
         super(RoomSendEventRestServlet, self).__init__(hs)
-        self.event_creation_hander = hs.get_event_creation_handler()
+        self.event_creation_handler = hs.get_event_creation_handler()
 
     def register(self, http_server):
         # /rooms/$roomid/send/$event_type[/$txn_id]
@@ -211,7 +211,7 @@ class RoomSendEventRestServlet(ClientV1RestServlet):
         if b'ts' in request.args and requester.app_service:
             event_dict['origin_server_ts'] = parse_integer(request, "ts", 0)
 
-        event = yield self.event_creation_hander.create_and_send_nonmember_event(
+        event = yield self.event_creation_handler.create_and_send_nonmember_event(
             requester,
             event_dict,
             txn_id=txn_id,
diff --git a/synapse/rest/client/v1_only/__init__.py b/synapse/rest/client/v1_only/__init__.py
deleted file mode 100644
index 936f902ace..0000000000
--- a/synapse/rest/client/v1_only/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-"""
-REST APIs that are only used in v1 (the legacy API).
-"""
diff --git a/synapse/rest/client/v1_only/base.py b/synapse/rest/client/v1_only/base.py
deleted file mode 100644
index 9d4db7437c..0000000000
--- a/synapse/rest/client/v1_only/base.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2014-2016 OpenMarket Ltd
-# Copyright 2018 New Vector Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""This module contains base REST classes for constructing client v1 servlets.
-"""
-
-import re
-
-from synapse.api.urls import CLIENT_PREFIX
-
-
-def v1_only_client_path_patterns(path_regex, include_in_unstable=True):
-    """Creates a regex compiled client path with the correct client path
-    prefix.
-
-    Args:
-        path_regex (str): The regex string to match. This should NOT have a ^
-        as this will be prefixed.
-    Returns:
-        list of SRE_Pattern
-    """
-    patterns = [re.compile("^" + CLIENT_PREFIX + path_regex)]
-    if include_in_unstable:
-        unstable_prefix = CLIENT_PREFIX.replace("/api/v1", "/unstable")
-        patterns.append(re.compile("^" + unstable_prefix + path_regex))
-    return patterns
diff --git a/synapse/rest/client/v1_only/register.py b/synapse/rest/client/v1_only/register.py
deleted file mode 100644
index dadb376b02..0000000000
--- a/synapse/rest/client/v1_only/register.py
+++ /dev/null
@@ -1,392 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2014-2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""This module contains REST servlets to do with registration: /register"""
-import hmac
-import logging
-from hashlib import sha1
-
-from twisted.internet import defer
-
-import synapse.util.stringutils as stringutils
-from synapse.api.constants import LoginType
-from synapse.api.errors import Codes, SynapseError
-from synapse.config.server import is_threepid_reserved
-from synapse.http.servlet import assert_params_in_dict, parse_json_object_from_request
-from synapse.rest.client.v1.base import ClientV1RestServlet
-from synapse.types import create_requester
-
-from .base import v1_only_client_path_patterns
-
-logger = logging.getLogger(__name__)
-
-
-# We ought to be using hmac.compare_digest() but on older pythons it doesn't
-# exist. It's a _really minor_ security flaw to use plain string comparison
-# because the timing attack is so obscured by all the other code here it's
-# unlikely to make much difference
-if hasattr(hmac, "compare_digest"):
-    compare_digest = hmac.compare_digest
-else:
-    def compare_digest(a, b):
-        return a == b
-
-
-class RegisterRestServlet(ClientV1RestServlet):
-    """Handles registration with the home server.
-
-    This servlet is in control of the registration flow; the registration
-    handler doesn't have a concept of multi-stages or sessions.
-    """
-
-    PATTERNS = v1_only_client_path_patterns("/register$", include_in_unstable=False)
-
-    def __init__(self, hs):
-        """
-        Args:
-            hs (synapse.server.HomeServer): server
-        """
-        super(RegisterRestServlet, self).__init__(hs)
-        # sessions are stored as:
-        # self.sessions = {
-        #   "session_id" : { __session_dict__ }
-        # }
-        # TODO: persistent storage
-        self.sessions = {}
-        self.enable_registration = hs.config.enable_registration
-        self.auth = hs.get_auth()
-        self.auth_handler = hs.get_auth_handler()
-        self.handlers = hs.get_handlers()
-
-    def on_GET(self, request):
-
-        require_email = 'email' in self.hs.config.registrations_require_3pid
-        require_msisdn = 'msisdn' in self.hs.config.registrations_require_3pid
-
-        flows = []
-        if self.hs.config.enable_registration_captcha:
-            # only support the email-only flow if we don't require MSISDN 3PIDs
-            if not require_msisdn:
-                flows.extend([
-                    {
-                        "type": LoginType.RECAPTCHA,
-                        "stages": [
-                            LoginType.RECAPTCHA,
-                            LoginType.EMAIL_IDENTITY,
-                            LoginType.PASSWORD
-                        ]
-                    },
-                ])
-            # only support 3PIDless registration if no 3PIDs are required
-            if not require_email and not require_msisdn:
-                flows.extend([
-                    {
-                        "type": LoginType.RECAPTCHA,
-                        "stages": [LoginType.RECAPTCHA, LoginType.PASSWORD]
-                    }
-                ])
-        else:
-            # only support the email-only flow if we don't require MSISDN 3PIDs
-            if require_email or not require_msisdn:
-                flows.extend([
-                    {
-                        "type": LoginType.EMAIL_IDENTITY,
-                        "stages": [
-                            LoginType.EMAIL_IDENTITY, LoginType.PASSWORD
-                        ]
-                    }
-                ])
-            # only support 3PIDless registration if no 3PIDs are required
-            if not require_email and not require_msisdn:
-                flows.extend([
-                    {
-                        "type": LoginType.PASSWORD
-                    }
-                ])
-        return (200, {"flows": flows})
-
-    @defer.inlineCallbacks
-    def on_POST(self, request):
-        register_json = parse_json_object_from_request(request)
-
-        session = (register_json["session"]
-                   if "session" in register_json else None)
-        login_type = None
-        assert_params_in_dict(register_json, ["type"])
-
-        try:
-            login_type = register_json["type"]
-
-            is_application_server = login_type == LoginType.APPLICATION_SERVICE
-            can_register = (
-                self.enable_registration
-                or is_application_server
-            )
-            if not can_register:
-                raise SynapseError(403, "Registration has been disabled")
-
-            stages = {
-                LoginType.RECAPTCHA: self._do_recaptcha,
-                LoginType.PASSWORD: self._do_password,
-                LoginType.EMAIL_IDENTITY: self._do_email_identity,
-                LoginType.APPLICATION_SERVICE: self._do_app_service,
-            }
-
-            session_info = self._get_session_info(request, session)
-            logger.debug("%s : session info %s   request info %s",
-                         login_type, session_info, register_json)
-            response = yield stages[login_type](
-                request,
-                register_json,
-                session_info
-            )
-
-            if "access_token" not in response:
-                # isn't a final response
-                response["session"] = session_info["id"]
-
-            defer.returnValue((200, response))
-        except KeyError as e:
-            logger.exception(e)
-            raise SynapseError(400, "Missing JSON keys for login type %s." % (
-                login_type,
-            ))
-
-    def on_OPTIONS(self, request):
-        return (200, {})
-
-    def _get_session_info(self, request, session_id):
-        if not session_id:
-            # create a new session
-            while session_id is None or session_id in self.sessions:
-                session_id = stringutils.random_string(24)
-            self.sessions[session_id] = {
-                "id": session_id,
-                LoginType.EMAIL_IDENTITY: False,
-                LoginType.RECAPTCHA: False
-            }
-
-        return self.sessions[session_id]
-
-    def _save_session(self, session):
-        # TODO: Persistent storage
-        logger.debug("Saving session %s", session)
-        self.sessions[session["id"]] = session
-
-    def _remove_session(self, session):
-        logger.debug("Removing session %s", session)
-        self.sessions.pop(session["id"])
-
-    @defer.inlineCallbacks
-    def _do_recaptcha(self, request, register_json, session):
-        if not self.hs.config.enable_registration_captcha:
-            raise SynapseError(400, "Captcha not required.")
-
-        yield self._check_recaptcha(request, register_json, session)
-
-        session[LoginType.RECAPTCHA] = True  # mark captcha as done
-        self._save_session(session)
-        defer.returnValue({
-            "next": [LoginType.PASSWORD, LoginType.EMAIL_IDENTITY]
-        })
-
-    @defer.inlineCallbacks
-    def _check_recaptcha(self, request, register_json, session):
-        if ("captcha_bypass_hmac" in register_json and
-                self.hs.config.captcha_bypass_secret):
-            if "user" not in register_json:
-                raise SynapseError(400, "Captcha bypass needs 'user'")
-
-            want = hmac.new(
-                key=self.hs.config.captcha_bypass_secret,
-                msg=register_json["user"],
-                digestmod=sha1,
-            ).hexdigest()
-
-            # str() because otherwise hmac complains that 'unicode' does not
-            # have the buffer interface
-            got = str(register_json["captcha_bypass_hmac"])
-
-            if compare_digest(want, got):
-                session["user"] = register_json["user"]
-                defer.returnValue(None)
-            else:
-                raise SynapseError(
-                    400, "Captcha bypass HMAC incorrect",
-                    errcode=Codes.CAPTCHA_NEEDED
-                )
-
-        challenge = None
-        user_response = None
-        try:
-            challenge = register_json["challenge"]
-            user_response = register_json["response"]
-        except KeyError:
-            raise SynapseError(400, "Captcha response is required",
-                               errcode=Codes.CAPTCHA_NEEDED)
-
-        ip_addr = self.hs.get_ip_from_request(request)
-
-        handler = self.handlers.registration_handler
-        yield handler.check_recaptcha(
-            ip_addr,
-            self.hs.config.recaptcha_private_key,
-            challenge,
-            user_response
-        )
-
-    @defer.inlineCallbacks
-    def _do_email_identity(self, request, register_json, session):
-        if (self.hs.config.enable_registration_captcha and
-                not session[LoginType.RECAPTCHA]):
-            raise SynapseError(400, "Captcha is required.")
-
-        threepidCreds = register_json['threepidCreds']
-        handler = self.handlers.registration_handler
-        logger.debug("Registering email. threepidcreds: %s" % (threepidCreds))
-        yield handler.register_email(threepidCreds)
-        session["threepidCreds"] = threepidCreds  # store creds for next stage
-        session[LoginType.EMAIL_IDENTITY] = True  # mark email as done
-        self._save_session(session)
-        defer.returnValue({
-            "next": LoginType.PASSWORD
-        })
-
-    @defer.inlineCallbacks
-    def _do_password(self, request, register_json, session):
-        if (self.hs.config.enable_registration_captcha and
-                not session[LoginType.RECAPTCHA]):
-            # captcha should've been done by this stage!
-            raise SynapseError(400, "Captcha is required.")
-
-        if ("user" in session and "user" in register_json and
-                session["user"] != register_json["user"]):
-            raise SynapseError(
-                400, "Cannot change user ID during registration"
-            )
-
-        password = register_json["password"].encode("utf-8")
-        desired_user_id = (
-            register_json["user"].encode("utf-8")
-            if "user" in register_json else None
-        )
-        threepid = None
-        if session.get(LoginType.EMAIL_IDENTITY):
-            threepid = session["threepidCreds"]
-
-        handler = self.handlers.registration_handler
-        (user_id, token) = yield handler.register(
-            localpart=desired_user_id,
-            password=password,
-            threepid=threepid,
-        )
-        # Necessary due to auth checks prior to the threepid being
-        # written to the db
-        if is_threepid_reserved(self.hs.config, threepid):
-            yield self.store.upsert_monthly_active_user(user_id)
-
-        if session[LoginType.EMAIL_IDENTITY]:
-            logger.debug("Binding emails %s to %s" % (
-                session["threepidCreds"], user_id)
-            )
-            yield handler.bind_emails(user_id, session["threepidCreds"])
-
-        result = {
-            "user_id": user_id,
-            "access_token": token,
-            "home_server": self.hs.hostname,
-        }
-        self._remove_session(session)
-        defer.returnValue(result)
-
-    @defer.inlineCallbacks
-    def _do_app_service(self, request, register_json, session):
-        as_token = self.auth.get_access_token_from_request(request)
-
-        assert_params_in_dict(register_json, ["user"])
-        user_localpart = register_json["user"].encode("utf-8")
-
-        handler = self.handlers.registration_handler
-        user_id = yield handler.appservice_register(
-            user_localpart, as_token
-        )
-        token = yield self.auth_handler.issue_access_token(user_id)
-        self._remove_session(session)
-        defer.returnValue({
-            "user_id": user_id,
-            "access_token": token,
-            "home_server": self.hs.hostname,
-        })
-
-
-class CreateUserRestServlet(ClientV1RestServlet):
-    """Handles user creation via a server-to-server interface
-    """
-
-    PATTERNS = v1_only_client_path_patterns("/createUser$")
-
-    def __init__(self, hs):
-        super(CreateUserRestServlet, self).__init__(hs)
-        self.store = hs.get_datastore()
-        self.handlers = hs.get_handlers()
-
-    @defer.inlineCallbacks
-    def on_POST(self, request):
-        user_json = parse_json_object_from_request(request)
-
-        access_token = self.auth.get_access_token_from_request(request)
-        app_service = self.store.get_app_service_by_token(
-            access_token
-        )
-        if not app_service:
-            raise SynapseError(403, "Invalid application service token.")
-
-        requester = create_requester(app_service.sender)
-
-        logger.debug("creating user: %s", user_json)
-        response = yield self._do_create(requester, user_json)
-
-        defer.returnValue((200, response))
-
-    def on_OPTIONS(self, request):
-        return 403, {}
-
-    @defer.inlineCallbacks
-    def _do_create(self, requester, user_json):
-        assert_params_in_dict(user_json, ["localpart", "displayname"])
-
-        localpart = user_json["localpart"].encode("utf-8")
-        displayname = user_json["displayname"].encode("utf-8")
-        password_hash = user_json["password_hash"].encode("utf-8") \
-            if user_json.get("password_hash") else None
-
-        handler = self.handlers.registration_handler
-        user_id, token = yield handler.get_or_create_user(
-            requester=requester,
-            localpart=localpart,
-            displayname=displayname,
-            password_hash=password_hash
-        )
-
-        defer.returnValue({
-            "user_id": user_id,
-            "access_token": token,
-            "home_server": self.hs.hostname,
-        })
-
-
-def register_servlets(hs, http_server):
-    RegisterRestServlet(hs).register(http_server)
-    CreateUserRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v2_alpha/account_data.py b/synapse/rest/client/v2_alpha/account_data.py
index 21c734e525..a926aeea2d 100644
--- a/synapse/rest/client/v2_alpha/account_data.py
+++ b/synapse/rest/client/v2_alpha/account_data.py
@@ -17,7 +17,7 @@ import logging
 
 from twisted.internet import defer
 
-from synapse.api.errors import AuthError, SynapseError
+from synapse.api.errors import AuthError, NotFoundError, SynapseError
 from synapse.http.servlet import RestServlet, parse_json_object_from_request
 from synapse.types import UserID
 
@@ -29,6 +29,7 @@ logger = logging.getLogger(__name__)
 class AccountDataServlet(RestServlet):
     """
     PUT /user/{user_id}/account_data/{account_dataType} HTTP/1.1
+    GET /user/{user_id}/account_data/{account_dataType} HTTP/1.1
     """
     PATTERNS = client_v2_patterns(
         "/user/(?P<user_id>[^/]*)/account_data/(?P<account_data_type>[^/]*)"
@@ -64,10 +65,26 @@ class AccountDataServlet(RestServlet):
 
         defer.returnValue((200, {}))
 
+    @defer.inlineCallbacks
+    def on_GET(self, request, user_id, account_data_type):
+        requester = yield self.auth.get_user_by_req(request)
+        if user_id != requester.user.to_string():
+            raise AuthError(403, "Cannot get account data for other users.")
+
+        event = yield self.store.get_global_account_data_by_type_for_user(
+            account_data_type, user_id,
+        )
+
+        if event is None:
+            raise NotFoundError("Account data not found")
+
+        defer.returnValue((200, event))
+
 
 class RoomAccountDataServlet(RestServlet):
     """
     PUT /user/{user_id}/rooms/{room_id}/account_data/{account_dataType} HTTP/1.1
+    GET /user/{user_id}/rooms/{room_id}/account_data/{account_dataType} HTTP/1.1
     """
     PATTERNS = client_v2_patterns(
         "/user/(?P<user_id>[^/]*)"
@@ -106,6 +123,21 @@ class RoomAccountDataServlet(RestServlet):
 
         defer.returnValue((200, {}))
 
+    @defer.inlineCallbacks
+    def on_GET(self, request, user_id, room_id, account_data_type):
+        requester = yield self.auth.get_user_by_req(request)
+        if user_id != requester.user.to_string():
+            raise AuthError(403, "Cannot get account data for other users.")
+
+        event = yield self.store.get_account_data_for_room_and_type(
+            user_id, room_id, account_data_type,
+        )
+
+        if event is None:
+            raise NotFoundError("Room account data not found")
+
+        defer.returnValue((200, event))
+
 
 def register_servlets(hs, http_server):
     AccountDataServlet(hs).register(http_server)
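
The new GET handlers make stored account data readable back over the same URLs used to PUT it. A hypothetical client call, with the server name, user id and token all placeholders:

    import requests

    resp = requests.get(
        "https://homeserver.example/_matrix/client/r0"
        "/user/@alice:example.org/account_data/m.direct",
        headers={"Authorization": "Bearer placeholder_access_token"},
    )
    # 200 -> the content previously PUT; 404 (M_NOT_FOUND) if nothing stored
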
diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py
index 693b303881..ac035c7735 100644
--- a/synapse/rest/client/v2_alpha/auth.py
+++ b/synapse/rest/client/v2_alpha/auth.py
@@ -21,7 +21,7 @@ from synapse.api.constants import LoginType
 from synapse.api.errors import SynapseError
 from synapse.api.urls import CLIENT_V2_ALPHA_PREFIX
 from synapse.http.server import finish_request
-from synapse.http.servlet import RestServlet
+from synapse.http.servlet import RestServlet, parse_string
 
 from ._base import client_v2_patterns
 
@@ -33,7 +33,7 @@ RECAPTCHA_TEMPLATE = """
 <title>Authentication</title>
 <meta name='viewport' content='width=device-width, initial-scale=1,
     user-scalable=no, minimum-scale=1.0, maximum-scale=1.0'>
-<script src="https://www.google.com/recaptcha/api.js"
+<script src="https://www.recaptcha.net/recaptcha/api.js"
     async defer></script>
 <script src="//code.jquery.com/jquery-1.11.2.min.js"></script>
 <link rel="stylesheet" href="/_matrix/static/client/register/style.css">
@@ -68,6 +68,29 @@ function captchaDone() {
 </html>
 """
 
+TERMS_TEMPLATE = """
+<html>
+<head>
+<title>Authentication</title>
+<meta name='viewport' content='width=device-width, initial-scale=1,
+    user-scalable=no, minimum-scale=1.0, maximum-scale=1.0'>
+<link rel="stylesheet" href="/_matrix/static/client/register/style.css">
+</head>
+<body>
+<form id="registrationForm" method="post" action="%(myurl)s">
+    <div>
+        <p>
+            Please click the button below if you agree to the
+            <a href="%(terms_url)s">privacy policy of this homeserver.</a>
+        </p>
+        <input type="hidden" name="session" value="%(session)s" />
+        <input type="submit" value="Agree" />
+    </div>
+</form>
+</body>
+</html>
+"""
+
 SUCCESS_TEMPLATE = """
 <html>
 <head>
@@ -106,18 +129,14 @@ class AuthRestServlet(RestServlet):
         self.hs = hs
         self.auth = hs.get_auth()
         self.auth_handler = hs.get_auth_handler()
-        self.registration_handler = hs.get_handlers().registration_handler
+        self.registration_handler = hs.get_registration_handler()
 
-    @defer.inlineCallbacks
     def on_GET(self, request, stagetype):
-        yield
-        if stagetype == LoginType.RECAPTCHA:
-            if ('session' not in request.args or
-                    len(request.args['session']) == 0):
-                raise SynapseError(400, "No session supplied")
-
-            session = request.args["session"][0]
+        session = parse_string(request, "session")
+        if not session:
+            raise SynapseError(400, "No session supplied")
 
+        if stagetype == LoginType.RECAPTCHA:
             html = RECAPTCHA_TEMPLATE % {
                 'session': session,
                 'myurl': "%s/auth/%s/fallback/web" % (
@@ -132,25 +151,44 @@ class AuthRestServlet(RestServlet):
 
             request.write(html_bytes)
             finish_request(request)
-            defer.returnValue(None)
+            return None
+        elif stagetype == LoginType.TERMS:
+            html = TERMS_TEMPLATE % {
+                'session': session,
+                'terms_url': "%s_matrix/consent?v=%s" % (
+                    self.hs.config.public_baseurl,
+                    self.hs.config.user_consent_version,
+                ),
+                'myurl': "%s/auth/%s/fallback/web" % (
+                    CLIENT_V2_ALPHA_PREFIX, LoginType.TERMS
+                ),
+            }
+            html_bytes = html.encode("utf8")
+            request.setResponseCode(200)
+            request.setHeader(b"Content-Type", b"text/html; charset=utf-8")
+            request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),))
+
+            request.write(html_bytes)
+            finish_request(request)
+            return None
         else:
             raise SynapseError(404, "Unknown auth stage type")
 
     @defer.inlineCallbacks
     def on_POST(self, request, stagetype):
-        yield
-        if stagetype == "m.login.recaptcha":
-            if ('g-recaptcha-response' not in request.args or
-                    len(request.args['g-recaptcha-response'])) == 0:
-                raise SynapseError(400, "No captcha response supplied")
-            if ('session' not in request.args or
-                    len(request.args['session'])) == 0:
-                raise SynapseError(400, "No session supplied")
 
-            session = request.args['session'][0]
+        session = parse_string(request, "session")
+        if not session:
+            raise SynapseError(400, "No session supplied")
+
+        if stagetype == LoginType.RECAPTCHA:
+            response = parse_string(request, "g-recaptcha-response")
+
+            if not response:
+                raise SynapseError(400, "No captcha response supplied")
 
             authdict = {
-                'response': request.args['g-recaptcha-response'][0],
+                'response': response,
                 'session': session,
             }
 
@@ -179,6 +217,41 @@ class AuthRestServlet(RestServlet):
             finish_request(request)
 
             defer.returnValue(None)
+        elif stagetype == LoginType.TERMS:
+            authdict = {'session': session}
+
+            success = yield self.auth_handler.add_oob_auth(
+                LoginType.TERMS,
+                authdict,
+                self.hs.get_ip_from_request(request)
+            )
+
+            if success:
+                html = SUCCESS_TEMPLATE
+            else:
+                html = TERMS_TEMPLATE % {
+                    'session': session,
+                    'terms_url': "%s_matrix/consent?v=%s" % (
+                        self.hs.config.public_baseurl,
+                        self.hs.config.user_consent_version,
+                    ),
+                    'myurl': "%s/auth/%s/fallback/web" % (
+                        CLIENT_V2_ALPHA_PREFIX, LoginType.TERMS
+                    ),
+                }
+            html_bytes = html.encode("utf8")
+            request.setResponseCode(200)
+            request.setHeader(b"Content-Type", b"text/html; charset=utf-8")
+            request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),))
+
+            request.write(html_bytes)
+            finish_request(request)
+            defer.returnValue(None)
         else:
             raise SynapseError(404, "Unknown auth stage type")
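
From a client's perspective the terms fallback is driven like the recaptcha one: open the fallback page with the UI-auth session id, let the user agree, then retry the original request quoting only that session. A sketch with illustrative values:

    session = "placeholder_session_id"
    fallback_url = (
        "https://homeserver.example/_matrix/client/r0"
        "/auth/m.login.terms/fallback/web?session=%s" % (session,)
    )
    # 1. open fallback_url in a browser; the user clicks "Agree"
    # 2. retry the original request with the bare session in the auth dict:
    retry_auth = {"auth": {"session": session}}
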
 
diff --git a/synapse/rest/client/v2_alpha/capabilities.py b/synapse/rest/client/v2_alpha/capabilities.py
new file mode 100644
index 0000000000..373f95126e
--- /dev/null
+++ b/synapse/rest/client/v2_alpha/capabilities.py
@@ -0,0 +1,66 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+
+from twisted.internet import defer
+
+from synapse.api.constants import DEFAULT_ROOM_VERSION, RoomDisposition, RoomVersions
+from synapse.http.servlet import RestServlet
+
+from ._base import client_v2_patterns
+
+logger = logging.getLogger(__name__)
+
+
+class CapabilitiesRestServlet(RestServlet):
+    """End point to expose the capabilities of the server."""
+
+    PATTERNS = client_v2_patterns("/capabilities$")
+
+    def __init__(self, hs):
+        """
+        Args:
+            hs (synapse.server.HomeServer): server
+        """
+        super(CapabilitiesRestServlet, self).__init__()
+        self.hs = hs
+        self.auth = hs.get_auth()
+        self.store = hs.get_datastore()
+
+    @defer.inlineCallbacks
+    def on_GET(self, request):
+        requester = yield self.auth.get_user_by_req(request, allow_guest=True)
+        user = yield self.store.get_user_by_id(requester.user.to_string())
+        change_password = bool(user["password_hash"])
+
+        response = {
+            "capabilities": {
+                "m.room_versions": {
+                    "default": DEFAULT_ROOM_VERSION,
+                    "available": {
+                        RoomVersions.V1: RoomDisposition.STABLE,
+                        RoomVersions.V2: RoomDisposition.STABLE,
+                        RoomVersions.STATE_V2_TEST: RoomDisposition.UNSTABLE,
+                        RoomVersions.V3: RoomDisposition.STABLE,
+                    },
+                },
+                "m.change_password": {"enabled": change_password},
+            }
+        }
+        defer.returnValue((200, response))
+
+
+def register_servlets(hs, http_server):
+    CapabilitiesRestServlet(hs).register(http_server)
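(For illustration, roughly how a client would exercise this new endpoint;
the hostname and token are placeholders, and the /r0 path assumes the
default client_v2_patterns release.)

    import requests

    resp = requests.get(
        "https://example.com/_matrix/client/r0/capabilities",
        headers={"Authorization": "Bearer MDAxabc..."},  # hypothetical token
    )
    caps = resp.json()["capabilities"]
    # caps["m.room_versions"]["default"] is the server's default room version;
    # caps["m.change_password"]["enabled"] is False for accounts with no
    # password hash stored (e.g. SSO-only users).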
diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py
index cf1b70e39e..a107e707c7 100644
--- a/synapse/rest/client/v2_alpha/register.py
+++ b/synapse/rest/client/v2_alpha/register.py
@@ -147,7 +147,7 @@ class UsernameAvailabilityRestServlet(RestServlet):
         """
         super(UsernameAvailabilityRestServlet, self).__init__()
         self.hs = hs
-        self.registration_handler = hs.get_handlers().registration_handler
+        self.registration_handler = hs.get_registration_handler()
         self.ratelimiter = FederationRateLimiter(
             hs.get_clock(),
             # Time window of 2s
@@ -189,10 +189,9 @@ class RegisterRestServlet(RestServlet):
         self.auth = hs.get_auth()
         self.store = hs.get_datastore()
         self.auth_handler = hs.get_auth_handler()
-        self.registration_handler = hs.get_handlers().registration_handler
+        self.registration_handler = hs.get_registration_handler()
         self.identity_handler = hs.get_handlers().identity_handler
         self.room_member_handler = hs.get_room_member_handler()
-        self.device_handler = hs.get_device_handler()
         self.macaroon_gen = hs.get_macaroon_generator()
 
     @interactive_auth_handler
@@ -307,22 +306,23 @@ class RegisterRestServlet(RestServlet):
                 session_id, "registered_user_id", None
             )
 
-        # Only give msisdn flows if the x_show_msisdn flag is given:
-        # this is a hack to work around the fact that clients were shipped
-        # that use fallback registration if they see any flows that they don't
-        # recognise, which means we break registration for these clients if we
-        # advertise msisdn flows. Once usage of Riot iOS <=0.3.9 and Riot
-        # Android <=0.6.9 have fallen below an acceptable threshold, this
-        # parameter should go away and we should always advertise msisdn flows.
-        show_msisdn = False
-        if 'x_show_msisdn' in body and body['x_show_msisdn']:
-            show_msisdn = True
+        if desired_username is not None:
+            yield self.registration_handler.check_username(
+                desired_username,
+                guest_access_token=guest_access_token,
+                assigned_user_id=registered_user_id,
+            )
 
         # FIXME: need a better error than "no auth flow found" for scenarios
         # where we required 3PID for registration but the user didn't give one
         require_email = 'email' in self.hs.config.registrations_require_3pid
         require_msisdn = 'msisdn' in self.hs.config.registrations_require_3pid
 
+        show_msisdn = True
+        if self.hs.config.disable_msisdn_registration:
+            show_msisdn = False
+            require_msisdn = False
+
         flows = []
         if self.hs.config.enable_registration_captcha:
             # only support 3PIDless registration if no 3PIDs are required
@@ -357,6 +357,13 @@ class RegisterRestServlet(RestServlet):
                     [LoginType.MSISDN, LoginType.EMAIL_IDENTITY]
                 ])
 
+        # Append m.login.terms to all flows if we're requiring consent
+        if self.hs.config.user_consent_at_registration:
+            for flow in flows:
+                flow.append(LoginType.TERMS)
+
         auth_result, params, session_id = yield self.auth_handler.check_auth(
             flows, body, self.hs.get_ip_from_request(request)
         )
@@ -462,8 +469,7 @@ class RegisterRestServlet(RestServlet):
                 registered_user_id
             )
             # don't re-register the threepids
-            add_email = False
-            add_msisdn = False
+            registered = False
         else:
             # NB: This may be from the auth handler and NOT from the POST
             assert_params_in_dict(params, ["password"])
@@ -491,13 +497,16 @@ class RegisterRestServlet(RestServlet):
                 password=params.get("password", None),
                 guest_access_token=guest_access_token,
                 generate_token=False,
-                display_name=desired_display_name,
+                default_display_name=desired_display_name,
                 threepid=threepid,
             )
             # Necessary due to auth checks prior to the threepid being
             # written to the db
-            if is_threepid_reserved(self.hs.config, threepid):
-                yield self.store.upsert_monthly_active_user(registered_user_id)
+            if threepid:
+                if is_threepid_reserved(
+                    self.hs.config.mau_limits_reserved_threepids, threepid
+                ):
+                    yield self.store.upsert_monthly_active_user(registered_user_id)
 
             if self.hs.config.shadow_server:
                 yield self.registration_handler.shadow_register(
@@ -513,25 +522,19 @@ class RegisterRestServlet(RestServlet):
                 session_id, "registered_user_id", registered_user_id
             )
 
-            add_email = True
-            add_msisdn = True
+            registered = True
 
         return_dict = yield self._create_registration_details(
             registered_user_id, params
         )
 
-        if add_email and auth_result and LoginType.EMAIL_IDENTITY in auth_result:
-            threepid = auth_result[LoginType.EMAIL_IDENTITY]
-            yield self._register_email_threepid(
-                registered_user_id, threepid, return_dict["access_token"],
-                params.get("bind_email")
-            )
-
-        if add_msisdn and auth_result and LoginType.MSISDN in auth_result:
-            threepid = auth_result[LoginType.MSISDN]
-            yield self._register_msisdn_threepid(
-                registered_user_id, threepid, return_dict["access_token"],
-                params.get("bind_msisdn")
+        if registered:
+            yield self.registration_handler.post_registration_actions(
+                user_id=registered_user_id,
+                auth_result=auth_result,
+                access_token=return_dict.get("access_token"),
+                bind_email=params.get("bind_email"),
+                bind_msisdn=params.get("bind_msisdn"),
             )
 
         defer.returnValue((200, return_dict))
@@ -607,115 +610,6 @@ class RegisterRestServlet(RestServlet):
         defer.returnValue(result)
 
     @defer.inlineCallbacks
-    def _register_email_threepid(self, user_id, threepid, token, bind_email):
-        """Add an email address as a 3pid identifier
-
-        Also adds an email pusher for the email address, if configured in the
-        HS config
-
-        Also optionally binds emails to the given user_id on the identity server
-
-        Args:
-            user_id (str): id of user
-            threepid (object): m.login.email.identity auth response
-            token (str): access_token for the user
-            bind_email (bool): true if the client requested the email to be
-                bound at the identity server
-        Returns:
-            defer.Deferred:
-        """
-        reqd = ('medium', 'address', 'validated_at')
-        if any(x not in threepid for x in reqd):
-            # This will only happen if the ID server returns a malformed response
-            logger.info("Can't add incomplete 3pid")
-            return
-
-        yield self.auth_handler.add_threepid(
-            user_id,
-            threepid['medium'],
-            threepid['address'],
-            threepid['validated_at'],
-        )
-
-        # And we add an email pusher for them by default, but only
-        # if email notifications are enabled (so people don't start
-        # getting mail spam where they weren't before if email
-        # notifs are set up on a home server)
-        if (self.hs.config.email_enable_notifs and
-                self.hs.config.email_notif_for_new_users):
-            # Pull the ID of the access token back out of the db
-            # It would really make more sense for this to be passed
-            # up when the access token is saved, but that's quite an
-            # invasive change I'd rather do separately.
-            user_tuple = yield self.store.get_user_by_access_token(
-                token
-            )
-            token_id = user_tuple["token_id"]
-
-            yield self.hs.get_pusherpool().add_pusher(
-                user_id=user_id,
-                access_token=token_id,
-                kind="email",
-                app_id="m.email",
-                app_display_name="Email Notifications",
-                device_display_name=threepid["address"],
-                pushkey=threepid["address"],
-                lang=None,  # We don't know a user's language here
-                data={},
-            )
-
-        if bind_email:
-            logger.info("bind_email specified: binding")
-            logger.debug("Binding emails %s to %s" % (
-                threepid, user_id
-            ))
-            yield self.identity_handler.bind_threepid(
-                threepid['threepid_creds'], user_id
-            )
-        else:
-            logger.info("bind_email not specified: not binding email")
-
-    @defer.inlineCallbacks
-    def _register_msisdn_threepid(self, user_id, threepid, token, bind_msisdn):
-        """Add a phone number as a 3pid identifier
-
-        Also optionally binds msisdn to the given user_id on the identity server
-
-        Args:
-            user_id (str): id of user
-            threepid (object): m.login.msisdn auth response
-            token (str): access_token for the user
-            bind_email (bool): true if the client requested the email to be
-                bound at the identity server
-        Returns:
-            defer.Deferred:
-        """
-        try:
-            assert_params_in_dict(threepid, ['medium', 'address', 'validated_at'])
-        except SynapseError as ex:
-            if ex.errcode == Codes.MISSING_PARAM:
-                # This will only happen if the ID server returns a malformed response
-                logger.info("Can't add incomplete 3pid")
-                defer.returnValue(None)
-            raise
-
-        yield self.auth_handler.add_threepid(
-            user_id,
-            threepid['medium'],
-            threepid['address'],
-            threepid['validated_at'],
-        )
-
-        if bind_msisdn:
-            logger.info("bind_msisdn specified: binding")
-            logger.debug("Binding msisdn %s to %s", threepid, user_id)
-            yield self.identity_handler.bind_threepid(
-                threepid['threepid_creds'], user_id
-            )
-        else:
-            logger.info("bind_msisdn not specified: not binding msisdn")
-
-    @defer.inlineCallbacks
     def _create_registration_details(self, user_id, params):
         """Complete registration of newly-registered user
 
@@ -733,12 +627,10 @@ class RegisterRestServlet(RestServlet):
             "home_server": self.hs.hostname,
         }
         if not params.get("inhibit_login", False):
-            device_id = yield self._register_device(user_id, params)
-
-            access_token = (
-                yield self.auth_handler.get_access_token_for_user_id(
-                    user_id, device_id=device_id,
-                )
+            device_id = params.get("device_id")
+            initial_display_name = params.get("initial_device_display_name")
+            device_id, access_token = yield self.registration_handler.register_device(
+                user_id, device_id, initial_display_name, is_guest=False,
             )
 
             result.update({
@@ -747,26 +639,6 @@ class RegisterRestServlet(RestServlet):
             })
         defer.returnValue(result)
 
-    def _register_device(self, user_id, params):
-        """Register a device for a user.
-
-        This is called after the user's credentials have been validated, but
-        before the access token has been issued.
-
-        Args:
-            (str) user_id: full canonical @user:id
-            (object) params: registration parameters, from which we pull
-                device_id and initial_device_name
-        Returns:
-            defer.Deferred: (str) device_id
-        """
-        # register the user's device
-        device_id = params.get("device_id")
-        initial_display_name = params.get("initial_device_display_name")
-        return self.device_handler.check_device_registered(
-            user_id, device_id, initial_display_name
-        )
-
     @defer.inlineCallbacks
     def _do_guest_registration(self, params):
         if not self.hs.config.allow_guest_access:
@@ -780,13 +652,10 @@ class RegisterRestServlet(RestServlet):
         # we have nowhere to store it.
         device_id = synapse.api.auth.GUEST_DEVICE_ID
         initial_display_name = params.get("initial_device_display_name")
-        yield self.device_handler.check_device_registered(
-            user_id, device_id, initial_display_name
+        device_id, access_token = yield self.registration_handler.register_device(
+            user_id, device_id, initial_display_name, is_guest=True,
         )
 
-        access_token = self.macaroon_gen.generate_access_token(
-            user_id, ["guest = true"]
-        )
         defer.returnValue((200, {
             "user_id": user_id,
             "device_id": device_id,
diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py
index 45b5817d8b..220a0de30b 100644
--- a/synapse/rest/client/v2_alpha/room_keys.py
+++ b/synapse/rest/client/v2_alpha/room_keys.py
@@ -17,7 +17,7 @@ import logging
 
 from twisted.internet import defer
 
-from synapse.api.errors import Codes, SynapseError
+from synapse.api.errors import Codes, NotFoundError, SynapseError
 from synapse.http.servlet import (
     RestServlet,
     parse_json_object_from_request,
@@ -208,10 +208,25 @@ class RoomKeysServlet(RestServlet):
             user_id, version, room_id, session_id
         )
 
+        # Convert room_keys to the right format to return.
         if session_id:
-            room_keys = room_keys['rooms'][room_id]['sessions'][session_id]
+            # If the client requests a specific session, but that session was
+            # not backed up, then return an M_NOT_FOUND.
+            if room_keys['rooms'] == {}:
+                raise NotFoundError("No room_keys found")
+            else:
+                room_keys = room_keys['rooms'][room_id]['sessions'][session_id]
         elif room_id:
-            room_keys = room_keys['rooms'][room_id]
+            # If the client requests all sessions from a room, but no sessions
+            # are found, return an empty result rather than an error: an empty
+            # result is valid here, and it saves clients from handling an
+            # error condition.  (The same applies when the client requests all
+            # sessions in the backup, but in that case room_keys is already in
+            # the right format, so nothing more needs doing.)
+            if room_keys['rooms'] == {}:
+                room_keys = {'sessions': {}}
+            else:
+                room_keys = room_keys['rooms'][room_id]
 
         defer.returnValue((200, room_keys))
 
@@ -365,6 +380,40 @@ class RoomKeysVersionServlet(RestServlet):
         )
         defer.returnValue((200, {}))
 
+    @defer.inlineCallbacks
+    def on_PUT(self, request, version):
+        """
+        Update the information about a given version of the user's room_keys backup.
+
+        PUT /room_keys/version/12345 HTTP/1.1
+        Content-Type: application/json
+        {
+            "algorithm": "m.megolm_backup.v1",
+            "auth_data": {
+                "public_key": "abcdefg",
+                "signatures": {
+                    "ed25519:something": "hijklmnop"
+                }
+            },
+            "version": "42"
+        }
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+        {}
+        """
+        requester = yield self.auth.get_user_by_req(request, allow_guest=False)
+        user_id = requester.user.to_string()
+        info = parse_json_object_from_request(request)
+
+        if version is None:
+            raise SynapseError(400, "No version specified to update", Codes.MISSING_PARAM)
+
+        yield self.e2e_room_keys_handler.update_version(
+            user_id, version, info
+        )
+        defer.returnValue((200, {}))
+
 
 def register_servlets(hs, http_server):
     RoomKeysServlet(hs).register(http_server)
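(From the client side, the new on_PUT handler would be driven roughly as
below; the URL prefix, token and key material are placeholder assumptions.)

    import requests

    requests.put(
        "https://example.com/_matrix/client/unstable/room_keys/version/12345",
        headers={"Authorization": "Bearer MDAxabc..."},  # hypothetical token
        json={
            "algorithm": "m.megolm_backup.v1",
            "auth_data": {
                "public_key": "abcdefg",
                "signatures": {"ed25519:something": "hijklmnop"},
            },
            "version": "12345",
        },
    )
    # A 200 with an empty JSON object means the backup metadata was updated.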
diff --git a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py
new file mode 100644
index 0000000000..e6356101fd
--- /dev/null
+++ b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from twisted.internet import defer
+
+from synapse.api.constants import KNOWN_ROOM_VERSIONS
+from synapse.api.errors import Codes, SynapseError
+from synapse.http.servlet import (
+    RestServlet,
+    assert_params_in_dict,
+    parse_json_object_from_request,
+)
+
+from ._base import client_v2_patterns
+
+logger = logging.getLogger(__name__)
+
+
+class RoomUpgradeRestServlet(RestServlet):
+    """Handler for room uprade requests.
+
+    Handles requests of the form:
+
+        POST /_matrix/client/r0/rooms/$roomid/upgrade HTTP/1.1
+        Content-Type: application/json
+
+        {
+            "new_version": "2",
+        }
+
+    Creates a new room and shuts down the old one. Returns the ID of the new room.
+
+    Args:
+        hs (synapse.server.HomeServer):
+    """
+    PATTERNS = client_v2_patterns(
+        # /rooms/$roomid/upgrade
+        "/rooms/(?P<room_id>[^/]*)/upgrade$",
+        v2_alpha=False,
+    )
+
+    def __init__(self, hs):
+        super(RoomUpgradeRestServlet, self).__init__()
+        self._hs = hs
+        self._room_creation_handler = hs.get_room_creation_handler()
+        self._auth = hs.get_auth()
+
+    @defer.inlineCallbacks
+    def on_POST(self, request, room_id):
+        requester = yield self._auth.get_user_by_req(request)
+
+        content = parse_json_object_from_request(request)
+        assert_params_in_dict(content, ("new_version", ))
+        new_version = content["new_version"]
+
+        if new_version not in KNOWN_ROOM_VERSIONS:
+            raise SynapseError(
+                400,
+                "Your homeserver does not support this room version",
+                Codes.UNSUPPORTED_ROOM_VERSION,
+            )
+
+        new_room_id = yield self._room_creation_handler.upgrade_room(
+            requester, room_id, new_version
+        )
+
+        ret = {
+            "replacement_room": new_room_id,
+        }
+
+        defer.returnValue((200, ret))
+
+
+def register_servlets(hs, http_server):
+    RoomUpgradeRestServlet(hs).register(http_server)
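(A quick sketch of the request this servlet accepts; room id, token and
hostname are placeholders.)

    import requests

    resp = requests.post(
        "https://example.com/_matrix/client/r0/rooms/!abc123:example.com/upgrade",
        headers={"Authorization": "Bearer MDAxabc..."},  # hypothetical token
        json={"new_version": "2"},
    )
    # On success: {"replacement_room": "!newroom:example.com"}. An unknown
    # version is rejected with a 400 and errcode M_UNSUPPORTED_ROOM_VERSION.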
diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py
index 0251146722..39d157a44b 100644
--- a/synapse/rest/client/v2_alpha/sync.py
+++ b/synapse/rest/client/v2_alpha/sync.py
@@ -75,7 +75,7 @@ class SyncRestServlet(RestServlet):
     """
 
     PATTERNS = client_v2_patterns("/sync$")
-    ALLOWED_PRESENCE = set(["online", "offline"])
+    ALLOWED_PRESENCE = set(["online", "offline", "unavailable"])
 
     def __init__(self, hs):
         super(SyncRestServlet, self).__init__()
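(With "unavailable" now accepted, a client can pass the idle state straight
through on /sync; hostname and token are placeholders.)

    import requests

    requests.get(
        "https://example.com/_matrix/client/r0/sync",
        params={"set_presence": "unavailable", "timeout": 30000},
        headers={"Authorization": "Bearer MDAxabc..."},  # hypothetical token
    )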
diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py
index 29e62bfcdd..27e7cbf3cc 100644
--- a/synapse/rest/client/versions.py
+++ b/synapse/rest/client/versions.py
@@ -38,6 +38,7 @@ class VersionsRestServlet(RestServlet):
                 "r0.1.0",
                 "r0.2.0",
                 "r0.3.0",
+                "r0.4.0",
             ],
             # as per MSC1497:
             "unstable_features": {
diff --git a/synapse/rest/consent/consent_resource.py b/synapse/rest/consent/consent_resource.py
index 7362e1858d..6b371bfa2f 100644
--- a/synapse/rest/consent/consent_resource.py
+++ b/synapse/rest/consent/consent_resource.py
@@ -89,6 +89,7 @@ class ConsentResource(Resource):
 
         self.hs = hs
         self.store = hs.get_datastore()
+        self.registration_handler = hs.get_registration_handler()
 
         # this is required by the request_handler wrapper
         self.clock = hs.get_clock()
@@ -100,16 +101,7 @@ class ConsentResource(Resource):
                 "missing in config file.",
             )
 
-        # daemonize changes the cwd to /, so make the path absolute now.
-        consent_template_directory = path.abspath(
-            hs.config.user_consent_template_dir,
-        )
-        if not path.isdir(consent_template_directory):
-            raise ConfigError(
-                "Could not find template directory '%s'" % (
-                    consent_template_directory,
-                ),
-            )
+        consent_template_directory = hs.config.user_consent_template_dir
 
         loader = jinja2.FileSystemLoader(consent_template_directory)
         self._jinja_env = jinja2.Environment(
@@ -137,27 +129,36 @@ class ConsentResource(Resource):
             request (twisted.web.http.Request):
         """
 
-        version = parse_string(request, "v",
-                               default=self._default_consent_version)
-        username = parse_string(request, "u", required=True)
-        userhmac = parse_string(request, "h", required=True, encoding=None)
+        version = parse_string(request, "v", default=self._default_consent_version)
+        username = parse_string(request, "u", required=False, default="")
+        userhmac = None
+        has_consented = False
+        public_version = username == ""
+        if not public_version:
+            userhmac_bytes = parse_string(request, "h", required=True, encoding=None)
 
-        self._check_hash(username, userhmac)
+            self._check_hash(username, userhmac_bytes)
 
-        if username.startswith('@'):
-            qualified_user_id = username
-        else:
-            qualified_user_id = UserID(username, self.hs.hostname).to_string()
+            if username.startswith('@'):
+                qualified_user_id = username
+            else:
+                qualified_user_id = UserID(username, self.hs.hostname).to_string()
 
-        u = yield self.store.get_user_by_id(qualified_user_id)
-        if u is None:
-            raise NotFoundError("Unknown user")
+            u = yield self.store.get_user_by_id(qualified_user_id)
+            if u is None:
+                raise NotFoundError("Unknown user")
+
+            has_consented = u["consent_version"] == version
+            userhmac = userhmac_bytes.decode("ascii")
 
         try:
             self._render_template(
                 request, "%s.html" % (version,),
-                user=username, userhmac=userhmac, version=version,
-                has_consented=(u["consent_version"] == version),
+                user=username,
+                userhmac=userhmac,
+                version=version,
+                has_consented=has_consented,
+                public_version=public_version,
             )
         except TemplateNotFound:
             raise NotFoundError("Unknown policy version")
@@ -190,6 +191,7 @@ class ConsentResource(Resource):
             if e.code != 404:
                 raise
             raise NotFoundError("Unknown user")
+        yield self.registration_handler.post_consent_actions(qualified_user_id)
 
         try:
             self._render_template(request, "success.html")
@@ -223,7 +225,7 @@ class ConsentResource(Resource):
             key=self._hmac_secret,
             msg=userid.encode('utf-8'),
             digestmod=sha256,
-        ).hexdigest()
+        ).hexdigest().encode('ascii')
 
         if not compare_digest(want_mac, userhmac):
             raise SynapseError(http_client.FORBIDDEN, "HMAC incorrect")
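(The trailing .encode('ascii') matters because compare_digest wants both
arguments to be the same type. A minimal sketch of how the expected MAC is
built; the secret is a stand-in for self._hmac_secret.)

    import hmac
    from hashlib import sha256

    hmac_secret = b"somesecret"          # hypothetical form secret
    userid = "@alice:example.com"

    want_mac = hmac.new(
        key=hmac_secret,
        msg=userid.encode("utf-8"),
        digestmod=sha256,
    ).hexdigest().encode("ascii")        # bytes, like the parsed "h" parameter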
diff --git a/synapse/rest/key/v1/server_key_resource.py b/synapse/rest/key/v1/server_key_resource.py
deleted file mode 100644
index 38eb2ee23f..0000000000
--- a/synapse/rest/key/v1/server_key_resource.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2014-2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import logging
-
-from canonicaljson import encode_canonical_json
-from signedjson.sign import sign_json
-from unpaddedbase64 import encode_base64
-
-from OpenSSL import crypto
-from twisted.web.resource import Resource
-
-from synapse.http.server import respond_with_json_bytes
-
-logger = logging.getLogger(__name__)
-
-
-class LocalKey(Resource):
-    """HTTP resource containing encoding the TLS X.509 certificate and NACL
-    signature verification keys for this server::
-
-        GET /key HTTP/1.1
-
-        HTTP/1.1 200 OK
-        Content-Type: application/json
-        {
-            "server_name": "this.server.example.com"
-            "verify_keys": {
-                "algorithm:version": # base64 encoded NACL verification key.
-            },
-            "tls_certificate": # base64 ASN.1 DER encoded X.509 tls cert.
-            "signatures": {
-                "this.server.example.com": {
-                   "algorithm:version": # NACL signature for this server.
-                }
-            }
-        }
-    """
-
-    def __init__(self, hs):
-        self.response_body = encode_canonical_json(
-            self.response_json_object(hs.config)
-        )
-        Resource.__init__(self)
-
-    @staticmethod
-    def response_json_object(server_config):
-        verify_keys = {}
-        for key in server_config.signing_key:
-            verify_key_bytes = key.verify_key.encode()
-            key_id = "%s:%s" % (key.alg, key.version)
-            verify_keys[key_id] = encode_base64(verify_key_bytes)
-
-        x509_certificate_bytes = crypto.dump_certificate(
-            crypto.FILETYPE_ASN1,
-            server_config.tls_certificate
-        )
-        json_object = {
-            u"server_name": server_config.server_name,
-            u"verify_keys": verify_keys,
-            u"tls_certificate": encode_base64(x509_certificate_bytes)
-        }
-        for key in server_config.signing_key:
-            json_object = sign_json(
-                json_object,
-                server_config.server_name,
-                key,
-            )
-
-        return json_object
-
-    def render_GET(self, request):
-        return respond_with_json_bytes(
-            request, 200, self.response_body,
-        )
-
-    def getChild(self, name, request):
-        if name == b'':
-            return self
diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py
index 76e479afa3..d16a30acd8 100644
--- a/synapse/rest/media/v1/_base.py
+++ b/synapse/rest/media/v1/_base.py
@@ -16,6 +16,7 @@
 import logging
 import os
 
+from six import PY3
 from six.moves import urllib
 
 from twisted.internet import defer
@@ -48,26 +49,21 @@ def parse_media_id(request):
         return server_name, media_id, file_name
     except Exception:
         raise SynapseError(
-            404,
-            "Invalid media id token %r" % (request.postpath,),
-            Codes.UNKNOWN,
+            404, "Invalid media id token %r" % (request.postpath,), Codes.UNKNOWN
         )
 
 
 def respond_404(request):
     respond_with_json(
-        request, 404,
-        cs_error(
-            "Not found %r" % (request.postpath,),
-            code=Codes.NOT_FOUND,
-        ),
-        send_cors=True
+        request,
+        404,
+        cs_error("Not found %r" % (request.postpath,), code=Codes.NOT_FOUND),
+        send_cors=True,
     )
 
 
 @defer.inlineCallbacks
-def respond_with_file(request, media_type, file_path,
-                      file_size=None, upload_name=None):
+def respond_with_file(request, media_type, file_path, file_size=None, upload_name=None):
     logger.debug("Responding with %r", file_path)
 
     if os.path.isfile(file_path):
@@ -97,31 +93,26 @@ def add_file_headers(request, media_type, file_size, upload_name):
         file_size (int): Size in bytes of the media, if known.
         upload_name (str): The name of the requested file, if any.
     """
+
     def _quote(x):
         return urllib.parse.quote(x.encode("utf-8"))
 
     request.setHeader(b"Content-Type", media_type.encode("UTF-8"))
     if upload_name:
         if is_ascii(upload_name):
-            disposition = ("inline; filename=%s" % (_quote(upload_name),)).encode("ascii")
+            disposition = "inline; filename=%s" % (_quote(upload_name),)
         else:
-            disposition = (
-                "inline; filename*=utf-8''%s" % (_quote(upload_name),)).encode("ascii")
+            disposition = "inline; filename*=utf-8''%s" % (_quote(upload_name),)
 
-        request.setHeader(b"Content-Disposition", disposition)
+        request.setHeader(b"Content-Disposition", disposition.encode('ascii'))
 
     # cache for at least a day.
     # XXX: we might want to turn this off for data we don't want to
     # recommend caching as it's sensitive or private - or at least
     # select private. don't bother setting Expires as all our
     # clients are smart enough to be happy with Cache-Control
-    request.setHeader(
-        b"Cache-Control", b"public,max-age=86400,s-maxage=86400"
-    )
-
-    request.setHeader(
-        b"Content-Length", b"%d" % (file_size,)
-    )
+    request.setHeader(b"Cache-Control", b"public,max-age=86400,s-maxage=86400")
+    request.setHeader(b"Content-Length", b"%d" % (file_size,))
 
 
 @defer.inlineCallbacks
@@ -142,8 +133,15 @@ def respond_with_responder(request, responder, media_type, file_size, upload_nam
 
     logger.debug("Responding to media request with responder %s")
     add_file_headers(request, media_type, file_size, upload_name)
-    with responder:
-        yield responder.write_to_consumer(request)
+    try:
+        with responder:
+            yield responder.write_to_consumer(request)
+    except Exception as e:
+        # The majority of the time this will be due to the client having gone
+        # away. Unfortunately, Twisted simply throws a generic exception at us
+        # in that case.
+        logger.warning("Failed to write to consumer: %s %s", type(e), e)
+
     finish_request(request)
 
 
@@ -153,6 +151,7 @@ class Responder(object):
     Responder is a context manager which *must* be used, so that any resources
     held can be cleaned up.
     """
+
     def write_to_consumer(self, consumer):
         """Stream response into consumer
 
@@ -186,9 +185,18 @@ class FileInfo(object):
         thumbnail_method (str)
         thumbnail_type (str): Content type of thumbnail, e.g. image/png
     """
-    def __init__(self, server_name, file_id, url_cache=False,
-                 thumbnail=False, thumbnail_width=None, thumbnail_height=None,
-                 thumbnail_method=None, thumbnail_type=None):
+
+    def __init__(
+        self,
+        server_name,
+        file_id,
+        url_cache=False,
+        thumbnail=False,
+        thumbnail_width=None,
+        thumbnail_height=None,
+        thumbnail_method=None,
+        thumbnail_type=None,
+    ):
         self.server_name = server_name
         self.file_id = file_id
         self.url_cache = url_cache
@@ -197,3 +205,74 @@ class FileInfo(object):
         self.thumbnail_height = thumbnail_height
         self.thumbnail_method = thumbnail_method
         self.thumbnail_type = thumbnail_type
+
+
+def get_filename_from_headers(headers):
+    """
+    Get the filename of the downloaded file by inspecting the
+    Content-Disposition HTTP header.
+
+    Args:
+        headers (twisted.web.http_headers.Headers): The HTTP
+            request headers.
+
+    Returns:
+        A Unicode string of the filename, or None.
+    """
+    content_disposition = headers.get(b"Content-Disposition", [b''])
+
+    # No header, bail out.
+    if not content_disposition[0]:
+        return
+
+    # dict of unicode: bytes, corresponding to the key value sections of the
+    # Content-Disposition header.
+    params = {}
+    parts = content_disposition[0].split(b";")
+    for i in parts:
+        # Split into key-value pairs, if able
+        # We don't care about things like `inline`, so throw it out
+        if b"=" not in i:
+            continue
+
+        key, value = i.strip().split(b"=", 1)
+        params[key.decode('ascii')] = value
+
+    upload_name = None
+
+    # First check if there is a valid UTF-8 filename
+    upload_name_utf8 = params.get("filename*", None)
+    if upload_name_utf8:
+        if upload_name_utf8.lower().startswith(b"utf-8''"):
+            upload_name_utf8 = upload_name_utf8[7:]
+            # We have a filename*= section. This MUST be ASCII, and any UTF-8
+            # bytes are %-quoted.
+            if PY3:
+                try:
+                    # Once it is decoded, we can then unquote the %-encoded
+                    # parts strictly into a unicode string.
+                    upload_name = urllib.parse.unquote(
+                        upload_name_utf8.decode('ascii'), errors="strict"
+                    )
+                except UnicodeDecodeError:
+                    # Incorrect UTF-8.
+                    pass
+            else:
+                # On Python 2, we first unquote the %-encoded parts and then
+                # decode it strictly using UTF-8.
+                try:
+                    upload_name = urllib.parse.unquote(upload_name_utf8).decode('utf8')
+                except UnicodeDecodeError:
+                    pass
+
+    # If there isn't one, check for an ASCII name.
+    if not upload_name:
+        upload_name_ascii = params.get("filename", None)
+        if upload_name_ascii and is_ascii(upload_name_ascii):
+            # Make sure there's no %-quoted bytes. If there is, reject it as
+            # non-valid ASCII.
+            if b"%" not in upload_name_ascii:
+                upload_name = upload_name_ascii.decode('ascii')
+
+    # This may be None here, indicating we did not find a matching name.
+    return upload_name
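(A few illustrative cases for the new helper, using the dict-of-lists
header shape its callers pass in; the values are made up.)

    # RFC 5987 filename*= takes precedence and is %-decoded as UTF-8:
    hdrs = {b"Content-Disposition": [b"inline; filename*=utf-8''caf%C3%A9.png"]}
    get_filename_from_headers(hdrs)  # -> u"café.png" (on Python 3)

    # A plain filename= must be pure ASCII with no %-quoting:
    hdrs = {b"Content-Disposition": [b"inline; filename=logo.png"]}
    get_filename_from_headers(hdrs)  # -> u"logo.png"

    # No usable name:
    get_filename_from_headers({})    # -> None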
diff --git a/synapse/rest/media/v1/config_resource.py b/synapse/rest/media/v1/config_resource.py
index d6605b6027..77316033f7 100644
--- a/synapse/rest/media/v1/config_resource.py
+++ b/synapse/rest/media/v1/config_resource.py
@@ -41,7 +41,7 @@ class MediaConfigResource(Resource):
     @defer.inlineCallbacks
     def _async_render_GET(self, request):
         yield self.auth.get_user_by_req(request)
-        respond_with_json(request, 200, self.limits_dict)
+        respond_with_json(request, 200, self.limits_dict, send_cors=True)
 
     def render_OPTIONS(self, request):
         respond_with_json(request, 200, {}, send_cors=True)
diff --git a/synapse/rest/media/v1/download_resource.py b/synapse/rest/media/v1/download_resource.py
index f911b120b1..bdc5daecc1 100644
--- a/synapse/rest/media/v1/download_resource.py
+++ b/synapse/rest/media/v1/download_resource.py
@@ -48,7 +48,8 @@ class DownloadResource(Resource):
         set_cors_headers(request)
         request.setHeader(
             b"Content-Security-Policy",
-            b"default-src 'none';"
+            b"sandbox;"
+            b" default-src 'none';"
             b" script-src 'none';"
             b" plugin-types application/pdf;"
             b" style-src 'unsafe-inline';"
diff --git a/synapse/rest/media/v1/identicon_resource.py b/synapse/rest/media/v1/identicon_resource.py
deleted file mode 100644
index bdbd8d50dd..0000000000
--- a/synapse/rest/media/v1/identicon_resource.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright 2015, 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from pydenticon import Generator
-
-from twisted.web.resource import Resource
-
-from synapse.http.servlet import parse_integer
-
-FOREGROUND = [
-    "rgb(45,79,255)",
-    "rgb(254,180,44)",
-    "rgb(226,121,234)",
-    "rgb(30,179,253)",
-    "rgb(232,77,65)",
-    "rgb(49,203,115)",
-    "rgb(141,69,170)"
-]
-
-BACKGROUND = "rgb(224,224,224)"
-SIZE = 5
-
-
-class IdenticonResource(Resource):
-    isLeaf = True
-
-    def __init__(self):
-        Resource.__init__(self)
-        self.generator = Generator(
-            SIZE, SIZE, foreground=FOREGROUND, background=BACKGROUND,
-        )
-
-    def generate_identicon(self, name, width, height):
-        v_padding = width % SIZE
-        h_padding = height % SIZE
-        top_padding = v_padding // 2
-        left_padding = h_padding // 2
-        bottom_padding = v_padding - top_padding
-        right_padding = h_padding - left_padding
-        width -= v_padding
-        height -= h_padding
-        padding = (top_padding, bottom_padding, left_padding, right_padding)
-        identicon = self.generator.generate(
-            name, width, height, padding=padding
-        )
-        return identicon
-
-    def render_GET(self, request):
-        name = "/".join(request.postpath)
-        width = parse_integer(request, "width", default=96)
-        height = parse_integer(request, "height", default=96)
-        identicon_bytes = self.generate_identicon(name, width, height)
-        request.setHeader(b"Content-Type", b"image/png")
-        request.setHeader(
-            b"Cache-Control", b"public,max-age=86400,s-maxage=86400"
-        )
-        return identicon_bytes
diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py
index 08b1867fab..bdffa97805 100644
--- a/synapse/rest/media/v1/media_repository.py
+++ b/synapse/rest/media/v1/media_repository.py
@@ -14,14 +14,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import cgi
 import errno
 import logging
 import os
 import shutil
 
-from six import PY3, iteritems
-from six.moves.urllib import parse as urlparse
+from six import iteritems
 
 import twisted.internet.error
 import twisted.web.http
@@ -32,20 +30,24 @@ from synapse.api.errors import (
     FederationDeniedError,
     HttpResponseException,
     NotFoundError,
+    RequestSendFailed,
     SynapseError,
 )
-from synapse.http.matrixfederationclient import MatrixFederationHttpClient
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.util import logcontext
 from synapse.util.async_helpers import Linearizer
 from synapse.util.retryutils import NotRetryingDestination
-from synapse.util.stringutils import is_ascii, random_string
+from synapse.util.stringutils import random_string
 
-from ._base import FileInfo, respond_404, respond_with_responder
+from ._base import (
+    FileInfo,
+    get_filename_from_headers,
+    respond_404,
+    respond_with_responder,
+)
 from .config_resource import MediaConfigResource
 from .download_resource import DownloadResource
 from .filepath import MediaFilePaths
-from .identicon_resource import IdenticonResource
 from .media_storage import MediaStorage
 from .preview_url_resource import PreviewUrlResource
 from .storage_provider import StorageProviderWrapper
@@ -63,7 +65,7 @@ class MediaRepository(object):
     def __init__(self, hs):
         self.hs = hs
         self.auth = hs.get_auth()
-        self.client = MatrixFederationHttpClient(hs)
+        self.client = hs.get_http_client()
         self.clock = hs.get_clock()
         self.server_name = hs.hostname
         self.store = hs.get_datastore()
@@ -371,10 +373,10 @@ class MediaRepository(object):
                         "allow_remote": "false",
                     }
                 )
-            except twisted.internet.error.DNSLookupError as e:
-                logger.warn("HTTP error fetching remote media %s/%s: %r",
+            except RequestSendFailed as e:
+                logger.warn("Request failed fetching remote media %s/%s: %r",
                             server_name, media_id, e)
-                raise NotFoundError()
+                raise SynapseError(502, "Failed to fetch remote media")
 
             except HttpResponseException as e:
                 logger.warn("HTTP error fetching remote media %s/%s: %s",
@@ -398,39 +400,9 @@ class MediaRepository(object):
             yield finish()
 
         media_type = headers[b"Content-Type"][0].decode('ascii')
-
+        upload_name = get_filename_from_headers(headers)
         time_now_ms = self.clock.time_msec()
 
-        content_disposition = headers.get(b"Content-Disposition", None)
-        if content_disposition:
-            _, params = cgi.parse_header(content_disposition[0].decode('ascii'),)
-            upload_name = None
-
-            # First check if there is a valid UTF-8 filename
-            upload_name_utf8 = params.get("filename*", None)
-            if upload_name_utf8:
-                if upload_name_utf8.lower().startswith("utf-8''"):
-                    upload_name = upload_name_utf8[7:]
-
-            # If there isn't check for an ascii name.
-            if not upload_name:
-                upload_name_ascii = params.get("filename", None)
-                if upload_name_ascii and is_ascii(upload_name_ascii):
-                    upload_name = upload_name_ascii
-
-            if upload_name:
-                if PY3:
-                    upload_name = urlparse.unquote(upload_name)
-                else:
-                    upload_name = urlparse.unquote(upload_name.encode('ascii'))
-                try:
-                    if isinstance(upload_name, bytes):
-                        upload_name = upload_name.decode("utf-8")
-                except UnicodeDecodeError:
-                    upload_name = None
-        else:
-            upload_name = None
-
         logger.info("Stored remote media in file %r", fname)
 
         yield self.store.store_cached_remote_media(
@@ -769,7 +741,6 @@ class MediaRepositoryResource(Resource):
         self.putChild(b"thumbnail", ThumbnailResource(
             hs, media_repo, media_repo.media_storage,
         ))
-        self.putChild(b"identicon", IdenticonResource())
         if hs.config.url_preview_enabled:
             self.putChild(b"preview_url", PreviewUrlResource(
                 hs, media_repo, media_repo.media_storage,
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index 1a7bfd6b56..ba3ab1d37d 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -12,7 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import cgi
+
 import datetime
 import errno
 import fnmatch
@@ -24,6 +24,7 @@ import shutil
 import sys
 import traceback
 
+import six
 from six import string_types
 from six.moves import urllib_parse as urlparse
 
@@ -34,7 +35,7 @@ from twisted.web.resource import Resource
 from twisted.web.server import NOT_DONE_YET
 
 from synapse.api.errors import Codes, SynapseError
-from synapse.http.client import SpiderHttpClient
+from synapse.http.client import SimpleHttpClient
 from synapse.http.server import (
     respond_with_json,
     respond_with_json_bytes,
@@ -42,15 +43,19 @@ from synapse.http.server import (
 )
 from synapse.http.servlet import parse_integer, parse_string
 from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.rest.media.v1._base import get_filename_from_headers
 from synapse.util.async_helpers import ObservableDeferred
 from synapse.util.caches.expiringcache import ExpiringCache
 from synapse.util.logcontext import make_deferred_yieldable, run_in_background
-from synapse.util.stringutils import is_ascii, random_string
+from synapse.util.stringutils import random_string
 
 from ._base import FileInfo
 
 logger = logging.getLogger(__name__)
 
+_charset_match = re.compile(br"<\s*meta[^>]*charset\s*=\s*([a-z0-9-]+)", flags=re.I)
+_content_type_match = re.compile(r'.*; *charset="?(.*?)"?(;|$)', flags=re.I)
+
 
 class PreviewUrlResource(Resource):
     isLeaf = True
@@ -64,7 +69,12 @@ class PreviewUrlResource(Resource):
         self.max_spider_size = hs.config.max_spider_size
         self.server_name = hs.hostname
         self.store = hs.get_datastore()
-        self.client = SpiderHttpClient(hs)
+        self.client = SimpleHttpClient(
+            hs,
+            treq_args={"browser_like_redirects": True},
+            ip_whitelist=hs.config.url_preview_ip_range_whitelist,
+            ip_blacklist=hs.config.url_preview_ip_range_blacklist,
+        )
         self.media_repo = media_repo
         self.primary_base_path = media_repo.primary_base_path
         self.media_storage = media_storage
@@ -98,7 +108,7 @@ class PreviewUrlResource(Resource):
         # XXX: if get_user_by_req fails, what should we do in an async render?
         requester = yield self.auth.get_user_by_req(request)
         url = parse_string(request, "url")
-        if "ts" in request.args:
+        if b"ts" in request.args:
             ts = parse_integer(request, "ts")
         else:
             ts = self.clock.time_msec()
@@ -180,7 +190,12 @@ class PreviewUrlResource(Resource):
             cache_result["expires_ts"] > ts and
             cache_result["response_code"] / 100 == 2
         ):
-            defer.returnValue(cache_result["og"])
+            # It may be stored as text in the database, not as bytes (such as
+            # PostgreSQL). If so, encode it back before handing it on.
+            og = cache_result["og"]
+            if isinstance(og, six.text_type):
+                og = og.encode('utf8')
+            defer.returnValue(og)
             return
 
         media_info = yield self._download_url(url, user)
@@ -213,15 +228,28 @@ class PreviewUrlResource(Resource):
         elif _is_html(media_info['media_type']):
             # TODO: somehow stop a big HTML tree from exploding synapse's RAM
 
-            file = open(media_info['filename'])
-            body = file.read()
-            file.close()
+            with open(media_info['filename'], 'rb') as file:
+                body = file.read()
 
-            # clobber the encoding from the content-type, or default to utf-8
-            # XXX: this overrides any <meta/> or XML charset headers in the body
-            # which may pose problems, but so far seems to work okay.
-            match = re.match(r'.*; *charset=(.*?)(;|$)', media_info['media_type'], re.I)
-            encoding = match.group(1) if match else "utf-8"
+            encoding = None
+
+            # Let's try and figure out if it has an encoding set in a meta tag.
+            # Limit it to the first 1kb, since it ought to be in the meta tags
+            # at the top.
+            match = _charset_match.search(body[:1000])
+
+            # If we find a match, it should take precedence over the
+            # Content-Type header, so set it here.
+            if match:
+                encoding = match.group(1).decode('ascii')
+
+            # If we don't find a match, we'll look at the HTTP Content-Type, and
+            # if that doesn't exist, we'll fall back to UTF-8.
+            if not encoding:
+                match = _content_type_match.match(
+                    media_info['media_type']
+                )
+                encoding = match.group(1) if match else "utf-8"
 
             og = decode_and_calc_og(body, media_info['uri'], encoding)
 
@@ -295,6 +323,11 @@ class PreviewUrlResource(Resource):
                 length, headers, uri, code = yield self.client.get_file(
                     url, output_stream=f, max_size=self.max_spider_size,
                 )
+            except SynapseError:
+                # Pass SynapseErrors through directly, so that the servlet
+                # handler will return a SynapseError to the client instead of
+                # blank data or a 500.
+                raise
             except Exception as e:
                 # FIXME: pass through 404s and other error messages nicely
                 logger.warn("Error downloading %s: %r", url, e)
@@ -313,31 +346,7 @@ class PreviewUrlResource(Resource):
                 media_type = "application/octet-stream"
             time_now_ms = self.clock.time_msec()
 
-            content_disposition = headers.get(b"Content-Disposition", None)
-            if content_disposition:
-                _, params = cgi.parse_header(content_disposition[0],)
-                download_name = None
-
-                # First check if there is a valid UTF-8 filename
-                download_name_utf8 = params.get("filename*", None)
-                if download_name_utf8:
-                    if download_name_utf8.lower().startswith("utf-8''"):
-                        download_name = download_name_utf8[7:]
-
-                # If there isn't check for an ascii name.
-                if not download_name:
-                    download_name_ascii = params.get("filename", None)
-                    if download_name_ascii and is_ascii(download_name_ascii):
-                        download_name = download_name_ascii
-
-                if download_name:
-                    download_name = urlparse.unquote(download_name)
-                    try:
-                        download_name = download_name.decode("utf-8")
-                    except UnicodeDecodeError:
-                        download_name = None
-            else:
-                download_name = None
+            download_name = get_filename_from_headers(headers)
 
             yield self.store.store_local_media(
                 media_id=file_id,
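(The charset sniffing added above amounts, in isolation, to something like
this hypothetical helper; the precedence mirrors the hunk: a <meta charset>
in the first 1kb wins, then the Content-Type header, then UTF-8.)

    import re

    _charset_match = re.compile(
        br"<\s*meta[^>]*charset\s*=\s*([a-z0-9-]+)", flags=re.I
    )
    _content_type_match = re.compile(r'.*; *charset="?(.*?)"?(;|$)', flags=re.I)

    def guess_encoding(body, media_type):
        # body: page content as bytes; media_type: Content-Type header value
        match = _charset_match.search(body[:1000])
        if match:
            return match.group(1).decode("ascii")
        match = _content_type_match.match(media_type)
        return match.group(1) if match else "utf-8"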
diff --git a/synapse/storage/schema/delta/34/sent_txn_purge.py b/synapse/rest/saml2/__init__.py
index 0ffab10b6f..68da37ca6a 100644
--- a/synapse/storage/schema/delta/34/sent_txn_purge.py
+++ b/synapse/rest/saml2/__init__.py
@@ -1,4 +1,5 @@
-# Copyright 2016 OpenMarket Ltd
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -11,22 +12,18 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 import logging
 
-from synapse.storage.engines import PostgresEngine
-
-logger = logging.getLogger(__name__)
+from twisted.web.resource import Resource
 
+from synapse.rest.saml2.metadata_resource import SAML2MetadataResource
+from synapse.rest.saml2.response_resource import SAML2ResponseResource
 
-def run_create(cur, database_engine, *args, **kwargs):
-    if isinstance(database_engine, PostgresEngine):
-        cur.execute("TRUNCATE sent_transactions")
-    else:
-        cur.execute("DELETE FROM sent_transactions")
-
-    cur.execute("CREATE INDEX sent_transactions_ts ON sent_transactions(ts)")
+logger = logging.getLogger(__name__)
 
 
-def run_upgrade(cur, database_engine, *args, **kwargs):
-    pass
+class SAML2Resource(Resource):
+    def __init__(self, hs):
+        Resource.__init__(self)
+        self.putChild(b"metadata.xml", SAML2MetadataResource(hs))
+        self.putChild(b"authn_response", SAML2ResponseResource(hs))
diff --git a/synapse/rest/saml2/metadata_resource.py b/synapse/rest/saml2/metadata_resource.py
new file mode 100644
index 0000000000..e8c680aeb4
--- /dev/null
+++ b/synapse/rest/saml2/metadata_resource.py
@@ -0,0 +1,36 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import saml2.metadata
+
+from twisted.web.resource import Resource
+
+
+class SAML2MetadataResource(Resource):
+    """A Twisted web resource which renders the SAML metadata"""
+
+    isLeaf = 1
+
+    def __init__(self, hs):
+        Resource.__init__(self)
+        self.sp_config = hs.config.saml2_sp_config
+
+    def render_GET(self, request):
+        metadata_xml = saml2.metadata.create_metadata_string(
+            configfile=None, config=self.sp_config,
+        )
+        request.setHeader(b"Content-Type", b"text/xml; charset=utf-8")
+        return metadata_xml
diff --git a/synapse/rest/saml2/response_resource.py b/synapse/rest/saml2/response_resource.py
new file mode 100644
index 0000000000..69fb77b322
--- /dev/null
+++ b/synapse/rest/saml2/response_resource.py
@@ -0,0 +1,74 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+
+import saml2
+from saml2.client import Saml2Client
+
+from twisted.web.resource import Resource
+from twisted.web.server import NOT_DONE_YET
+
+from synapse.api.errors import CodeMessageException
+from synapse.http.server import wrap_html_request_handler
+from synapse.http.servlet import parse_string
+from synapse.rest.client.v1.login import SSOAuthHandler
+
+logger = logging.getLogger(__name__)
+
+
+class SAML2ResponseResource(Resource):
+    """A Twisted web resource which handles the SAML response"""
+
+    isLeaf = 1
+
+    def __init__(self, hs):
+        Resource.__init__(self)
+
+        self._saml_client = Saml2Client(hs.config.saml2_sp_config)
+        self._sso_auth_handler = SSOAuthHandler(hs)
+
+    def render_POST(self, request):
+        self._async_render_POST(request)
+        return NOT_DONE_YET
+
+    @wrap_html_request_handler
+    def _async_render_POST(self, request):
+        resp_bytes = parse_string(request, 'SAMLResponse', required=True)
+        relay_state = parse_string(request, 'RelayState', required=True)
+
+        try:
+            saml2_auth = self._saml_client.parse_authn_request_response(
+                resp_bytes, saml2.BINDING_HTTP_POST,
+            )
+        except Exception as e:
+            logger.warning("Exception parsing SAML2 response", exc_info=1)
+            raise CodeMessageException(
+                400, "Unable to parse SAML2 response: %s" % (e,),
+            )
+
+        if saml2_auth.not_signed:
+            raise CodeMessageException(400, "SAML2 response was not signed")
+
+        if "uid" not in saml2_auth.ava:
+            raise CodeMessageException(400, "uid not in SAML2 response")
+
+        username = saml2_auth.ava["uid"][0]
+
+        displayName = saml2_auth.ava.get("displayName", [None])[0]
+        return self._sso_auth_handler.on_successful_auth(
+            username, request, relay_state,
+            user_display_name=displayName,
+        )
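
pysaml2 returns attribute-value assertions (the `ava` dict) as lists of
values, which is why the handler above indexes [0]. An illustrative shape,
with invented values:

    ava = {
        "uid": ["alice"],
        "displayName": ["Alice Example"],
    }
    username = ava["uid"][0]                          # "alice"
    display_name = ava.get("displayName", [None])[0]  # "Alice Example"
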
diff --git a/synapse/rest/well_known.py b/synapse/rest/well_known.py
new file mode 100644
index 0000000000..c0a4ae93e5
--- /dev/null
+++ b/synapse/rest/well_known.py
@@ -0,0 +1,73 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import logging
+
+from twisted.web.resource import Resource
+
+from synapse.http.server import set_cors_headers
+
+logger = logging.getLogger(__name__)
+
+
+class WellKnownBuilder(object):
+    """Utility to construct the well-known response
+
+    Args:
+        hs (synapse.server.HomeServer):
+    """
+    def __init__(self, hs):
+        self._config = hs.config
+
+    def get_well_known(self):
+        # if we don't have a public_baseurl, we can't help much here.
+        if self._config.public_baseurl is None:
+            return None
+
+        result = {
+            "m.homeserver": {
+                "base_url": self._config.public_baseurl,
+            },
+        }
+
+        if self._config.default_identity_server:
+            result["m.identity_server"] = {
+                "base_url": self._config.default_identity_server,
+            }
+
+        return result
+
+
+class WellKnownResource(Resource):
+    """A Twisted web resource which renders the .well-known file"""
+
+    isLeaf = 1
+
+    def __init__(self, hs):
+        Resource.__init__(self)
+        self._well_known_builder = WellKnownBuilder(hs)
+
+    def render_GET(self, request):
+        set_cors_headers(request)
+        r = self._well_known_builder.get_well_known()
+        if not r:
+            request.setResponseCode(404)
+            request.setHeader(b"Content-Type", b"text/plain")
+            return b'.well-known not available'
+
+        logger.debug("returning: %s", r)
+        request.setHeader(b"Content-Type", b"application/json")
+        return json.dumps(r).encode("utf-8")
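
Assuming both public_baseurl and default_identity_server are configured, the
resource above serves a JSON body of this shape (the URLs are examples):

    import json

    well_known = {
        "m.homeserver": {"base_url": "https://matrix.example.com/"},
        "m.identity_server": {"base_url": "https://vector.im"},
    }
    print(json.dumps(well_known))
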
diff --git a/synapse/server.py b/synapse/server.py
index cf6b872cbd..4d364fccce 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -23,6 +23,7 @@ import abc
 import logging
 
 from twisted.enterprise import adbapi
+from twisted.mail.smtp import sendmail
 from twisted.web.client import BrowserLikePolicyForHTTPS
 
 from synapse.api.auth import Auth
@@ -30,6 +31,7 @@ from synapse.api.filtering import Filtering
 from synapse.api.ratelimiting import Ratelimiter
 from synapse.appservice.api import ApplicationServiceApi
 from synapse.appservice.scheduler import ApplicationServiceScheduler
+from synapse.crypto import context_factory
 from synapse.crypto.keyring import Keyring
 from synapse.events.builder import EventBuilderFactory
 from synapse.events.spamcheck import SpamChecker
@@ -45,6 +47,7 @@ from synapse.federation.transport.client import TransportLayerClient
 from synapse.groups.attestations import GroupAttestationSigning, GroupAttestionRenewer
 from synapse.groups.groups_server import GroupsServerHandler
 from synapse.handlers import Handlers
+from synapse.handlers.acme import AcmeHandler
 from synapse.handlers.appservice import ApplicationServicesHandler
 from synapse.handlers.auth import AuthHandler, MacaroonGenerator
 from synapse.handlers.deactivate_account import DeactivateAccountHandler
@@ -61,6 +64,7 @@ from synapse.handlers.presence import PresenceHandler
 from synapse.handlers.profile import BaseProfileHandler, MasterProfileHandler
 from synapse.handlers.read_marker import ReadMarkerHandler
 from synapse.handlers.receipts import ReceiptsHandler
+from synapse.handlers.register import RegistrationHandler
 from synapse.handlers.room import RoomContextHandler, RoomCreationHandler
 from synapse.handlers.room_list import RoomListHandler
 from synapse.handlers.room_member import RoomMemberMasterHandler
@@ -110,6 +114,8 @@ class HomeServer(object):
 
     Attributes:
         config (synapse.config.homeserver.HomeserverConfig):
+        _listening_services (list[twisted.internet.tcp.Port]): TCP ports that
+            we are listening on to provide HTTP services.
     """
 
     __metaclass__ = abc.ABCMeta
@@ -128,6 +134,7 @@ class HomeServer(object):
         'sync_handler',
         'typing_handler',
         'room_list_handler',
+        'acme_handler',
         'auth_handler',
         'device_handler',
         'e2e_keys_handler',
@@ -174,6 +181,8 @@ class HomeServer(object):
         'message_handler',
         'pagination_handler',
         'room_context_handler',
+        'sendmail',
+        'registration_handler',
     ]
 
     # This is overridden in derived application classes
@@ -192,6 +201,7 @@ class HomeServer(object):
         self._reactor = reactor
         self.hostname = hostname
         self._building = {}
+        self._listening_services = []
 
         self.clock = Clock(reactor)
         self.distributor = Distributor()
@@ -269,6 +279,9 @@ class HomeServer(object):
     def build_room_creation_handler(self):
         return RoomCreationHandler(self)
 
+    def build_sendmail(self):
+        return sendmail
+
     def build_state_handler(self):
         return StateHandler(self)
 
@@ -305,6 +318,9 @@ class HomeServer(object):
     def build_e2e_room_keys_handler(self):
         return E2eRoomKeysHandler(self)
 
+    def build_acme_handler(self):
+        return AcmeHandler(self)
+
     def build_application_service_api(self):
         return ApplicationServiceApi(self)
 
@@ -345,10 +361,7 @@ class HomeServer(object):
         return Keyring(self)
 
     def build_event_builder_factory(self):
-        return EventBuilderFactory(
-            clock=self.get_clock(),
-            hostname=self.hostname,
-        )
+        return EventBuilderFactory(self)
 
     def build_filtering(self):
         return Filtering(self)
@@ -357,7 +370,10 @@ class HomeServer(object):
         return PusherPool(self)
 
     def build_http_client(self):
-        return MatrixFederationHttpClient(self)
+        tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
+            self.config
+        )
+        return MatrixFederationHttpClient(self, tls_client_options_factory)
 
     def build_db_pool(self):
         name = self.db_config["name"]
@@ -467,6 +483,9 @@ class HomeServer(object):
     def build_room_context_handler(self):
         return RoomContextHandler(self)
 
+    def build_registration_handler(self):
+        return RegistrationHandler(self)
+
     def remove_pusher(self, app_id, push_key, user_id):
         return self.get_pusherpool().remove_pusher(app_id, push_key, user_id)
 
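
The new 'sendmail', 'acme_handler' and 'registration_handler' entries plug
into HomeServer's builder pattern: each listed name gets a cached
get_<name>() accessor backed by the corresponding build_<name>() method. A
minimal sketch of that pattern (not Synapse's actual metaclass machinery):

    class MiniHomeServer(object):
        DEPENDENCIES = ["sendmail", "acme_handler"]

        def __init__(self):
            self._built = {}

        def _get(self, name):
            # build once, then cache for subsequent calls
            if name not in self._built:
                self._built[name] = getattr(self, "build_" + name)()
            return self._built[name]

        def build_sendmail(self):
            return lambda *a, **kw: None  # stand-in for twisted.mail.smtp.sendmail

        def build_acme_handler(self):
            return object()  # stand-in for AcmeHandler(self)

        def get_sendmail(self):
            return self._get("sendmail")

        def get_acme_handler(self):
            return self._get("acme_handler")
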
diff --git a/synapse/server.pyi b/synapse/server.pyi
index ce28486233..06cd083a74 100644
--- a/synapse/server.pyi
+++ b/synapse/server.pyi
@@ -7,6 +7,9 @@ import synapse.handlers.auth
 import synapse.handlers.deactivate_account
 import synapse.handlers.device
 import synapse.handlers.e2e_keys
+import synapse.handlers.room
+import synapse.handlers.room_member
+import synapse.handlers.message
 import synapse.handlers.set_password
 import synapse.rest.media.v1.media_repository
 import synapse.server_notices.server_notices_manager
@@ -50,6 +53,9 @@ class HomeServer(object):
     def get_room_creation_handler(self) -> synapse.handlers.room.RoomCreationHandler:
         pass
 
+    def get_room_member_handler(self) -> synapse.handlers.room_member.RoomMemberHandler:
+        pass
+
     def get_event_creation_handler(self) -> synapse.handlers.message.EventCreationHandler:
         pass
 
diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py
index 9b40b18d5b..68058f613c 100644
--- a/synapse/state/__init__.py
+++ b/synapse/state/__init__.py
@@ -261,7 +261,7 @@ class StateHandler(object):
         logger.debug("calling resolve_state_groups from compute_event_context")
 
         entry = yield self.resolve_state_groups_for_events(
-            event.room_id, [e for e, _ in event.prev_events],
+            event.room_id, event.prev_event_ids(),
         )
 
         prev_state_ids = entry.state
@@ -607,9 +607,11 @@ def resolve_events_with_store(room_version, state_sets, event_map, state_res_sto
         return v1.resolve_events_with_store(
             state_sets, event_map, state_res_store.get_events,
         )
-    elif room_version == RoomVersions.VDH_TEST:
+    elif room_version in (
+        RoomVersions.STATE_V2_TEST, RoomVersions.V2, RoomVersions.V3,
+    ):
         return v2.resolve_events_with_store(
-            state_sets, event_map, state_res_store,
+            room_version, state_sets, event_map, state_res_store,
         )
     else:
         # This should only happen if we added a version but forgot to add it to
diff --git a/synapse/state/v1.py b/synapse/state/v1.py
index 70a981f4a2..6d3afcae7c 100644
--- a/synapse/state/v1.py
+++ b/synapse/state/v1.py
@@ -21,7 +21,7 @@ from six import iteritems, iterkeys, itervalues
 from twisted.internet import defer
 
 from synapse import event_auth
-from synapse.api.constants import EventTypes
+from synapse.api.constants import EventTypes, RoomVersions
 from synapse.api.errors import AuthError
 
 logger = logging.getLogger(__name__)
@@ -274,7 +274,11 @@ def _resolve_auth_events(events, auth_events):
         auth_events[(prev_event.type, prev_event.state_key)] = prev_event
         try:
             # The signatures have already been checked at this point
-            event_auth.check(event, auth_events, do_sig_check=False, do_size_check=False)
+            event_auth.check(
+                RoomVersions.V1, event, auth_events,
+                do_sig_check=False,
+                do_size_check=False,
+            )
             prev_event = event
         except AuthError:
             return prev_event
@@ -286,7 +290,11 @@ def _resolve_normal_events(events, auth_events):
     for event in _ordered_events(events):
         try:
             # The signatures have already been checked at this point
-            event_auth.check(event, auth_events, do_sig_check=False, do_size_check=False)
+            event_auth.check(
+                RoomVersions.V1, event, auth_events,
+                do_sig_check=False,
+                do_size_check=False,
+            )
             return event
         except AuthError:
             pass
@@ -298,6 +306,8 @@ def _resolve_normal_events(events, auth_events):
 
 def _ordered_events(events):
     def key_func(e):
-        return -int(e.depth), hashlib.sha1(e.event_id.encode('ascii')).hexdigest()
+        # we have to use utf-8 rather than ascii here because it turns out we allow
+        # people to send us events with non-ascii event IDs :/
+        return -int(e.depth), hashlib.sha1(e.event_id.encode('utf-8')).hexdigest()
 
     return sorted(events, key=key_func)
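
The encoding change in _ordered_events matters because a non-ascii event ID
would previously crash the sort key outright:

    import hashlib

    event_id = u"$ev\u00e9nt:example.com"  # non-ascii event IDs do occur
    # event_id.encode('ascii') raises UnicodeEncodeError;
    # utf-8 handles it and still yields a stable tiebreaker:
    print(hashlib.sha1(event_id.encode('utf-8')).hexdigest())
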
diff --git a/synapse/state/v2.py b/synapse/state/v2.py
index 5d06f7e928..650995c92c 100644
--- a/synapse/state/v2.py
+++ b/synapse/state/v2.py
@@ -29,10 +29,12 @@ logger = logging.getLogger(__name__)
 
 
 @defer.inlineCallbacks
-def resolve_events_with_store(state_sets, event_map, state_res_store):
+def resolve_events_with_store(room_version, state_sets, event_map, state_res_store):
     """Resolves the state using the v2 state resolution algorithm
 
     Args:
+        room_version (str): The room version
+
         state_sets(list): List of dicts of (type, state_key) -> event_id,
             which are the different state groups to resolve.
 
@@ -53,6 +55,10 @@ def resolve_events_with_store(state_sets, event_map, state_res_store):
 
     logger.debug("Computing conflicted state")
 
+    # We use event_map as a cache, so if it's None we need to initialize it
+    if event_map is None:
+        event_map = {}
+
     # First split up the un/conflicted state
     unconflicted_state, conflicted_state = _seperate(state_sets)
 
@@ -100,7 +106,7 @@ def resolve_events_with_store(state_sets, event_map, state_res_store):
 
     # Now sequentially auth each one
     resolved_state = yield _iterative_auth_checks(
-        sorted_power_events, unconflicted_state, event_map,
+        room_version, sorted_power_events, unconflicted_state, event_map,
         state_res_store,
     )
 
@@ -125,7 +131,7 @@ def resolve_events_with_store(state_sets, event_map, state_res_store):
     logger.debug("resolving remaining events")
 
     resolved_state = yield _iterative_auth_checks(
-        leftover_events, resolved_state, event_map,
+        room_version, leftover_events, resolved_state, event_map,
         state_res_store,
     )
 
@@ -155,7 +161,7 @@ def _get_power_level_for_sender(event_id, event_map, state_res_store):
     event = yield _get_event(event_id, event_map, state_res_store)
 
     pl = None
-    for aid, _ in event.auth_events:
+    for aid in event.auth_event_ids():
         aev = yield _get_event(aid, event_map, state_res_store)
         if (aev.type, aev.state_key) == (EventTypes.PowerLevels, ""):
             pl = aev
@@ -163,7 +169,7 @@ def _get_power_level_for_sender(event_id, event_map, state_res_store):
 
     if pl is None:
         # Couldn't find power level. Check if they're the creator of the room
-        for aid, _ in event.auth_events:
+        for aid in event.auth_event_ids():
             aev = yield _get_event(aid, event_map, state_res_store)
             if (aev.type, aev.state_key) == (EventTypes.Create, ""):
                 if aev.content.get("creator") == event.sender:
@@ -295,7 +301,7 @@ def _add_event_and_auth_chain_to_graph(graph, event_id, event_map,
         graph.setdefault(eid, set())
 
         event = yield _get_event(eid, event_map, state_res_store)
-        for aid, _ in event.auth_events:
+        for aid in event.auth_event_ids():
             if aid in auth_diff:
                 if aid not in graph:
                     state.append(aid)
@@ -346,11 +352,13 @@ def _reverse_topological_power_sort(event_ids, event_map, state_res_store, auth_
 
 
 @defer.inlineCallbacks
-def _iterative_auth_checks(event_ids, base_state, event_map, state_res_store):
+def _iterative_auth_checks(room_version, event_ids, base_state, event_map,
+                           state_res_store):
     """Sequentially apply auth checks to each event in given list, updating the
     state as it goes along.
 
     Args:
+        room_version (str)
         event_ids (list[str]): Ordered list of events to apply auth checks to
         base_state (dict[tuple[str, str], str]): The set of state to start with
         event_map (dict[str,FrozenEvent])
@@ -365,7 +373,7 @@ def _iterative_auth_checks(event_ids, base_state, event_map, state_res_store):
         event = event_map[event_id]
 
         auth_events = {}
-        for aid, _ in event.auth_events:
+        for aid in event.auth_event_ids():
             ev = yield _get_event(aid, event_map, state_res_store)
 
             if ev.rejected_reason is None:
@@ -381,7 +389,7 @@ def _iterative_auth_checks(event_ids, base_state, event_map, state_res_store):
 
         try:
             event_auth.check(
-                event, auth_events,
+                room_version, event, auth_events,
                 do_sig_check=False,
                 do_size_check=False
             )
@@ -413,9 +421,9 @@ def _mainline_sort(event_ids, resolved_power_event_id, event_map,
     while pl:
         mainline.append(pl)
         pl_ev = yield _get_event(pl, event_map, state_res_store)
-        auth_events = pl_ev.auth_events
+        auth_events = pl_ev.auth_event_ids()
         pl = None
-        for aid, _ in auth_events:
+        for aid in auth_events:
             ev = yield _get_event(aid, event_map, state_res_store)
             if (ev.type, ev.state_key) == (EventTypes.PowerLevels, ""):
                 pl = aid
@@ -460,10 +468,10 @@ def _get_mainline_depth_for_event(event, mainline_map, event_map, state_res_stor
         if depth is not None:
             defer.returnValue(depth)
 
-        auth_events = event.auth_events
+        auth_events = event.auth_event_ids()
         event = None
 
-        for aid, _ in auth_events:
+        for aid in auth_events:
             aev = yield _get_event(aid, event_map, state_res_store)
             if (aev.type, aev.state_key) == (EventTypes.PowerLevels, ""):
                 event = aev
diff --git a/synapse/static/client/login/index.html b/synapse/static/client/login/index.html
index 96c8723cab..bcb6bc6bb7 100644
--- a/synapse/static/client/login/index.html
+++ b/synapse/static/client/login/index.html
@@ -12,35 +12,30 @@
         <h1>Log in with one of the following methods</h1>
 
         <span id="feedback" style="color: #f00"></span>
-        <br/>
-        <br/>
 
         <div id="loading">
             <img src="spinner.gif" />
         </div>
 
-        <div id="cas_flow" class="login_flow" style="display:none"
-                onclick="gotoCas(); return false;">
-            CAS Authentication: <button id="cas_button" style="margin: 10px">Log in</button>
+        <div id="sso_flow" class="login_flow" style="display:none">
+            Single sign-on:
+            <form id="sso_form" action="/_matrix/client/r0/login/sso/redirect" method="get">
+                <input id="sso_redirect_url" type="hidden" name="redirectUrl" value=""/>
+                <input type="submit" value="Log in"/>
+            </form>
         </div>
 
-        <br/>
-
-        <form id="password_form" class="login_flow" style="display:none"
-                onsubmit="matrixLogin.password_login(); return false;">
-            <div>
-                Password Authentication:<br/>
-
-                <div style="text-align: center">
-                    <input id="user_id" size="32" type="text" placeholder="Matrix ID (e.g. bob)" autocapitalize="off" autocorrect="off" />
-                    <br/>
-                    <input id="password" size="32" type="password" placeholder="Password"/>
-                    <br/>
+        <div id="password_flow" class="login_flow" style="display:none">
+            Password Authentication:
+            <form onsubmit="matrixLogin.password_login(); return false;">
+                <input id="user_id" size="32" type="text" placeholder="Matrix ID (e.g. bob)" autocapitalize="off" autocorrect="off" />
+                <br/>
+                <input id="password" size="32" type="password" placeholder="Password"/>
+                <br/>
 
-                    <button type="submit" style="margin: 10px">Log in</button>
-                </div>
-            </div>
-        </form>
+                <input type="submit" value="Log in"/>
+            </form>
+        </div>
 
         <div id="no_login_types" type="button" class="login_flow" style="display:none">
             Log in currently unavailable.
diff --git a/synapse/static/client/login/js/login.js b/synapse/static/client/login/js/login.js
index bfb7386035..3a958749a1 100644
--- a/synapse/static/client/login/js/login.js
+++ b/synapse/static/client/login/js/login.js
@@ -1,7 +1,8 @@
 window.matrixLogin = {
-    endpoint: location.origin + "/_matrix/client/api/v1/login",
+    endpoint: location.origin + "/_matrix/client/r0/login",
     serverAcceptsPassword: false,
-    serverAcceptsCas: false
+    serverAcceptsCas: false,
+    serverAcceptsSso: false,
 };
 
 var submitPassword = function(user, pwd) {
@@ -40,12 +41,6 @@ var errorFunc = function(err) {
     }
 };
 
-var gotoCas = function() {
-    var this_page = window.location.origin + window.location.pathname;
-    var redirect_url = matrixLogin.endpoint + "/cas/redirect?redirectUrl=" + encodeURIComponent(this_page);
-    window.location.replace(redirect_url);
-}
-
 var setFeedbackString = function(text) {
     $("#feedback").text(text);
 };
@@ -53,12 +48,18 @@ var setFeedbackString = function(text) {
 var show_login = function() {
     $("#loading").hide();
 
+    var this_page = window.location.origin + window.location.pathname;
+    $("#sso_redirect_url").val(encodeURIComponent(this_page));
+
     if (matrixLogin.serverAcceptsPassword) {
-        $("#password_form").show();
+        $("#password_flow").show();
     }
 
-    if (matrixLogin.serverAcceptsCas) {
-        $("#cas_flow").show();
+    if (matrixLogin.serverAcceptsSso) {
+        $("#sso_flow").show();
+    } else if (matrixLogin.serverAcceptsCas) {
+        $("#sso_form").attr("action", "/_matrix/client/r0/login/cas/redirect");
+        $("#sso_flow").show();
     }
 
     if (!matrixLogin.serverAcceptsPassword && !matrixLogin.serverAcceptsCas) {
@@ -67,8 +68,8 @@ var show_login = function() {
 };
 
 var show_spinner = function() {
-    $("#password_form").hide();
-    $("#cas_flow").hide();
+    $("#password_flow").hide();
+    $("#sso_flow").hide();
     $("#no_login_types").hide();
     $("#loading").show();
 };
@@ -84,7 +85,10 @@ var fetch_info = function(cb) {
                 matrixLogin.serverAcceptsCas = true;
                 console.log("Server accepts CAS");
             }
-
+            if ("m.login.sso" === flow.type) {
+                matrixLogin.serverAcceptsSso = true;
+                console.log("Server accepts SSO");
+            }
             if ("m.login.password" === flow.type) {
                 matrixLogin.serverAcceptsPassword = true;
                 console.log("Server accepts password");
diff --git a/synapse/static/client/login/style.css b/synapse/static/client/login/style.css
index 73da0b5117..1cce5ed950 100644
--- a/synapse/static/client/login/style.css
+++ b/synapse/static/client/login/style.css
@@ -19,30 +19,23 @@ a:hover   { color: #000; }
 a:active  { color: #000; }
 
 input {
-   width: 90%
-}
-
-textarea, input {
-   font-family: inherit;
-   font-size: inherit;
    margin: 5px;
 }
 
-.smallPrint {
-    color: #888;
-    font-size: 9pt ! important;
-    font-style: italic ! important;
+textarea, input[type="text"], input[type="password"] {
+   width: 90%;
 }
 
-.g-recaptcha div {
-    margin: auto;
+form {
+    text-align: center;
+    margin: 10px 0 0 0;
 }
 
 .login_flow {
+    width: 300px;
     text-align: left;
     padding: 10px;
     margin-bottom: 40px;
-    display: inline-block;
 
     -webkit-border-radius: 10px;
     -moz-border-radius: 10px;
diff --git a/synapse/static/client/register/index.html b/synapse/static/client/register/index.html
index 886f2edd1f..6edc4deb03 100644
--- a/synapse/static/client/register/index.html
+++ b/synapse/static/client/register/index.html
@@ -4,7 +4,7 @@
 <meta name='viewport' content='width=device-width, initial-scale=1, user-scalable=no, minimum-scale=1.0, maximum-scale=1.0'> 
 <link rel="stylesheet" href="style.css">
 <script src="js/jquery-2.1.3.min.js"></script>
-<script src="https://www.google.com/recaptcha/api/js/recaptcha_ajax.js"></script>
+<script src="https://www.recaptcha.net/recaptcha/api/js/recaptcha_ajax.js"></script>
 <script src="register_config.js"></script>
 <script src="js/register.js"></script>
 </head>
diff --git a/synapse/static/index.html b/synapse/static/index.html
new file mode 100644
index 0000000000..d3f1c7dce0
--- /dev/null
+++ b/synapse/static/index.html
@@ -0,0 +1,63 @@
+<!DOCTYPE html>
+<html lang="en">
+  <head>
+   <title>Synapse is running</title>
+   <style>
+       body {
+         font-family: -apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,Oxygen-Sans,Ubuntu,Cantarell,"Helvetica Neue",sans-serif;
+         max-width: 40em;
+         margin: auto;
+         text-align: center;
+       }
+       h1, p {
+         margin: 1.5em;
+       }
+       hr {
+        border: none;
+        background-color: #ccc;
+        color: #ccc;
+        height: 1px;
+        width: 7em;
+        margin-top: 4em;
+      }
+       .logo {
+         display: block;
+         width: 12em;
+         margin: 4em auto;
+       }
+   </style>
+  </head>
+  <body>
+    <div class="logo">
+       <svg role="img" aria-label="[Matrix logo]" viewBox="0 0 200 85" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
+          <g id="parent" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
+              <g id="child" transform="translate(-122.000000, -6.000000)" fill="#000000" fill-rule="nonzero">
+                  <g id="matrix-logo" transform="translate(122.000000, 6.000000)">
+                      <polygon id="left-bracket" points="2.24708861 1.93811009 2.24708861 82.7268844 8.10278481 82.7268844 8.10278481 84.6652459 0 84.6652459 0 0 8.10278481 0 8.10278481 1.93811009"></polygon>
+                      <path d="M24.8073418,27.5493174 L24.8073418,31.6376991 L24.924557,31.6376991 C26.0227848,30.0814294 27.3455696,28.8730642 28.8951899,28.0163743 C30.4437975,27.1611927 32.2189873,26.7318422 34.218481,26.7318422 C36.1394937,26.7318422 37.8946835,27.102622 39.4825316,27.8416679 C41.0708861,28.5819706 42.276962,29.8856073 43.1005063,31.7548404 C44.0017722,30.431345 45.2270886,29.2629486 46.7767089,28.2506569 C48.3253165,27.2388679 50.158481,26.7318422 52.2764557,26.7318422 C53.8843038,26.7318422 55.3736709,26.9269101 56.7473418,27.3162917 C58.1189873,27.7056734 59.295443,28.3285835 60.2759494,29.185022 C61.255443,30.0422147 62.02,31.1615927 62.5701266,32.5426532 C63.1187342,33.9262275 63.3936709,35.5898349 63.3936709,37.5372459 L63.3936709,57.7443688 L55.0410127,57.7441174 L55.0410127,40.6319376 C55.0410127,39.6201486 55.0020253,38.6661761 54.9232911,37.7700202 C54.8440506,36.8751211 54.6293671,36.0968606 54.2764557,35.4339817 C53.9232911,34.772611 53.403038,34.2464807 52.7177215,33.8568477 C52.0313924,33.4689743 51.0997468,33.2731523 49.9235443,33.2731523 C48.7473418,33.2731523 47.7962025,33.4983853 47.0706329,33.944578 C46.344557,34.393033 45.7764557,34.9774826 45.3650633,35.6969211 C44.9534177,36.4181193 44.6787342,37.2353431 44.5417722,38.150855 C44.4037975,39.0653615 44.3356962,39.9904257 44.3356962,40.9247908 L44.3356962,57.7443688 L35.9835443,57.7443688 L35.9835443,40.8079009 C35.9835443,39.9124991 35.963038,39.0263982 35.9253165,38.150855 C35.8853165,37.2743064 35.7192405,36.4666349 35.424557,35.7263321 C35.1303797,34.9872862 34.64,34.393033 33.9539241,33.944578 C33.2675949,33.4983853 32.2579747,33.2731523 30.9248101,33.2731523 C30.5321519,33.2731523 30.0126582,33.3608826 29.3663291,33.5365945 C28.7192405,33.7118037 28.0913924,34.0433688 27.4840506,34.5292789 C26.875443,35.0164459 26.3564557,35.7172826 25.9250633,36.6315376 C25.4934177,37.5470495 25.2779747,38.7436 25.2779747,40.2229486 L25.2779747,57.7441174 L16.9260759,57.7443688 L16.9260759,27.5493174 L24.8073418,27.5493174 Z" id="m"></path>
+                      <path d="M68.7455696,31.9886202 C69.6075949,30.7033339 70.7060759,29.672189 72.0397468,28.8926716 C73.3724051,28.1141596 74.8716456,27.5596239 76.5387342,27.2283101 C78.2050633,26.8977505 79.8817722,26.7315908 81.5678481,26.7315908 C83.0974684,26.7315908 84.6458228,26.8391798 86.2144304,27.0525982 C87.7827848,27.2675248 89.2144304,27.6865688 90.5086076,28.3087248 C91.8025316,28.9313835 92.8610127,29.7983798 93.6848101,30.9074514 C94.5083544,32.0170257 94.92,33.4870734 94.92,35.3173431 L94.92,51.026844 C94.92,52.3913138 94.998481,53.6941963 95.1556962,54.9400165 C95.3113924,56.1865908 95.5863291,57.120956 95.9787342,57.7436147 L87.5091139,57.7436147 C87.3518987,57.276055 87.2240506,56.7996972 87.1265823,56.3125303 C87.0278481,55.8266202 86.9592405,55.3301523 86.9207595,54.8236294 C85.5873418,56.1865908 84.0182278,57.1405633 82.2156962,57.6857982 C80.4113924,58.2295248 78.5683544,58.503022 76.6860759,58.503022 C75.2346835,58.503022 73.8817722,58.3275615 72.6270886,57.9776459 C71.3718987,57.6269761 70.2744304,57.082244 69.3334177,56.3411872 C68.3921519,55.602644 67.656962,54.6680275 67.1275949,53.5390972 C66.5982278,52.410167 66.3331646,51.065556 66.3331646,49.5087835 C66.3331646,47.7961578 66.6367089,46.384178 67.2455696,45.2756092 C67.8529114,44.1652807 68.6367089,43.2799339 69.5987342,42.6173064 C70.5589873,41.9556844 71.6567089,41.4592165 72.8924051,41.1284055 C74.1273418,40.7978459 75.3721519,40.5356606 76.6270886,40.3398385 C77.8820253,40.1457761 79.116962,39.9896716 80.3329114,39.873033 C81.5483544,39.7558917 82.6270886,39.5804312 83.5681013,39.3469028 C84.5093671,39.1133743 85.2536709,38.7732624 85.8032911,38.3250587 C86.3513924,37.8773578 86.6063291,37.2252881 86.5678481,36.3680954 C86.5678481,35.4731963 86.4210127,34.7620532 86.1268354,34.2366771 C85.8329114,33.7113009 85.4405063,33.3018092 84.9506329,33.0099615 C84.4602532,32.7181138 83.8916456,32.5232972 83.2450633,32.4255119 C82.5977215,32.3294862 81.9010127,32.2797138 81.156962,32.2797138 C79.5098734,32.2797138 78.2159494,32.6303835 77.2746835,33.3312202 C76.3339241,34.0320569 75.7837975,35.2007046 75.6275949,36.8354037 L67.275443,36.8354037 C67.3924051,34.8892495 67.8817722,33.2726495 68.7455696,31.9886202 Z M85.2440506,43.6984752 C84.7149367,43.873433 84.1460759,44.0189798 83.5387342,44.1361211 C82.9306329,44.253011 82.2936709,44.350545 81.6270886,44.4279688 C80.96,44.5066495 80.2934177,44.6034294 79.6273418,44.7203193 C78.9994937,44.8362037 78.3820253,44.9933138 77.7749367,45.1871248 C77.1663291,45.3829468 76.636962,45.6451321 76.1865823,45.9759431 C75.7349367,46.3070055 75.3724051,46.7263009 75.0979747,47.2313156 C74.8232911,47.7375872 74.6863291,48.380356 74.6863291,49.1588679 C74.6863291,49.8979138 74.8232911,50.5218294 75.0979747,51.026844 C75.3724051,51.5338697 75.7455696,51.9328037 76.2159494,52.2246514 C76.6863291,52.5164991 77.2349367,52.7213706 77.8632911,52.8375064 C78.4898734,52.9546477 79.136962,53.012967 79.8037975,53.012967 C81.4506329,53.012967 82.724557,52.740978 83.6273418,52.1952404 C84.5288608,51.6507596 85.1949367,50.9981872 85.6270886,50.2382771 C86.0579747,49.4793725 86.323038,48.7119211 86.4212658,47.9321523 C86.518481,47.1536404 86.5681013,46.5304789 86.5681013,46.063422 L86.5681013,42.9677248 C86.2146835,43.2799339 85.7736709,43.5230147 85.2440506,43.6984752 Z" id="a"></path>
+                      <path d="M116.917975,27.5493174 L116.917975,33.0976917 L110.801266,33.0976917 L110.801266,48.0492936 C110.801266,49.4502128 111.036203,50.3850807 111.507089,50.8518862 C111.976962,51.3191945 112.918734,51.5527229 114.33038,51.5527229 C114.801013,51.5527229 115.251392,51.5336183 115.683038,51.4944037 C116.114177,51.4561945 116.526076,51.3968697 116.917975,51.3194459 L116.917975,57.7438661 C116.212152,57.860756 115.427595,57.9381798 114.565316,57.9778972 C113.702785,58.0153523 112.859747,58.0357138 112.036203,58.0357138 C110.742278,58.0357138 109.516456,57.9477321 108.36,57.7722716 C107.202785,57.5975651 106.183544,57.2577046 105.301519,56.7509303 C104.418987,56.2454128 103.722785,55.5242147 103.213418,54.5898495 C102.703038,53.6562385 102.448608,52.4292716 102.448608,50.9099541 L102.448608,33.0976917 L97.3903797,33.0976917 L97.3903797,27.5493174 L102.448608,27.5493174 L102.448608,18.4967596 L110.801013,18.4967596 L110.801013,27.5493174 L116.917975,27.5493174 Z" id="t"></path>
+                      <path d="M128.857975,27.5493174 L128.857975,33.1565138 L128.975696,33.1565138 C129.367089,32.2213945 129.896203,31.3559064 130.563544,30.557033 C131.23038,29.7596679 131.99443,29.0776844 132.857215,28.5130936 C133.719241,27.9495083 134.641266,27.5113596 135.622532,27.1988991 C136.601772,26.8879468 137.622025,26.7315908 138.681013,26.7315908 C139.229873,26.7315908 139.836962,26.8296275 140.504304,27.0239413 L140.504304,34.7336477 C140.111646,34.6552183 139.641013,34.586844 139.092658,34.5290275 C138.543291,34.4704569 138.014177,34.4410459 137.504304,34.4410459 C135.974937,34.4410459 134.681013,34.6949358 133.622785,35.2004532 C132.564051,35.7067248 131.711392,36.397255 131.064051,37.2735523 C130.417215,38.1501009 129.955443,39.1714422 129.681266,40.3398385 C129.407089,41.5074807 129.269873,42.7736624 129.269873,44.1361211 L129.269873,57.7438661 L120.917722,57.7438661 L120.917722,27.5493174 L128.857975,27.5493174 Z" id="r"></path>
+                      <path d="M144.033165,22.8767376 L144.033165,16.0435798 L152.386076,16.0435798 L152.386076,22.8767376 L144.033165,22.8767376 Z M152.386076,27.5493174 L152.386076,57.7438661 L144.033165,57.7438661 L144.033165,27.5493174 L152.386076,27.5493174 Z" id="i"></path>
+                      <polygon id="x" points="156.738228 27.5493174 166.266582 27.5493174 171.619494 35.4337303 176.913418 27.5493174 186.147848 27.5493174 176.148861 41.6831927 187.383544 57.7441174 177.85443 57.7441174 171.501772 48.2245028 165.148861 57.7441174 155.797468 57.7441174 166.737468 41.8589046"></polygon>
+                      <polygon id="right-bracket" points="197.580759 82.7268844 197.580759 1.93811009 191.725063 1.93811009 191.725063 0 199.828354 0 199.828354 84.6652459 191.725063 84.6652459 191.725063 82.7268844"></polygon>
+                  </g>
+              </g>
+          </g>
+      </svg>
+    </div>
+    <h1>It works! Synapse is running</h1>
+    <p>Your Synapse server is listening on this port and is ready for messages.</p>
+    <p>To use this server you'll need <a href="https://matrix.org/docs/projects/try-matrix-now.html#clients" target="_blank">a Matrix client</a>.
+    </p>
+    <p>Welcome to the Matrix universe :)</p>
+    <hr>
+    <p>
+      <small>
+        <a href="https://matrix.org" target="_blank">
+          matrix.org
+        </a>
+      </small>
+    </p>
+  </body>
+</html>
diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py
index 53c685c173..42cd3c83ad 100644
--- a/synapse/storage/__init__.py
+++ b/synapse/storage/__init__.py
@@ -14,12 +14,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import datetime
+import calendar
 import logging
 import time
 
-from dateutil import tz
-
 from synapse.api.constants import PresenceState
 from synapse.storage.devices import DeviceStore
 from synapse.storage.user_erasure_store import UserErasureStore
@@ -119,7 +117,6 @@ class DataStore(RoomMemberStore, RoomStore,
             db_conn, "device_lists_stream", "stream_id",
         )
 
-        self._transaction_id_gen = IdGenerator(db_conn, "sent_transactions", "id")
         self._access_tokens_id_gen = IdGenerator(db_conn, "access_tokens", "id")
         self._event_reports_id_gen = IdGenerator(db_conn, "event_reports", "id")
         self._push_rule_id_gen = IdGenerator(db_conn, "push_rules", "id")
@@ -320,7 +317,7 @@ class DataStore(RoomMemberStore, RoomStore,
                               thirty_days_ago_in_secs))
 
             for row in txn:
-                if row[0] is 'unknown':
+                if row[0] == 'unknown':
                     pass
                 results[row[0]] = row[1]
 
@@ -358,10 +355,11 @@ class DataStore(RoomMemberStore, RoomStore,
         """
         Returns millisecond unixtime for start of UTC day.
         """
-        now = datetime.datetime.utcnow()
-        today_start = datetime.datetime(now.year, now.month,
-                                        now.day, tzinfo=tz.tzutc())
-        return int(time.mktime(today_start.timetuple())) * 1000
+        now = time.gmtime()
+        today_start = calendar.timegm((
+            now.tm_year, now.tm_mon, now.tm_mday, 0, 0, 0,
+        ))
+        return today_start * 1000
 
     def generate_user_daily_visits(self):
         """
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index d9d0255d0b..a0333d5309 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -12,6 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import itertools
 import logging
 import sys
 import threading
@@ -26,9 +27,13 @@ from prometheus_client import Histogram
 from twisted.internet import defer
 
 from synapse.api.errors import StoreError
-from synapse.storage.engines import PostgresEngine
+from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.storage.engines import PostgresEngine, Sqlite3Engine
+from synapse.types import get_domain_from_id
+from synapse.util import batch_iter
 from synapse.util.caches.descriptors import Cache
 from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
+from synapse.util.stringutils import exception_to_unicode
 
 logger = logging.getLogger(__name__)
 
@@ -48,6 +53,25 @@ sql_query_timer = Histogram("synapse_storage_query_time", "sec", ["verb"])
 sql_txn_timer = Histogram("synapse_storage_transaction_time", "sec", ["desc"])
 
 
+# Unique indexes which have been added in background updates. Maps from table name
+# to the name of the background update which added the unique index to that table.
+#
+# This is used by the upsert logic to figure out which tables are safe to do a proper
+# UPSERT on: until the relevant background update has completed, we
+# have to emulate an upsert by locking the table.
+#
+UNIQUE_INDEX_BACKGROUND_UPDATES = {
+    "user_ips": "user_ips_device_unique_index",
+    "device_lists_remote_extremeties": "device_lists_remote_extremeties_unique_idx",
+    "device_lists_remote_cache": "device_lists_remote_cache_unique_idx",
+    "event_search": "event_search_event_id_idx",
+}
+
+# This is a special cache name we use to batch multiple invalidations of caches
+# based on the current state when notifying workers over replication.
+_CURRENT_STATE_CACHE_NAME = "cs_cache_fake"
+
+
 class LoggingTransaction(object):
     """An object that almost-transparently proxies for the 'txn' object
     passed to the constructor. Adds logging and metrics to the .execute()
@@ -83,6 +107,14 @@ class LoggingTransaction(object):
     def __iter__(self):
         return self.txn.__iter__()
 
+    def execute_batch(self, sql, args):
+        if isinstance(self.database_engine, PostgresEngine):
+            from psycopg2.extras import execute_batch
+            self._do_execute(lambda *x: execute_batch(self.txn, *x), sql, args)
+        else:
+            for val in args:
+                self.execute(sql, val)
+
     def execute(self, sql, *args):
         self._do_execute(self.txn.execute, sql, *args)
 
@@ -191,6 +223,57 @@ class SQLBaseStore(object):
 
         self.database_engine = hs.database_engine
 
+        # A set of tables that are not safe to use native upserts in.
+        self._unsafe_to_upsert_tables = set(UNIQUE_INDEX_BACKGROUND_UPDATES.keys())
+
+        # We add the user_directory_search table to the blacklist on SQLite
+        # because the existing search table does not have an index, making it
+        # unsafe to use native upserts.
+        if isinstance(self.database_engine, Sqlite3Engine):
+            self._unsafe_to_upsert_tables.add("user_directory_search")
+
+        if self.database_engine.can_native_upsert:
+            # Check ASAP (and then later, every 15s) to see if we have finished
+            # background updates of tables that aren't safe to update.
+            self._clock.call_later(
+                0.0,
+                run_as_background_process,
+                "upsert_safety_check",
+                self._check_safe_to_upsert
+            )
+
+    @defer.inlineCallbacks
+    def _check_safe_to_upsert(self):
+        """
+        Is it safe to use native UPSERT?
+
+        If there are background updates, we will need to wait, as they may be
+        the addition of indexes that set the UNIQUE constraint that we require.
+
+        If the background updates have not completed, wait 15 sec and check again.
+        """
+        updates = yield self._simple_select_list(
+            "background_updates",
+            keyvalues=None,
+            retcols=["update_name"],
+            desc="check_background_updates",
+        )
+        updates = [x["update_name"] for x in updates]
+
+        for table, update_name in UNIQUE_INDEX_BACKGROUND_UPDATES.items():
+            if update_name not in updates:
+                logger.debug("Now safe to upsert in %s", table)
+                self._unsafe_to_upsert_tables.discard(table)
+
+        # If there's any updates still running, reschedule to run.
+        if updates:
+            self._clock.call_later(
+                15.0,
+                run_as_background_process,
+                "upsert_safety_check",
+                self._check_safe_to_upsert
+            )
+
     def start_profiling(self):
         self._previous_loop_ts = self._clock.time_msec()
 
@@ -249,32 +332,32 @@ class SQLBaseStore(object):
                 except self.database_engine.module.OperationalError as e:
                     # This can happen if the database disappears mid
                     # transaction.
-                    logger.warn(
+                    logger.warning(
                         "[TXN OPERROR] {%s} %s %d/%d",
-                        name, e, i, N
+                        name, exception_to_unicode(e), i, N
                     )
                     if i < N:
                         i += 1
                         try:
                             conn.rollback()
                         except self.database_engine.module.Error as e1:
-                            logger.warn(
+                            logger.warning(
                                 "[TXN EROLL] {%s} %s",
-                                name, e1,
+                                name, exception_to_unicode(e1),
                             )
                         continue
                     raise
                 except self.database_engine.module.DatabaseError as e:
                     if self.database_engine.is_deadlock(e):
-                        logger.warn("[TXN DEADLOCK] {%s} %d/%d", name, i, N)
+                        logger.warning("[TXN DEADLOCK] {%s} %d/%d", name, i, N)
                         if i < N:
                             i += 1
                             try:
                                 conn.rollback()
                             except self.database_engine.module.Error as e1:
-                                logger.warn(
+                                logger.warning(
                                     "[TXN EROLL] {%s} %s",
-                                    name, e1,
+                                    name, exception_to_unicode(e1),
                                 )
                             continue
                     raise
@@ -493,8 +576,15 @@ class SQLBaseStore(object):
         txn.executemany(sql, vals)
 
     @defer.inlineCallbacks
-    def _simple_upsert(self, table, keyvalues, values,
-                       insertion_values={}, desc="_simple_upsert", lock=True):
+    def _simple_upsert(
+        self,
+        table,
+        keyvalues,
+        values,
+        insertion_values={},
+        desc="_simple_upsert",
+        lock=True
+    ):
         """
 
         `lock` should generally be set to True (the default), but can be set
@@ -515,16 +605,21 @@ class SQLBaseStore(object):
                 inserting
             lock (bool): True to lock the table when doing the upsert.
         Returns:
-            Deferred(bool): True if a new entry was created, False if an
-                existing one was updated.
+            Deferred(None or bool): Native upserts always return None. Emulated
+            upserts return True if a new entry was created, False if an existing
+            one was updated.
         """
         attempts = 0
         while True:
             try:
                 result = yield self.runInteraction(
                     desc,
-                    self._simple_upsert_txn, table, keyvalues, values, insertion_values,
-                    lock=lock
+                    self._simple_upsert_txn,
+                    table,
+                    keyvalues,
+                    values,
+                    insertion_values,
+                    lock=lock,
                 )
                 defer.returnValue(result)
             except self.database_engine.module.IntegrityError as e:
@@ -536,30 +631,111 @@ class SQLBaseStore(object):
 
                 # presumably we raced with another transaction: let's retry.
                 logger.warn(
-                    "IntegrityError when upserting into %s; retrying: %s",
-                    table, e
+                    "%s when upserting into %s; retrying: %s", e.__name__, table, e
                 )
 
-    def _simple_upsert_txn(self, txn, table, keyvalues, values, insertion_values={},
-                           lock=True):
+    def _simple_upsert_txn(
+        self,
+        txn,
+        table,
+        keyvalues,
+        values,
+        insertion_values={},
+        lock=True,
+    ):
+        """
+        Pick the UPSERT method which works best on the platform. Either the
+        native one (Pg9.5+, recent SQLites), or fall back to an emulated method.
+
+        Args:
+            txn: The transaction to use.
+            table (str): The table to upsert into
+            keyvalues (dict): The unique key columns and their new values
+            values (dict): The nonunique columns and their new values
+            insertion_values (dict): additional key/values to use only when
+                inserting
+            lock (bool): True to lock the table when doing the upsert.
+        Returns:
+            None or bool: Native upserts always return None. Emulated
+            upserts return True if a new entry was created, False if an existing
+            one was updated.
+        """
+        if (
+            self.database_engine.can_native_upsert
+            and table not in self._unsafe_to_upsert_tables
+        ):
+            return self._simple_upsert_txn_native_upsert(
+                txn,
+                table,
+                keyvalues,
+                values,
+                insertion_values=insertion_values,
+            )
+        else:
+            return self._simple_upsert_txn_emulated(
+                txn,
+                table,
+                keyvalues,
+                values,
+                insertion_values=insertion_values,
+                lock=lock,
+            )
+
+    def _simple_upsert_txn_emulated(
+        self, txn, table, keyvalues, values, insertion_values={}, lock=True
+    ):
+        """
+        Args:
+            table (str): The table to upsert into
+            keyvalues (dict): The unique key columns and their new values
+            values (dict): The nonunique columns and their new values
+            insertion_values (dict): additional key/values to use only when
+                inserting
+            lock (bool): True to lock the table when doing the upsert.
+        Returns:
+            bool: Return True if a new entry was created, False if an existing
+            one was updated.
+        """
         # We need to lock the table :(, unless we're *really* careful
         if lock:
             self.database_engine.lock_table(txn, table)
 
-        # First try to update.
-        sql = "UPDATE %s SET %s WHERE %s" % (
-            table,
-            ", ".join("%s = ?" % (k,) for k in values),
-            " AND ".join("%s = ?" % (k,) for k in keyvalues)
-        )
-        sqlargs = list(values.values()) + list(keyvalues.values())
+        def _getwhere(key):
+            # If the value we're passing in is None (aka NULL), we need to use
+            # IS, not =, since NULL = NULL evaluates to NULL rather than TRUE.
+            if keyvalues[key] is None:
+                return "%s IS ?" % (key,)
+            else:
+                return "%s = ?" % (key,)
+
+        if not values:
+            # If `values` is empty, then all of the values we care about are in
+            # the unique key, so there is nothing to UPDATE. We can just do a
+            # SELECT instead to see if it exists.
+            sql = "SELECT 1 FROM %s WHERE %s" % (
+                table,
+                " AND ".join(_getwhere(k) for k in keyvalues)
+            )
+            sqlargs = list(keyvalues.values())
+            txn.execute(sql, sqlargs)
+            if txn.fetchall():
+                # We have an existing record.
+                return False
+        else:
+            # First try to update.
+            sql = "UPDATE %s SET %s WHERE %s" % (
+                table,
+                ", ".join("%s = ?" % (k,) for k in values),
+                " AND ".join(_getwhere(k) for k in keyvalues)
+            )
+            sqlargs = list(values.values()) + list(keyvalues.values())
 
-        txn.execute(sql, sqlargs)
-        if txn.rowcount > 0:
-            # successfully updated at least one row.
-            return False
+            txn.execute(sql, sqlargs)
+            if txn.rowcount > 0:
+                # successfully updated at least one row.
+                return False
 
-        # We didn't update any rows so insert a new one
+        # We didn't find any existing rows, so insert a new one
         allvalues = {}
         allvalues.update(keyvalues)
         allvalues.update(values)
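
The _getwhere helper exists because SQL equality never matches NULL: a key
column whose value is None has to be compared with IS instead. An
illustration of the WHERE clause it generates (table and columns invented):

    keyvalues = {"user_id": "@alice:example.com", "device_id": None}

    def _getwhere(key):
        if keyvalues[key] is None:
            return "%s IS ?" % (key,)
        return "%s = ?" % (key,)

    print(" AND ".join(_getwhere(k) for k in keyvalues))
    # e.g. user_id = ? AND device_id IS ?
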
@@ -568,12 +744,144 @@ class SQLBaseStore(object):
         sql = "INSERT INTO %s (%s) VALUES (%s)" % (
             table,
             ", ".join(k for k in allvalues),
-            ", ".join("?" for _ in allvalues)
+            ", ".join("?" for _ in allvalues),
         )
         txn.execute(sql, list(allvalues.values()))
         # successfully inserted
         return True
 
+    def _simple_upsert_txn_native_upsert(
+        self, txn, table, keyvalues, values, insertion_values={}
+    ):
+        """
+        Use the native UPSERT functionality in recent PostgreSQL versions.
+
+        Args:
+            table (str): The table to upsert into
+            keyvalues (dict): The unique key columns and their new values
+            values (dict): The nonunique columns and their new values
+            insertion_values (dict): additional key/values to use only when
+                inserting
+        Returns:
+            None
+        """
+        allvalues = {}
+        allvalues.update(keyvalues)
+        allvalues.update(values)
+        allvalues.update(insertion_values)
+
+        sql = (
+            "INSERT INTO %s (%s) VALUES (%s) "
+            "ON CONFLICT (%s) DO UPDATE SET %s"
+        ) % (
+            table,
+            ", ".join(k for k in allvalues),
+            ", ".join("?" for _ in allvalues),
+            ", ".join(k for k in keyvalues),
+            ", ".join(k + "=EXCLUDED." + k for k in values),
+        )
+        txn.execute(sql, list(allvalues.values()))
+
+    def _simple_upsert_many_txn(
+        self, txn, table, key_names, key_values, value_names, value_values
+    ):
+        """
+        Upsert, many times.
+
+        Args:
+            table (str): The table to upsert into
+            key_names (list[str]): The key column names.
+            key_values (list[list]): A list of each row's key column values.
+            value_names (list[str]): The value column names. If empty, no
+                values will be used, even if value_values is provided.
+            value_values (list[list]): A list of each row's value column values.
+        Returns:
+            None
+        """
+        if (
+            self.database_engine.can_native_upsert
+            and table not in self._unsafe_to_upsert_tables
+        ):
+            return self._simple_upsert_many_txn_native_upsert(
+                txn, table, key_names, key_values, value_names, value_values
+            )
+        else:
+            return self._simple_upsert_many_txn_emulated(
+                txn, table, key_names, key_values, value_names, value_values
+            )
+
+    def _simple_upsert_many_txn_emulated(
+        self, txn, table, key_names, key_values, value_names, value_values
+    ):
+        """
+        Upsert, many times, but without native UPSERT support or batching.
+
+        Args:
+            table (str): The table to upsert into
+            key_names (list[str]): The key column names.
+            key_values (list[list]): A list of each row's key column values.
+            value_names (list[str]): The value column names. If empty, no
+                values will be used, even if value_values is provided.
+            value_values (list[list]): A list of each row's value column values.
+        Returns:
+            None
+        """
+        # No value columns, therefore make a blank list so that the following
+        # zip() works correctly.
+        if not value_names:
+            value_values = [() for x in range(len(key_values))]
+
+        for keyv, valv in zip(key_values, value_values):
+            _keys = {x: y for x, y in zip(key_names, keyv)}
+            _vals = {x: y for x, y in zip(value_names, valv)}
+
+            self._simple_upsert_txn_emulated(txn, table, _keys, _vals)
+
+    def _simple_upsert_many_txn_native_upsert(
+        self, txn, table, key_names, key_values, value_names, value_values
+    ):
+        """
+        Upsert, many times, using batching where possible.
+
+        Args:
+            table (str): The table to upsert into
+            key_names (list[str]): The key column names.
+            key_values (list[list]): A list of each row's key column values.
+            value_names (list[str]): The value column names. If empty, no
+                values will be used, even if value_values is provided.
+            value_values (list[list]): A list of each row's value column values.
+        Returns:
+            None
+        """
+        allnames = []
+        allnames.extend(key_names)
+        allnames.extend(value_names)
+
+        if not value_names:
+            # No value columns: resolve conflicts with DO NOTHING, and use
+            # blank value tuples so that the arg-building zip() below works.
+            latter = "NOTHING"
+            value_values = [() for x in range(len(key_values))]
+        else:
+            latter = (
+                "UPDATE SET " + ", ".join(k + "=EXCLUDED." + k for k in value_names)
+            )
+
+        sql = "INSERT INTO %s (%s) VALUES (%s) ON CONFLICT (%s) DO %s" % (
+            table,
+            ", ".join(k for k in allnames),
+            ", ".join("?" for _ in allnames),
+            ", ".join(key_names),
+            latter,
+        )
+
+        args = []
+
+        for x, y in zip(key_values, value_values):
+            args.append(tuple(x) + tuple(y))
+
+        return txn.execute_batch(sql, args)
+
     def _simple_select_one(self, table, keyvalues, retcols,
                            allow_none=False, desc="_simple_select_one"):
         """Executes a SELECT query on the named table, which is expected to
@@ -849,9 +1157,9 @@ class SQLBaseStore(object):
         rowcount = cls._simple_update_txn(txn, table, keyvalues, updatevalues)
 
         if rowcount == 0:
-            raise StoreError(404, "No row found")
+            raise StoreError(404, "No row found (%s)" % (table,))
         if rowcount > 1:
-            raise StoreError(500, "More than one row matched")
+            raise StoreError(500, "More than one row matched (%s)" % (table,))
 
     @staticmethod
     def _simple_select_one_txn(txn, table, keyvalues, retcols,
@@ -868,9 +1176,9 @@ class SQLBaseStore(object):
         if not row:
             if allow_none:
                 return None
-            raise StoreError(404, "No row found")
+            raise StoreError(404, "No row found (%s)" % (table,))
         if txn.rowcount > 1:
-            raise StoreError(500, "More than one row matched")
+            raise StoreError(500, "More than one row matched (%s)" % (table,))
 
         return dict(zip(retcols, row))
 
@@ -902,9 +1210,9 @@ class SQLBaseStore(object):
 
         txn.execute(sql, list(keyvalues.values()))
         if txn.rowcount == 0:
-            raise StoreError(404, "No row found")
+            raise StoreError(404, "No row found (%s)" % (table,))
         if txn.rowcount > 1:
-            raise StoreError(500, "more than one row matched")
+            raise StoreError(500, "More than one row matched (%s)" % (table,))
 
     def _simple_delete(self, table, keyvalues, desc):
         return self.runInteraction(
@@ -1005,6 +1313,90 @@ class SQLBaseStore(object):
         be invalidated.
         """
         txn.call_after(cache_func.invalidate, keys)
+        self._send_invalidation_to_replication(txn, cache_func.__name__, keys)
+
+    def _invalidate_state_caches_and_stream(self, txn, room_id, members_changed):
+        """Special case invalidation of caches based on current state.
+
+        We special case this so that we can batch the cache invalidations into a
+        single replication poke.
+
+        Args:
+            txn
+            room_id (str): Room where state changed
+            members_changed (iterable[str]): The user_ids of members that have changed
+        """
+        txn.call_after(self._invalidate_state_caches, room_id, members_changed)
+
+        # We need to be careful that the size of the `members_changed` list
+        # isn't so large that it causes problems sending over replication, so we
+        # send them in chunks.
+        # Max line length is 16K, and max user ID length is 255, so 50 should
+        # be safe.
+        for chunk in batch_iter(members_changed, 50):
+            keys = itertools.chain([room_id], chunk)
+            self._send_invalidation_to_replication(
+                txn, _CURRENT_STATE_CACHE_NAME, keys,
+            )
+
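batch_iter here comes from synapse.util (it is imported elsewhere in this diff); a self-contained sketch of the chunking behaviour the loop above relies on:

    import itertools

    def batch_iter_sketch(iterable, size):
        # Yield tuples of at most `size` items, in order; a stand-in for
        # synapse.util.batch_iter, not the real implementation.
        it = iter(iterable)
        chunk = tuple(itertools.islice(it, size))
        while chunk:
            yield chunk
            chunk = tuple(itertools.islice(it, size))

    # e.g. 120 changed members go out as chunks of 50, 50 and 20, each
    # chunk prefixed with the room_id before being sent to replication.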
+    def _invalidate_state_caches(self, room_id, members_changed):
+        """Invalidates caches that are based on the current state, but does
+        not stream invalidations down replication.
+
+        Args:
+            room_id (str): Room where state changed
+            members_changed (iterable[str]): The user_ids of members that have
+                changed
+        """
+        for member in members_changed:
+            self._attempt_to_invalidate_cache(
+                "get_rooms_for_user_with_stream_ordering", (member,),
+            )
+
+        for host in set(get_domain_from_id(u) for u in members_changed):
+            self._attempt_to_invalidate_cache(
+                "is_host_joined", (room_id, host,),
+            )
+            self._attempt_to_invalidate_cache(
+                "was_host_joined", (room_id, host,),
+            )
+
+        self._attempt_to_invalidate_cache(
+            "get_users_in_room", (room_id,),
+        )
+        self._attempt_to_invalidate_cache(
+            "get_room_summary", (room_id,),
+        )
+        self._attempt_to_invalidate_cache(
+            "get_current_state_ids", (room_id,),
+        )
+
+    def _attempt_to_invalidate_cache(self, cache_name, key):
+        """Attempts to invalidate the cache of the given name, ignoring if the
+        cache doesn't exist. Mainly used for invalidating caches on workers,
+        where they may not have the cache.
+
+        Args:
+            cache_name (str)
+            key (tuple)
+        """
+        try:
+            getattr(self, cache_name).invalidate(key)
+        except AttributeError:
+            # We probably haven't pulled in the cache in this worker,
+            # which is fine.
+            pass
+
+    def _send_invalidation_to_replication(self, txn, cache_name, keys):
+        """Notifies replication that given cache has been invalidated.
+
+        Note that this does *not* invalidate the cache locally.
+
+        Args:
+            txn
+            cache_name (str)
+            keys (iterable[str])
+        """
 
         if isinstance(self.database_engine, PostgresEngine):
             # get_next() returns a context manager which is designed to wrap
@@ -1022,7 +1414,7 @@ class SQLBaseStore(object):
                 table="cache_invalidation_stream",
                 values={
                     "stream_id": stream_id,
-                    "cache_func": cache_func.__name__,
+                    "cache_func": cache_name,
                     "keys": list(keys),
                     "invalidation_ts": self.clock.time_msec(),
                 }
@@ -1211,6 +1603,14 @@ class SQLBaseStore(object):
 
         return cls.cursor_to_dict(txn)
 
+    @property
+    def database_engine_name(self):
+        return self.database_engine.module.__name__
+
+    def get_server_version(self):
+        """Returns a string describing the server version number"""
+        return self.database_engine.server_version
+
 
 class _RollbackButIsFineException(Exception):
     """ This exception is used to rollback a transaction without implying
diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py
index 5fe1ca2de7..60cdc884e6 100644
--- a/synapse/storage/background_updates.py
+++ b/synapse/storage/background_updates.py
@@ -240,7 +240,7 @@ class BackgroundUpdateStore(SQLBaseStore):
         * An integer count of the number of items to update in this batch.
 
         The handler should return a deferred integer count of items updated.
-        The hander is responsible for updating the progress of the update.
+        The handler is responsible for updating the progress of the update.
 
         Args:
             update_name(str): The name of the update that this code handles.
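To make that contract concrete, a minimal handler sketch under the rules above; the update name, table and column are hypothetical:

    @defer.inlineCallbacks
    def _example_update(self, progress, batch_size):
        last_id = progress.get("last_id", 0)

        def do_batch(txn):
            txn.execute(
                "SELECT id FROM example_table WHERE id > ? ORDER BY id LIMIT ?",
                (last_id, batch_size),
            )
            rows = txn.fetchall()
            if rows:
                # the handler, not the framework, records its own progress
                self._background_update_progress_txn(
                    txn, "example_update", {"last_id": rows[-1][0]}
                )
            return len(rows)

        count = yield self.runInteraction("example_update", do_batch)
        if count == 0:
            yield self._end_background_update("example_update")
        defer.returnValue(count)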
diff --git a/synapse/storage/client_ips.py b/synapse/storage/client_ips.py
index 9ad17b7c25..9c21362226 100644
--- a/synapse/storage/client_ips.py
+++ b/synapse/storage/client_ips.py
@@ -65,7 +65,32 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
             columns=["last_seen"],
         )
 
-        # (user_id, access_token, ip) -> (user_agent, device_id, last_seen)
+        self.register_background_update_handler(
+            "user_ips_analyze",
+            self._analyze_user_ip,
+        )
+
+        self.register_background_update_handler(
+            "user_ips_remove_dupes",
+            self._remove_user_ip_dupes,
+        )
+
+        # Register a unique index
+        self.register_background_index_update(
+            "user_ips_device_unique_index",
+            index_name="user_ips_user_token_ip_unique_index",
+            table="user_ips",
+            columns=["user_id", "access_token", "ip"],
+            unique=True,
+        )
+
+        # Drop the old non-unique index
+        self.register_background_update_handler(
+            "user_ips_drop_nonunique_index",
+            self._remove_user_ip_nonunique,
+        )
+
+        # (user_id, access_token, ip,) -> (user_agent, device_id, last_seen)
         self._batch_row_update = {}
 
         self._client_ip_looper = self._clock.looping_call(
@@ -76,6 +101,205 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
         )
 
     @defer.inlineCallbacks
+    def _remove_user_ip_nonunique(self, progress, batch_size):
+        def f(conn):
+            txn = conn.cursor()
+            txn.execute(
+                "DROP INDEX IF EXISTS user_ips_user_ip"
+            )
+            txn.close()
+
+        yield self.runWithConnection(f)
+        yield self._end_background_update("user_ips_drop_nonunique_index")
+        defer.returnValue(1)
+
+    @defer.inlineCallbacks
+    def _analyze_user_ip(self, progress, batch_size):
+        # Background update to analyze user_ips table before we run the
+        # deduplication background update. The table may not have been analyzed
+        # for ages due to the table locks.
+        #
+        # This will lock out the naive upserts to user_ips while it happens, but
+        # the analyze should be quick (28GB table takes ~10s)
+        def user_ips_analyze(txn):
+            txn.execute("ANALYZE user_ips")
+
+        yield self.runInteraction(
+            "user_ips_analyze", user_ips_analyze
+        )
+
+        yield self._end_background_update("user_ips_analyze")
+
+        defer.returnValue(1)
+
+    @defer.inlineCallbacks
+    def _remove_user_ip_dupes(self, progress, batch_size):
+        # This function works by scanning the user_ips table in batches
+        # based on `last_seen`. For each row in a batch it searches the rest of
+        # the table for duplicates; if any are found, they are removed and
+        # replaced with a single suitable row.
+
+        # Fetch the start of the batch
+        begin_last_seen = progress.get("last_seen", 0)
+
+        def get_last_seen(txn):
+            txn.execute(
+                """
+                SELECT last_seen FROM user_ips
+                WHERE last_seen > ?
+                ORDER BY last_seen
+                LIMIT 1
+                OFFSET ?
+                """,
+                (begin_last_seen, batch_size)
+            )
+            row = txn.fetchone()
+            if row:
+                return row[0]
+            else:
+                return None
+
+        # Get a last seen that has roughly `batch_size` since `begin_last_seen`
+        end_last_seen = yield self.runInteraction(
+            "user_ips_dups_get_last_seen", get_last_seen
+        )
+
+        # If it returns None, then we're processing the last batch
+        last = end_last_seen is None
+
+        logger.info(
+            "Scanning for duplicate 'user_ips' rows in range: %s <= last_seen < %s",
+            begin_last_seen, end_last_seen,
+        )
+
+        def remove(txn):
+            # This works by looking at all entries in the given time span, and
+            # then for each (user_id, access_token, ip) tuple in that range
+            # checking for any duplicates in the rest of the table (via a join).
+            # It then only returns entries which have duplicates, and the max
+            # last_seen across all duplicates, which can then be used to delete
+            # all other duplicates.
+            # It is efficient due to the existence of (user_id, access_token,
+            # ip) and (last_seen) indices.
+
+            # Define the search space, which requires handling the last batch in
+            # a different way
+            if last:
+                clause = "? <= last_seen"
+                args = (begin_last_seen,)
+            else:
+                clause = "? <= last_seen AND last_seen < ?"
+                args = (begin_last_seen, end_last_seen)
+
+            # (Note: The DISTINCT in the inner query is important to ensure that
+            # the COUNT(*) is accurate, otherwise double counting may happen due
+            # to the join effectively being a cross product)
+            txn.execute(
+                """
+                SELECT user_id, access_token, ip,
+                       MAX(device_id), MAX(user_agent), MAX(last_seen),
+                       COUNT(*)
+                FROM (
+                    SELECT DISTINCT user_id, access_token, ip
+                    FROM user_ips
+                    WHERE {}
+                ) c
+                INNER JOIN user_ips USING (user_id, access_token, ip)
+                GROUP BY user_id, access_token, ip
+                HAVING count(*) > 1
+                """.format(clause),
+                args
+            )
+            res = txn.fetchall()
+
+            # We've got some duplicates
+            for i in res:
+                user_id, access_token, ip, device_id, user_agent, last_seen, count = i
+
+                # We want to delete the duplicates so we end up with only a
+                # single row.
+                #
+                # The naive way of doing this would be just to delete all rows
+                # and reinsert a constructed row. However, if there are a lot of
+                # duplicate rows this can cause the table to grow a lot, which
+                # can be problematic in two ways:
+                #   1. If user_ips is already large then this can cause the
+                #      table to rapidly grow, potentially filling the disk.
+                #   2. Reinserting a lot of rows can confuse the table
+                #      statistics for postgres, causing it to not use the
+                #      correct indices for the query above, resulting in a full
+                #      table scan. This is incredibly slow for large tables and
+                #      can kill database performance. (This seems to mainly
+                #      happen for the last batch, where the clause is simply
+                #      `? <= last_seen`)
+                #
+                # So instead we want to delete all but *one* of the duplicate
+                # rows. That is hard to do reliably, so we cheat and do a two
+                # step process:
+                #   1. Delete all rows with a last_seen strictly less than the
+                #      max last_seen. This hopefully results in deleting all but
+                #      one row the majority of the time, but there may be
+                #      duplicate last_seen values.
+                #   2. If multiple rows remain, we fall back to the naive method
+                #      and simply delete all rows and reinsert.
+                #
+                # Note that this relies on no new duplicate rows being inserted,
+                # but if that is happening then this entire process is futile
+                # anyway.
+
+                # Do step 1:
+
+                txn.execute(
+                    """
+                    DELETE FROM user_ips
+                    WHERE user_id = ? AND access_token = ? AND ip = ? AND last_seen < ?
+                    """,
+                    (user_id, access_token, ip, last_seen)
+                )
+                if txn.rowcount == count - 1:
+                    # We deleted all but one of the duplicate rows, i.e. there
+                    # is exactly one remaining and so there is nothing left to
+                    # do.
+                    continue
+                elif txn.rowcount >= count:
+                    raise Exception(
+                        "We deleted more duplicate rows from 'user_ips' than expected",
+                    )
+
+                # The previous step didn't delete enough rows, so we fall back to
+                # step 2:
+
+                # Drop all the duplicates
+                txn.execute(
+                    """
+                    DELETE FROM user_ips
+                    WHERE user_id = ? AND access_token = ? AND ip = ?
+                    """,
+                    (user_id, access_token, ip)
+                )
+
+                # Add in one to be the last_seen
+                txn.execute(
+                    """
+                    INSERT INTO user_ips
+                    (user_id, access_token, ip, device_id, user_agent, last_seen)
+                    VALUES (?, ?, ?, ?, ?, ?)
+                    """,
+                    (user_id, access_token, ip, device_id, user_agent, last_seen)
+                )
+
+            self._background_update_progress_txn(
+                txn, "user_ips_remove_dupes", {"last_seen": end_last_seen}
+            )
+
+        yield self.runInteraction("user_ips_dups_remove", remove)
+
+        if last:
+            yield self._end_background_update("user_ips_remove_dupes")
+
+        defer.returnValue(batch_size)
+
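A worked example of the two-step delete above, with made-up rows. Suppose one (user_id, access_token, ip) triple has three duplicates with last_seen values 10, 20 and 20 (so count is 3 and the max last_seen is 20):

    # step 1 deletes last_seen < 20: one row goes, and rowcount (1) is not
    #        count - 1 (2), because two rows share the max last_seen
    # step 2 deletes all three-column matches (the two remaining rows) and
    #        re-inserts a single constructed row with last_seen = 20
    #
    # Had the values been 10, 15 and 20, step 1 alone would delete two rows
    # (rowcount == count - 1) and the loop would `continue` before step 2.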
+    @defer.inlineCallbacks
     def insert_client_ip(self, user_id, access_token, ip, user_agent, device_id,
                          now=None):
         if not now:
@@ -114,7 +338,10 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
         )
 
     def _update_client_ips_batch_txn(self, txn, to_update):
-        self.database_engine.lock_table(txn, "user_ips")
+        if "user_ips" in self._unsafe_to_upsert_tables or (
+            not self.database_engine.can_native_upsert
+        ):
+            self.database_engine.lock_table(txn, "user_ips")
 
         for entry in iteritems(to_update):
             (user_id, access_token, ip), (user_agent, device_id, last_seen) = entry
@@ -127,10 +354,10 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
                         "user_id": user_id,
                         "access_token": access_token,
                         "ip": ip,
-                        "user_agent": user_agent,
-                        "device_id": device_id,
                     },
                     values={
+                        "user_agent": user_agent,
+                        "device_id": device_id,
                         "last_seen": last_seen,
                     },
                     lock=False,
@@ -227,7 +454,7 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
         results = {}
 
         for key in self._batch_row_update:
-            uid, access_token, ip = key
+            uid, access_token, ip, = key
             if uid == user_id:
                 user_agent, _, last_seen = self._batch_row_update[key]
                 results[(access_token, ip)] = (user_agent, last_seen)
diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py
index d10ff9e4b9..ecdab34e7d 100644
--- a/synapse/storage/devices.py
+++ b/synapse/storage/devices.py
@@ -22,14 +22,19 @@ from twisted.internet import defer
 
 from synapse.api.errors import StoreError
 from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.storage.background_updates import BackgroundUpdateStore
 from synapse.util.caches.descriptors import cached, cachedInlineCallbacks, cachedList
 
-from ._base import Cache, SQLBaseStore, db_to_json
+from ._base import Cache, db_to_json
 
 logger = logging.getLogger(__name__)
 
+DROP_DEVICE_LIST_STREAMS_NON_UNIQUE_INDEXES = (
+    "drop_device_list_streams_non_unique_indexes"
+)
 
-class DeviceStore(SQLBaseStore):
+
+class DeviceStore(BackgroundUpdateStore):
     def __init__(self, db_conn, hs):
         super(DeviceStore, self).__init__(db_conn, hs)
 
@@ -52,6 +57,30 @@ class DeviceStore(SQLBaseStore):
             columns=["user_id", "device_id"],
         )
 
+        # create a unique index on device_lists_remote_cache
+        self.register_background_index_update(
+            "device_lists_remote_cache_unique_idx",
+            index_name="device_lists_remote_cache_unique_id",
+            table="device_lists_remote_cache",
+            columns=["user_id", "device_id"],
+            unique=True,
+        )
+
+        # And one on device_lists_remote_extremeties
+        self.register_background_index_update(
+            "device_lists_remote_extremeties_unique_idx",
+            index_name="device_lists_remote_extremeties_unique_idx",
+            table="device_lists_remote_extremeties",
+            columns=["user_id"],
+            unique=True,
+        )
+
+        # once they complete, we can remove the old non-unique indexes.
+        self.register_background_update_handler(
+            DROP_DEVICE_LIST_STREAMS_NON_UNIQUE_INDEXES,
+            self._drop_device_list_streams_non_unique_indexes,
+        )
+
     @defer.inlineCallbacks
     def store_device(self, user_id, device_id,
                      initial_device_display_name):
@@ -239,7 +268,19 @@ class DeviceStore(SQLBaseStore):
 
     def update_remote_device_list_cache_entry(self, user_id, device_id, content,
                                               stream_id):
-        """Updates a single user's device in the cache.
+        """Updates a single device in the cache of a remote user's devicelist.
+
+        Note: assumes that we are the only thread that can be updating this user's
+        device list.
+
+        Args:
+            user_id (str): User to update device list for
+            device_id (str): ID of the device being updated
+            content (dict): new data on this device
+            stream_id (int): the version of the device list
+
+        Returns:
+            Deferred[None]
         """
         return self.runInteraction(
             "update_remote_device_list_cache_entry",
@@ -272,7 +313,11 @@ class DeviceStore(SQLBaseStore):
                 },
                 values={
                     "content": json.dumps(content),
-                }
+                },
+
+                # we don't need to lock, because we assume we are the only thread
+                # updating this user's devices.
+                lock=False,
             )
 
         txn.call_after(self._get_cached_user_device.invalidate, (user_id, device_id,))
@@ -289,11 +334,26 @@ class DeviceStore(SQLBaseStore):
             },
             values={
                 "stream_id": stream_id,
-            }
+            },
+
+            # again, we can assume we are the only thread updating this user's
+            # extremity.
+            lock=False,
         )
 
     def update_remote_device_list_cache(self, user_id, devices, stream_id):
-        """Replace the cache of the remote user's devices.
+        """Replace the entire cache of the remote user's devices.
+
+        Note: assumes that we are the only thread that can be updating this user's
+        device list.
+
+        Args:
+            user_id (str): User to update device list for
+            devices (list[dict]): list of device objects supplied over federation
+            stream_id (int): the version of the device list
+
+        Returns:
+            Deferred[None]
         """
         return self.runInteraction(
             "update_remote_device_list_cache",
@@ -338,7 +398,11 @@ class DeviceStore(SQLBaseStore):
             },
             values={
                 "stream_id": stream_id,
-            }
+            },
+
+            # we don't need to lock, because we can assume we are the only thread
+            # updating this user's extremity.
+            lock=False,
         )
 
     def get_devices_by_remote(self, destination, from_stream_id):
@@ -589,10 +653,14 @@ class DeviceStore(SQLBaseStore):
         combined list of changes to devices, and which destinations need to be
         poked. `destination` may be None if no destinations need to be poked.
         """
+        # We do a group by here as there can be a large number of duplicate
+        # entries, since we throw away device IDs.
         sql = """
-            SELECT stream_id, user_id, destination FROM device_lists_stream
+            SELECT MAX(stream_id) AS stream_id, user_id, destination
+            FROM device_lists_stream
             LEFT JOIN device_lists_outbound_pokes USING (stream_id, user_id, device_id)
             WHERE ? < stream_id AND stream_id <= ?
+            GROUP BY user_id, destination
         """
         return self._execute(
             "get_all_device_list_changes_for_remotes", None,
@@ -718,3 +786,19 @@ class DeviceStore(SQLBaseStore):
             "_prune_old_outbound_device_pokes",
             _prune_txn,
         )
+
+    @defer.inlineCallbacks
+    def _drop_device_list_streams_non_unique_indexes(self, progress, batch_size):
+        def f(conn):
+            txn = conn.cursor()
+            txn.execute(
+                "DROP INDEX IF EXISTS device_lists_remote_cache_id"
+            )
+            txn.execute(
+                "DROP INDEX IF EXISTS device_lists_remote_extremeties_id"
+            )
+            txn.close()
+
+        yield self.runWithConnection(f)
+        yield self._end_background_update(DROP_DEVICE_LIST_STREAMS_NON_UNIQUE_INDEXES)
+        defer.returnValue(1)
diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py
index f25ded2295..9a3aec759e 100644
--- a/synapse/storage/e2e_room_keys.py
+++ b/synapse/storage/e2e_room_keys.py
@@ -118,6 +118,11 @@ class EndToEndRoomKeyStore(SQLBaseStore):
             these room keys.
         """
 
+        try:
+            version = int(version)
+        except ValueError:
+            defer.returnValue({'rooms': {}})
+
         keyvalues = {
             "user_id": user_id,
             "version": version,
@@ -177,7 +182,7 @@ class EndToEndRoomKeyStore(SQLBaseStore):
 
         keyvalues = {
             "user_id": user_id,
-            "version": version,
+            "version": int(version),
         }
         if room_id:
             keyvalues['room_id'] = room_id
@@ -212,14 +217,23 @@ class EndToEndRoomKeyStore(SQLBaseStore):
         Raises:
             StoreError: with code 404 if there are no e2e_room_keys_versions present
         Returns:
-            A deferred dict giving the info metadata for this backup version
+            A deferred dict giving the info metadata for this backup version, with
+            fields including:
+                version(str)
+                algorithm(str)
+                auth_data(object): opaque dict supplied by the client
         """
 
         def _get_e2e_room_keys_version_info_txn(txn):
             if version is None:
                 this_version = self._get_current_version(txn, user_id)
             else:
-                this_version = version
+                try:
+                    this_version = int(version)
+                except ValueError:
+                    # Our versions are all ints so if we can't convert it to an integer,
+                    # it isn't there.
+                    raise StoreError(404, "No row found")
 
             result = self._simple_select_one_txn(
                 txn,
@@ -236,6 +250,7 @@ class EndToEndRoomKeyStore(SQLBaseStore):
                 ),
             )
             result["auth_data"] = json.loads(result["auth_data"])
+            result["version"] = str(result["version"])
             return result
 
         return self.runInteraction(
@@ -283,6 +298,27 @@ class EndToEndRoomKeyStore(SQLBaseStore):
             "create_e2e_room_keys_version_txn", _create_e2e_room_keys_version_txn
         )
 
+    def update_e2e_room_keys_version(self, user_id, version, info):
+        """Update a given backup version
+
+        Args:
+            user_id(str): the user whose backup version we're updating
+            version(str): the version ID of the backup version we're updating
+            info(dict): the new backup version info to store
+        """
+
+        return self._simple_update(
+            table="e2e_room_keys_versions",
+            keyvalues={
+                "user_id": user_id,
+                "version": version,
+            },
+            updatevalues={
+                "auth_data": json.dumps(info["auth_data"]),
+            },
+            desc="update_e2e_room_keys_version"
+        )
+
     def delete_e2e_room_keys_version(self, user_id, version=None):
         """Delete a given backup version of the user's room keys.
         Doesn't delete their actual key data.
diff --git a/synapse/storage/end_to_end_keys.py b/synapse/storage/end_to_end_keys.py
index 1f1721e820..2a0f6cfca9 100644
--- a/synapse/storage/end_to_end_keys.py
+++ b/synapse/storage/end_to_end_keys.py
@@ -40,7 +40,10 @@ class EndToEndKeyStore(SQLBaseStore):
                 allow_none=True,
             )
 
-            new_key_json = encode_canonical_json(device_keys)
+            # In py3 we need old_key_json to match new_key_json type. The DB
+            # returns unicode while encode_canonical_json returns bytes.
+            new_key_json = encode_canonical_json(device_keys).decode("utf-8")
+
             if old_key_json == new_key_json:
                 return False
 
diff --git a/synapse/storage/engines/__init__.py b/synapse/storage/engines/__init__.py
index e2f9de8451..ff5ef97ca8 100644
--- a/synapse/storage/engines/__init__.py
+++ b/synapse/storage/engines/__init__.py
@@ -18,7 +18,7 @@ import platform
 
 from ._base import IncorrectDatabaseSetup
 from .postgres import PostgresEngine
-from .sqlite3 import Sqlite3Engine
+from .sqlite import Sqlite3Engine
 
 SUPPORTED_MODULE = {
     "sqlite3": Sqlite3Engine,
diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py
index 42225f8a2a..dc3238501c 100644
--- a/synapse/storage/engines/postgres.py
+++ b/synapse/storage/engines/postgres.py
@@ -23,6 +23,7 @@ class PostgresEngine(object):
         self.module = database_module
         self.module.extensions.register_type(self.module.extensions.UNICODE)
         self.synchronous_commit = database_config.get("synchronous_commit", True)
+        self._version = None   # unknown as yet
 
     def check_database(self, txn):
         txn.execute("SHOW SERVER_ENCODING")
@@ -38,6 +39,13 @@ class PostgresEngine(object):
         return sql.replace("?", "%s")
 
     def on_new_connection(self, db_conn):
+
+        # Get the version of PostgreSQL that we're using. As per the psycopg2
+        # docs: The number is formed by converting the major, minor, and
+        # revision numbers into two-decimal-digit numbers and appending them
+        # together. For example, version 8.1.5 will be returned as 80105
+        self._version = db_conn.server_version
+
         db_conn.set_isolation_level(
             self.module.extensions.ISOLATION_LEVEL_REPEATABLE_READ
         )
@@ -54,6 +62,13 @@ class PostgresEngine(object):
 
         cursor.close()
 
+    @property
+    def can_native_upsert(self):
+        """
+        Can we use native UPSERTs? This requires PostgreSQL 9.5+.
+        """
+        return self._version >= 90500
+
     def is_deadlock(self, error):
         if isinstance(error, self.module.DatabaseError):
             # https://www.postgresql.org/docs/current/static/errcodes-appendix.html
@@ -73,3 +88,27 @@ class PostgresEngine(object):
         """
         txn.execute("SELECT nextval('state_group_id_seq')")
         return txn.fetchone()[0]
+
+    @property
+    def server_version(self):
+        """Returns a string giving the server version. For example: '8.1.5'
+
+        Returns:
+            string
+        """
+        # note that this is a bit of a hack because it relies on on_new_connection
+        # having been called at least once. Still, that should be a safe bet here.
+        numver = self._version
+        assert numver is not None
+
+        # https://www.postgresql.org/docs/current/libpq-status.html#LIBPQ-PQSERVERVERSION
+        if numver >= 100000:
+            return "%i.%i" % (
+                numver / 10000, numver % 10000,
+            )
+        else:
+            return "%i.%i.%i" % (
+                numver / 10000,
+                (numver % 10000) / 100,
+                numver % 100,
+            )
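Worked examples of that decoding (values follow the libpq convention cited above):

    # numver = 80105  -> "8.1.5"   (8, 105 // 100 == 1, 105 % 100 == 5)
    # numver = 90605  -> "9.6.5"
    # numver = 100003 -> "10.3"    (>= 100000: two-part version, 10 and 3)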
diff --git a/synapse/storage/engines/sqlite3.py b/synapse/storage/engines/sqlite.py
index 19949fc474..1bcd5b99a4 100644
--- a/synapse/storage/engines/sqlite3.py
+++ b/synapse/storage/engines/sqlite.py
@@ -30,6 +30,14 @@ class Sqlite3Engine(object):
         self._current_state_group_id = None
         self._current_state_group_id_lock = threading.Lock()
 
+    @property
+    def can_native_upsert(self):
+        """
+        Do we support native UPSERTs? This requires SQLite3 3.24+, plus some
+        more work we haven't done yet to tell what was inserted vs updated.
+        """
+        return self.module.sqlite_version_info >= (3, 24, 0)
+
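A quick way to see which branch a given host takes, using only the stdlib driver (a sketch):

    import sqlite3

    print(sqlite3.sqlite_version_info)                 # e.g. (3, 22, 0)
    print(sqlite3.sqlite_version_info >= (3, 24, 0))   # native upsert usable?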
     def check_database(self, txn):
         pass
 
@@ -62,6 +70,15 @@ class Sqlite3Engine(object):
             self._current_state_group_id += 1
             return self._current_state_group_id
 
+    @property
+    def server_version(self):
+        """Gets a string giving the server version. For example: '3.22.0'
+
+        Returns:
+            string
+        """
+        return "%i.%i.%i" % self.module.sqlite_version_info
+
 
 # Following functions taken from: https://github.com/coleifer/peewee
 
diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py
index 3faca2a042..38809ed0fc 100644
--- a/synapse/storage/event_federation.py
+++ b/synapse/storage/event_federation.py
@@ -125,6 +125,29 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore,
 
         return dict(txn)
 
+    @defer.inlineCallbacks
+    def get_max_depth_of(self, event_ids):
+        """Returns the max depth of a set of event IDs
+
+        Args:
+            event_ids (list[str])
+
+        Returns:
+            Deferred[int]
+        """
+        rows = yield self._simple_select_many_batch(
+            table="events",
+            column="event_id",
+            iterable=event_ids,
+            retcols=("depth",),
+            desc="get_max_depth_of",
+        )
+
+        if not rows:
+            defer.returnValue(0)
+        else:
+            defer.returnValue(max(row["depth"] for row in rows))
+
     def _get_oldest_events_in_room_txn(self, txn, room_id):
         return self._simple_select_onecol_txn(
             txn,
@@ -477,7 +500,7 @@ class EventFederationStore(EventFederationWorkerStore):
                     "is_state": False,
                 }
                 for ev in events
-                for e_id, _ in ev.prev_events
+                for e_id in ev.prev_event_ids()
             ],
         )
 
@@ -510,7 +533,7 @@ class EventFederationStore(EventFederationWorkerStore):
 
         txn.executemany(query, [
             (e_id, ev.room_id, e_id, ev.room_id, e_id, ev.room_id, False)
-            for ev in events for e_id, _ in ev.prev_events
+            for ev in events for e_id in ev.prev_event_ids()
             if not ev.internal_metadata.is_outlier()
         ])
 
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 8881b009df..06db9e56e6 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -38,6 +38,7 @@ from synapse.state import StateResolutionStore
 from synapse.storage.background_updates import BackgroundUpdateStore
 from synapse.storage.event_federation import EventFederationStore
 from synapse.storage.events_worker import EventsWorkerStore
+from synapse.storage.state import StateGroupWorkerStore
 from synapse.types import RoomStreamToken, get_domain_from_id
 from synapse.util import batch_iter
 from synapse.util.async_helpers import ObservableDeferred
@@ -205,7 +206,8 @@ def _retry_on_integrity_error(func):
 
 # inherits from EventFederationStore so that we can call _update_backward_extremities
 # and _handle_mult_prev_events (though arguably those could both be moved in here)
-class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore):
+class EventsStore(StateGroupWorkerStore, EventFederationStore, EventsWorkerStore,
+                  BackgroundUpdateStore):
     EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts"
     EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url"
 
@@ -414,7 +416,7 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
                             )
                             if len_1:
                                 all_single_prev_not_state = all(
-                                    len(event.prev_events) == 1
+                                    len(event.prev_event_ids()) == 1
                                     and not event.is_state()
                                     for event, ctx in ev_ctx_rm
                                 )
@@ -438,7 +440,7 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
                                 # guess this by looking at the prev_events and checking
                                 # if they match the current forward extremities.
                                 for ev, _ in ev_ctx_rm:
-                                    prev_event_ids = set(e for e, _ in ev.prev_events)
+                                    prev_event_ids = set(ev.prev_event_ids())
                                     if latest_event_ids == prev_event_ids:
                                         state_delta_reuse_delta_counter.inc()
                                         break
@@ -549,7 +551,7 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
         result.difference_update(
             e_id
             for event in new_events
-            for e_id, _ in event.prev_events
+            for e_id in event.prev_event_ids()
         )
 
         # Finally, remove any events which are prev_events of any existing events.
@@ -737,7 +739,18 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
         }
 
         events_map = {ev.event_id: ev for ev, _ in events_context}
-        room_version = yield self.get_room_version(room_id)
+
+        # We need to get the room version, which is in the create event.
+        # Normally that'd be in the database, but it's also possible that we're
+        # currently trying to persist it.
+        room_version = None
+        for ev, _ in events_context:
+            if ev.type == EventTypes.Create and ev.state_key == "":
+                room_version = ev.content.get("room_version", "1")
+                break
+
+        if not room_version:
+            room_version = yield self.get_room_version(room_id)
 
         logger.debug("calling resolve_state_groups from preserve_events")
         res = yield self._state_resolution_handler.resolve_state_groups(
@@ -867,7 +880,7 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
                     "auth_id": auth_id,
                 }
                 for event, _ in events_and_contexts
-                for auth_id, _ in event.auth_events
+                for auth_id in event.auth_event_ids()
                 if event.is_state()
             ],
         )
@@ -891,105 +904,82 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
 
     def _update_current_state_txn(self, txn, state_delta_by_room, max_stream_order):
         for room_id, current_state_tuple in iteritems(state_delta_by_room):
-                to_delete, to_insert = current_state_tuple
-
-                # First we add entries to the current_state_delta_stream. We
-                # do this before updating the current_state_events table so
-                # that we can use it to calculate the `prev_event_id`. (This
-                # allows us to not have to pull out the existing state
-                # unnecessarily).
-                sql = """
-                    INSERT INTO current_state_delta_stream
-                    (stream_id, room_id, type, state_key, event_id, prev_event_id)
-                    SELECT ?, ?, ?, ?, ?, (
-                        SELECT event_id FROM current_state_events
-                        WHERE room_id = ? AND type = ? AND state_key = ?
-                    )
-                """
-                txn.executemany(sql, (
-                    (
-                        max_stream_order, room_id, etype, state_key, None,
-                        room_id, etype, state_key,
-                    )
-                    for etype, state_key in to_delete
-                    # We sanity check that we're deleting rather than updating
-                    if (etype, state_key) not in to_insert
-                ))
-                txn.executemany(sql, (
-                    (
-                        max_stream_order, room_id, etype, state_key, ev_id,
-                        room_id, etype, state_key,
-                    )
-                    for (etype, state_key), ev_id in iteritems(to_insert)
-                ))
-
-                # Now we actually update the current_state_events table
+            to_delete, to_insert = current_state_tuple
 
-                txn.executemany(
-                    "DELETE FROM current_state_events"
-                    " WHERE room_id = ? AND type = ? AND state_key = ?",
-                    (
-                        (room_id, etype, state_key)
-                        for etype, state_key in itertools.chain(to_delete, to_insert)
-                    ),
+            # First we add entries to the current_state_delta_stream. We
+            # do this before updating the current_state_events table so
+            # that we can use it to calculate the `prev_event_id`. (This
+            # allows us to not have to pull out the existing state
+            # unnecessarily).
+            sql = """
+                INSERT INTO current_state_delta_stream
+                (stream_id, room_id, type, state_key, event_id, prev_event_id)
+                SELECT ?, ?, ?, ?, ?, (
+                    SELECT event_id FROM current_state_events
+                    WHERE room_id = ? AND type = ? AND state_key = ?
                 )
-
-                self._simple_insert_many_txn(
-                    txn,
-                    table="current_state_events",
-                    values=[
-                        {
-                            "event_id": ev_id,
-                            "room_id": room_id,
-                            "type": key[0],
-                            "state_key": key[1],
-                        }
-                        for key, ev_id in iteritems(to_insert)
-                    ],
+            """
+            txn.executemany(sql, (
+                (
+                    max_stream_order, room_id, etype, state_key, None,
+                    room_id, etype, state_key,
                 )
-
-                txn.call_after(
-                    self._curr_state_delta_stream_cache.entity_has_changed,
-                    room_id, max_stream_order,
+                for etype, state_key in to_delete
+                # We sanity check that we're deleting rather than updating
+                if (etype, state_key) not in to_insert
+            ))
+            txn.executemany(sql, (
+                (
+                    max_stream_order, room_id, etype, state_key, ev_id,
+                    room_id, etype, state_key,
                 )
+                for (etype, state_key), ev_id in iteritems(to_insert)
+            ))
 
-                # Invalidate the various caches
-
-                # Figure out the changes of membership to invalidate the
-                # `get_rooms_for_user` cache.
-                # We find out which membership events we may have deleted
-                # and which we have added, then we invlidate the caches for all
-                # those users.
-                members_changed = set(
-                    state_key
-                    for ev_type, state_key in itertools.chain(to_delete, to_insert)
-                    if ev_type == EventTypes.Member
-                )
+            # Now we actually update the current_state_events table
 
-                for member in members_changed:
-                    self._invalidate_cache_and_stream(
-                        txn, self.get_rooms_for_user_with_stream_ordering, (member,)
-                    )
+            txn.executemany(
+                "DELETE FROM current_state_events"
+                " WHERE room_id = ? AND type = ? AND state_key = ?",
+                (
+                    (room_id, etype, state_key)
+                    for etype, state_key in itertools.chain(to_delete, to_insert)
+                ),
+            )
 
-                for host in set(get_domain_from_id(u) for u in members_changed):
-                    self._invalidate_cache_and_stream(
-                        txn, self.is_host_joined, (room_id, host)
-                    )
-                    self._invalidate_cache_and_stream(
-                        txn, self.was_host_joined, (room_id, host)
-                    )
+            self._simple_insert_many_txn(
+                txn,
+                table="current_state_events",
+                values=[
+                    {
+                        "event_id": ev_id,
+                        "room_id": room_id,
+                        "type": key[0],
+                        "state_key": key[1],
+                    }
+                    for key, ev_id in iteritems(to_insert)
+                ],
+            )
 
-                self._invalidate_cache_and_stream(
-                    txn, self.get_users_in_room, (room_id,)
-                )
+            txn.call_after(
+                self._curr_state_delta_stream_cache.entity_has_changed,
+                room_id, max_stream_order,
+            )
 
-                self._invalidate_cache_and_stream(
-                    txn, self.get_room_summary, (room_id,)
-                )
+            # Invalidate the various caches
+
+            # Figure out the changes of membership to invalidate the
+            # `get_rooms_for_user` cache.
+            # We find out which membership events we may have deleted
+            # and which we have added, then we invalidate the caches for all
+            # those users.
+            members_changed = set(
+                state_key
+                for ev_type, state_key in itertools.chain(to_delete, to_insert)
+                if ev_type == EventTypes.Member
+            )
 
-                self._invalidate_cache_and_stream(
-                    txn, self.get_current_state_ids, (room_id,)
-                )
+            self._invalidate_state_caches_and_stream(txn, room_id, members_changed)
 
     def _update_forward_extremities_txn(self, txn, new_forward_extremities,
                                         max_stream_order):
@@ -1255,6 +1245,7 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
                         event.internal_metadata.get_dict()
                     ),
                     "json": encode_json(event_dict(event)),
+                    "format_version": event.format_version,
                 }
                 for event, _ in events_and_contexts
             ],
@@ -2034,55 +2025,37 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
 
         logger.info("[purge] finding redundant state groups")
 
-        # Get all state groups that are only referenced by events that are
-        # to be deleted.
-        # This works by first getting state groups that we may want to delete,
-        # joining against event_to_state_groups to get events that use that
-        # state group, then left joining against events_to_purge again. Any
-        # state group where the left join produce *no nulls* are referenced
-        # only by events that are going to be purged.
+        # Get all state groups that are referenced by events that are to be
+        # deleted. We then go and check if they are referenced by other events
+        # or state groups, and if not we delete them.
         txn.execute("""
-            SELECT state_group FROM
-            (
-                SELECT DISTINCT state_group FROM events_to_purge
-                INNER JOIN event_to_state_groups USING (event_id)
-            ) AS sp
-            INNER JOIN event_to_state_groups USING (state_group)
-            LEFT JOIN events_to_purge AS ep USING (event_id)
-            GROUP BY state_group
-            HAVING SUM(CASE WHEN ep.event_id IS NULL THEN 1 ELSE 0 END) = 0
+            SELECT DISTINCT state_group FROM events_to_purge
+            INNER JOIN event_to_state_groups USING (event_id)
         """)
 
-        state_rows = txn.fetchall()
-        logger.info("[purge] found %i redundant state groups", len(state_rows))
-
-        # make a set of the redundant state groups, so that we can look them up
-        # efficiently
-        state_groups_to_delete = set([sg for sg, in state_rows])
-
-        # Now we get all the state groups that rely on these state groups
-        logger.info("[purge] finding state groups which depend on redundant"
-                    " state groups")
-        remaining_state_groups = []
-        for i in range(0, len(state_rows), 100):
-            chunk = [sg for sg, in state_rows[i:i + 100]]
-            # look for state groups whose prev_state_group is one we are about
-            # to delete
-            rows = self._simple_select_many_txn(
-                txn,
-                table="state_group_edges",
-                column="prev_state_group",
-                iterable=chunk,
-                retcols=["state_group"],
-                keyvalues={},
-            )
-            remaining_state_groups.extend(
-                row["state_group"] for row in rows
+        referenced_state_groups = set(sg for sg, in txn)
+        logger.info(
+            "[purge] found %i referenced state groups",
+            len(referenced_state_groups),
+        )
+
+        logger.info("[purge] finding state groups that can be deleted")
 
-                # exclude state groups we are about to delete: no point in
-                # updating them
-                if row["state_group"] not in state_groups_to_delete
+        state_groups_to_delete, remaining_state_groups = (
+            self._find_unreferenced_groups_during_purge(
+                txn, referenced_state_groups,
             )
+        )
+
+        logger.info(
+            "[purge] found %i state groups to delete",
+            len(state_groups_to_delete),
+        )
+
+        logger.info(
+            "[purge] de-delta-ing %i remaining state groups",
+            len(remaining_state_groups),
+        )
 
         # Now we turn the state groups that reference to-be-deleted state
         # groups to non delta versions.
@@ -2127,11 +2100,11 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
         logger.info("[purge] removing redundant state groups")
         txn.executemany(
             "DELETE FROM state_groups_state WHERE state_group = ?",
-            state_rows
+            ((sg,) for sg in state_groups_to_delete),
         )
         txn.executemany(
             "DELETE FROM state_groups WHERE id = ?",
-            state_rows
+            ((sg,) for sg in state_groups_to_delete),
         )
 
         logger.info("[purge] removing events from event_to_state_groups")
@@ -2227,6 +2200,85 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
 
         logger.info("[purge] done")
 
+    def _find_unreferenced_groups_during_purge(self, txn, state_groups):
+        """Used when purging history to figure out which state groups can be
+        deleted and which need to be de-delta'ed (due to one of its prev groups
+        being scheduled for deletion).
+
+        Args:
+            txn
+            state_groups (set[int]): Set of state groups referenced by events
+                that are going to be deleted.
+
+        Returns:
+            tuple[set[int], set[int]]: The set of state groups that can be
+            deleted and the set of state groups that need to be de-delta'ed
+        """
+        # Graph of state group -> previous group
+        graph = {}
+
+        # Set of events that we have found to be referenced by events
+        referenced_groups = set()
+
+        # Set of state groups we've already seen
+        state_groups_seen = set(state_groups)
+
+        # Set of state groups to handle next.
+        next_to_search = set(state_groups)
+        while next_to_search:
+            # We bound the number of groups we look up at once, to stop the
+            # SQL query getting too big
+            if len(next_to_search) < 100:
+                current_search = next_to_search
+                next_to_search = set()
+            else:
+                current_search = set(itertools.islice(next_to_search, 100))
+                next_to_search -= current_search
+
+            # Check if state groups are referenced
+            sql = """
+                SELECT DISTINCT state_group FROM event_to_state_groups
+                LEFT JOIN events_to_purge AS ep USING (event_id)
+                WHERE state_group IN (%s) AND ep.event_id IS NULL
+            """ % (",".join("?" for _ in current_search),)
+            txn.execute(sql, list(current_search))
+
+            referenced = set(sg for sg, in txn)
+            referenced_groups |= referenced
+
+            # We don't continue iterating up the state group graphs for state
+            # groups that are referenced.
+            current_search -= referenced
+
+            rows = self._simple_select_many_txn(
+                txn,
+                table="state_group_edges",
+                column="prev_state_group",
+                iterable=current_search,
+                keyvalues={},
+                retcols=("prev_state_group", "state_group",),
+            )
+
+            prevs = set(row["state_group"] for row in rows)
+            # We don't bother re-handling groups we've already seen
+            prevs -= state_groups_seen
+            next_to_search |= prevs
+            state_groups_seen |= prevs
+
+            for row in rows:
+                # Note: Each state group can have at most one prev group
+                graph[row["state_group"]] = row["prev_state_group"]
+
+        to_delete = state_groups_seen - referenced_groups
+
+        to_dedelta = set()
+        for sg in referenced_groups:
+            prev_sg = graph.get(sg)
+            if prev_sg and prev_sg in to_delete:
+                to_dedelta.add(sg)
+
+        return to_delete, to_dedelta
+
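A tiny worked example of the traversal above, with hypothetical state groups. Say the edge table records 2 -> 1 and 3 -> 2 (each group delta'd against its prev), and the events being purged reference only group 1:

    # pass 1: search {1}; no surviving event references 1, so look up the
    #         groups built on it via state_group_edges: finds 2, graph[2] = 1
    # pass 2: search {2}; a surviving event still references 2, so the walk
    #         stops there and never reaches 3
    #
    # to_delete  = {1}   (seen, never referenced by surviving events)
    # to_dedelta = {2}   (referenced, but its prev group 1 is being deleted)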
     @defer.inlineCallbacks
     def is_event_after(self, event_id1, event_id2):
         """Returns True if event_id1 is after event_id2 in the stream
diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py
index a8326f5296..1716be529a 100644
--- a/synapse/storage/events_worker.py
+++ b/synapse/storage/events_worker.py
@@ -21,13 +21,14 @@ from canonicaljson import json
 
 from twisted.internet import defer
 
+from synapse.api.constants import EventFormatVersions, EventTypes
 from synapse.api.errors import NotFoundError
+from synapse.events import FrozenEvent, event_type_from_format_version  # noqa: F401
 # these are only included to make the type annotations work
-from synapse.events import EventBase  # noqa: F401
-from synapse.events import FrozenEvent
 from synapse.events.snapshot import EventContext  # noqa: F401
 from synapse.events.utils import prune_event
 from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.types import get_domain_from_id
 from synapse.util.logcontext import (
     LoggingContext,
     PreserveLoggingContext,
@@ -160,9 +161,14 @@ class EventsWorkerStore(SQLBaseStore):
             log_ctx = LoggingContext.current_context()
             log_ctx.record_event_fetch(len(missing_events_ids))
 
+            # Note that _enqueue_events is also responsible for turning db rows
+            # into FrozenEvents (via _get_event_from_row), which involves seeing if
+            # the events have been redacted, and if so pulling the redaction event out
+            # of the database to check it.
+            #
+            # _enqueue_events is a bit of a rubbish name but naming is hard.
             missing_events = yield self._enqueue_events(
                 missing_events_ids,
-                check_redacted=check_redacted,
                 allow_rejected=allow_rejected,
             )
 
@@ -174,6 +180,50 @@ class EventsWorkerStore(SQLBaseStore):
             if not entry:
                 continue
 
+            # Starting in room version v3, some redactions need to be rechecked if we
+            # didn't have the redacted event at the time, so we recheck on read
+            # instead.
+            if not allow_rejected and entry.event.type == EventTypes.Redaction:
+                if entry.event.internal_metadata.need_to_check_redaction():
+                    # XXX: we need to avoid calling get_event here.
+                    #
+                    # The problem is that we end up at this point when an event
+                    # which has been redacted is pulled out of the database by
+                    # _enqueue_events, because _enqueue_events needs to check the
+                    # redaction before it can cache the redacted event. So obviously,
+                    # calling get_event to get the redacted event out of the database
+                    # gives us an infinite loop.
+                    #
+                    # For now (quick hack to fix during 0.99 release cycle), we just
+                    # go and fetch the relevant row from the db, but it would be nice
+                    # to think about how we can cache this rather than hit the db
+                    # every time we access a redaction event.
+                    #
+                    # One thought on how to do this:
+                    #  1. split _get_events up so that it is divided into (a) get the
+                    #     rawish event from the db/cache, (b) do the redaction/rejection
+                    #     filtering
+                    #  2. have _get_event_from_row just call the first half of that
+
+                    orig_sender = yield self._simple_select_one_onecol(
+                        table="events",
+                        keyvalues={"event_id": entry.event.redacts},
+                        retcol="sender",
+                        allow_none=True,
+                    )
+
+                    expected_domain = get_domain_from_id(entry.event.sender)
+                    if orig_sender and get_domain_from_id(orig_sender) == expected_domain:
+                        # This redaction event is allowed. Mark as not needing a
+                        # recheck.
+                        entry.event.internal_metadata.recheck_redaction = False
+                    else:
+                        # We don't have the event that is being redacted, so we
+                        # assume that the event isn't authorized for now. (If we
+                        # later receive the event, then we will always redact
+                        # it anyway, since we have this redaction)
+                        continue
+
             if allow_rejected or not entry.event.rejected_reason:
                 if check_redacted and entry.redacted_event:
                     event = entry.redacted_event
@@ -197,7 +247,7 @@ class EventsWorkerStore(SQLBaseStore):
         defer.returnValue(events)
 
     def _invalidate_get_event_cache(self, event_id):
-            self._get_event_cache.invalidate((event_id,))
+        self._get_event_cache.invalidate((event_id,))
 
     def _get_events_from_cache(self, events, allow_rejected, update_metrics=True):
         """Fetch events from the caches
@@ -310,7 +360,7 @@ class EventsWorkerStore(SQLBaseStore):
                     self.hs.get_reactor().callFromThread(fire, event_list, e)
 
     @defer.inlineCallbacks
-    def _enqueue_events(self, events, check_redacted=True, allow_rejected=False):
+    def _enqueue_events(self, events, allow_rejected=False):
         """Fetches events from the database using the _event_fetch_list. This
         allows batch and bulk fetching of events - it allows us to fetch events
         without having to create a new transaction for each request for events.
@@ -353,6 +403,7 @@ class EventsWorkerStore(SQLBaseStore):
                     self._get_event_from_row,
                     row["internal_metadata"], row["json"], row["redacts"],
                     rejected_reason=row["rejects"],
+                    format_version=row["format_version"],
                 )
                 for row in rows
             ],
@@ -377,6 +428,7 @@ class EventsWorkerStore(SQLBaseStore):
                 " e.event_id as event_id, "
                 " e.internal_metadata,"
                 " e.json,"
+                " e.format_version, "
                 " r.redacts as redacts,"
                 " rej.event_id as rejects "
                 " FROM event_json as e"
@@ -392,7 +444,7 @@ class EventsWorkerStore(SQLBaseStore):
 
     @defer.inlineCallbacks
     def _get_event_from_row(self, internal_metadata, js, redacted,
-                            rejected_reason=None):
+                            format_version, rejected_reason=None):
         with Measure(self._clock, "_get_event_from_row"):
             d = json.loads(js)
             internal_metadata = json.loads(internal_metadata)
@@ -405,8 +457,13 @@ class EventsWorkerStore(SQLBaseStore):
                     desc="_get_event_from_row_rejected_reason",
                 )
 
-            original_ev = FrozenEvent(
-                d,
+            if format_version is None:
+                # This means that we stored the event before we had the concept
+                # of an event format version, so it must be a V1 event.
+                format_version = EventFormatVersions.V1
+
+            original_ev = event_type_from_format_version(format_version)(
+                event_dict=d,
                 internal_metadata_dict=internal_metadata,
                 rejected_reason=rejected_reason,
             )
@@ -436,6 +493,19 @@ class EventsWorkerStore(SQLBaseStore):
                     # will serialise this field correctly
                     redacted_event.unsigned["redacted_because"] = because
 
+                    # Starting in room version v3, some redactions need to be
+                    # rechecked if we didn't have the redacted event at the
+                    # time, so we recheck on read instead.
+                    if because.internal_metadata.need_to_check_redaction():
+                        expected_domain = get_domain_from_id(original_ev.sender)
+                        if get_domain_from_id(because.sender) == expected_domain:
+                            # This redaction event is allowed. Mark as not needing a
+                            # recheck.
+                            because.internal_metadata.recheck_redaction = False
+                        else:
+                            # Senders don't match, so the event isn't actually redacted
+                            redacted_event = None
+
             cache_entry = _EventCacheEntry(
                 event=original_ev,
                 redacted_event=redacted_event,
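
The sender-domain check that both redaction hunks above perform can be
sketched on its own. This is a simplified stand-in, not Synapse's
implementation: `get_domain_from_id` approximates the helper imported
from synapse.types, and the arguments are plain strings rather than
events.

def get_domain_from_id(user_id):
    # "@alice:example.org" -> "example.org"
    return user_id.split(":", 1)[1]

def redaction_allowed(redaction_sender, original_sender):
    # A room-v3 redaction is honoured only when the redaction sender
    # and the sender of the redacted event share a domain.
    if original_sender is None:
        # We don't have the redacted event yet: treat the redaction as
        # unauthorised for now and recheck when the event arrives.
        return False
    return get_domain_from_id(redaction_sender) == get_domain_from_id(original_sender)

print(redaction_allowed("@mod:example.org", "@spam:example.org"))   # True
print(redaction_allowed("@mod:example.org", "@user:elsewhere.net")) # False
print(redaction_allowed("@mod:example.org", None))                  # False
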
diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py
index cf4104dc2e..9e7e09b8c1 100644
--- a/synapse/storage/monthly_active_users.py
+++ b/synapse/storage/monthly_active_users.py
@@ -34,8 +34,9 @@ class MonthlyActiveUsersStore(SQLBaseStore):
         self.hs = hs
         self.reserved_users = ()
         # Do not add more reserved users than the total allowable number
-        self._initialise_reserved_users(
-            dbconn.cursor(),
+        self._new_transaction(
+            dbconn, "initialise_mau_threepids", [], [],
+            self._initialise_reserved_users,
             hs.config.mau_limits_reserved_threepids[:self.hs.config.max_mau_value],
         )
 
@@ -54,9 +55,12 @@ class MonthlyActiveUsersStore(SQLBaseStore):
                 txn,
                 tp["medium"], tp["address"]
             )
+
             if user_id:
-                self.upsert_monthly_active_user_txn(txn, user_id)
-                reserved_user_list.append(user_id)
+                is_support = self.is_support_user_txn(txn, user_id)
+                if not is_support:
+                    self.upsert_monthly_active_user_txn(txn, user_id)
+                    reserved_user_list.append(user_id)
             else:
                 logger.warning(
                     "mau limit reserved threepid %s not found in db" % tp
@@ -96,37 +100,38 @@ class MonthlyActiveUsersStore(SQLBaseStore):
 
             txn.execute(sql, query_args)
 
-            # If MAU user count still exceeds the MAU threshold, then delete on
-            # a least recently active basis.
-            # Note it is not possible to write this query using OFFSET due to
-            # incompatibilities in how sqlite and postgres support the feature.
-            # sqlite requires 'LIMIT -1 OFFSET ?', the LIMIT must be present
-            # While Postgres does not require 'LIMIT', but also does not support
-            # negative LIMIT values. So there is no way to write it that both can
-            # support
-            safe_guard = self.hs.config.max_mau_value - len(self.reserved_users)
-            # Must be greater than zero for postgres
-            safe_guard = safe_guard if safe_guard > 0 else 0
-            query_args = [safe_guard]
-
-            base_sql = """
-                DELETE FROM monthly_active_users
-                WHERE user_id NOT IN (
-                    SELECT user_id FROM monthly_active_users
-                    ORDER BY timestamp DESC
-                    LIMIT ?
+            if self.hs.config.limit_usage_by_mau:
+                # If MAU user count still exceeds the MAU threshold, then delete on
+                # a least recently active basis.
+                # Note that it is not possible to write this query using OFFSET
+                # because sqlite and postgres support the feature incompatibly:
+                # sqlite requires 'LIMIT -1 OFFSET ?' (the LIMIT must be present),
+                # while Postgres does not require 'LIMIT' but does not support
+                # negative LIMIT values. There is no way to write the query so
+                # that both engines accept it.
+                safe_guard = self.hs.config.max_mau_value - len(self.reserved_users)
+                # Must be greater than zero for postgres
+                safe_guard = safe_guard if safe_guard > 0 else 0
+                query_args = [safe_guard]
+
+                base_sql = """
+                    DELETE FROM monthly_active_users
+                    WHERE user_id NOT IN (
+                        SELECT user_id FROM monthly_active_users
+                        ORDER BY timestamp DESC
+                        LIMIT ?
+                        )
+                    """
+                # Need if/else since 'AND user_id NOT IN ({})' fails on Postgres
+                # when len(reserved_users) == 0. Works fine on sqlite.
+                if len(self.reserved_users) > 0:
+                    query_args.extend(self.reserved_users)
+                    sql = base_sql + """ AND user_id NOT IN ({})""".format(
+                        ','.join(questionmarks)
                     )
-                """
-            # Need if/else since 'AND user_id NOT IN ({})' fails on Postgres
-            # when len(reserved_users) == 0. Works fine on sqlite.
-            if len(self.reserved_users) > 0:
-                query_args.extend(self.reserved_users)
-                sql = base_sql + """ AND user_id NOT IN ({})""".format(
-                    ','.join(questionmarks)
-                )
-            else:
-                sql = base_sql
-            txn.execute(sql, query_args)
+                else:
+                    sql = base_sql
+                txn.execute(sql, query_args)
 
         yield self.runInteraction("reap_monthly_active_users", _reap_users)
         # It seems poor to invalidate the whole cache, Postgres supports
@@ -180,15 +185,33 @@ class MonthlyActiveUsersStore(SQLBaseStore):
         Args:
             user_id (str): user to add/update
         """
-        is_insert = yield self.runInteraction(
+        # Support users are never included in MAU stats. Note that we can't
+        # easily call this from upsert_monthly_active_user_txn, because we would
+        # then need a _txn form of is_support_user, which is complicated because
+        # we want to cache the result. We therefore call it here, and ignore the
+        # case where upsert_monthly_active_user_txn is called directly from
+        # _initialise_reserved_users, reasoning that it would be very strange to
+        # include a support user in that context.
+
+        is_support = yield self.is_support_user(user_id)
+        if is_support:
+            return
+
+        yield self.runInteraction(
             "upsert_monthly_active_user", self.upsert_monthly_active_user_txn,
             user_id
         )
 
-        if is_insert:
-            self.user_last_seen_monthly_active.invalidate((user_id,))
+        user_in_mau = self.user_last_seen_monthly_active.cache.get(
+            (user_id,),
+            None,
+            update_metrics=False
+        )
+        if user_in_mau is None:
             self.get_monthly_active_count.invalidate(())
 
+        self.user_last_seen_monthly_active.invalidate((user_id,))
+
     def upsert_monthly_active_user_txn(self, txn, user_id):
         """Updates or inserts monthly active user member
 
@@ -198,6 +221,16 @@ class MonthlyActiveUsersStore(SQLBaseStore):
         in a database thread rather than the main thread, and we can't call
         txn.call_after because txn may not be a LoggingTransaction.
 
+        We consciously do not call is_support_user_txn from this method because
+        it is not possible to cache the response. is_support_user_txn will be
+        false in almost all cases, so it seems reasonable to call it only for
+        upsert_monthly_active_user and to call is_support_user_txn manually
+        for the cases where upsert_monthly_active_user_txn is called directly,
+        like _initialise_reserved_users.
+
+        In short, don't call this method with support users. (Support users
+        should not appear in the MAU stats).
+
         Args:
             txn (cursor):
             user_id (str): user to add/update
@@ -206,6 +239,7 @@ class MonthlyActiveUsersStore(SQLBaseStore):
             bool: True if a new entry was created, False if an
             existing one was updated.
         """
+
         # Am consciously deciding to lock the table on the basis that it ought
         # never be a big table and alternative approaches (batching multiple
         # upserts into a single txn) introduced a lot of extra complexity.
@@ -252,8 +286,7 @@ class MonthlyActiveUsersStore(SQLBaseStore):
         Args:
             user_id(str): the user_id to query
         """
-
-        if self.hs.config.limit_usage_by_mau:
+        if self.hs.config.limit_usage_by_mau or self.hs.config.mau_stats_only:
             # Trial users and guests should not be included as part of MAU group
             is_guest = yield self.is_guest(user_id)
             if is_guest:
@@ -271,8 +304,14 @@ class MonthlyActiveUsersStore(SQLBaseStore):
             # but only update if we have not previously seen the user for
             # LAST_SEEN_GRANULARITY ms
             if last_seen_timestamp is None:
-                count = yield self.get_monthly_active_count()
-                if count < self.hs.config.max_mau_value:
+                # In the case where mau_stats_only is True and limit_usage_by_mau is
+                # False, there is no point in checking get_monthly_active_count - it
+                # adds no value and will break the logic if max_mau_value is exceeded.
+                if not self.hs.config.limit_usage_by_mau:
                     yield self.upsert_monthly_active_user(user_id)
+                else:
+                    count = yield self.get_monthly_active_count()
+                    if count < self.hs.config.max_mau_value:
+                        yield self.upsert_monthly_active_user(user_id)
             elif now - last_seen_timestamp > LAST_SEEN_GRANULARITY:
                 yield self.upsert_monthly_active_user(user_id)
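
The branch added above is easier to see as a small predicate. A
condensed sketch, with an invented Config stand-in for hs.config:

class Config:
    def __init__(self, limit_usage_by_mau, mau_stats_only, max_mau_value=100):
        self.limit_usage_by_mau = limit_usage_by_mau
        self.mau_stats_only = mau_stats_only
        self.max_mau_value = max_mau_value

def should_upsert(config, current_count):
    # With limiting off (stats-only mode), always track the user:
    # consulting the count would wrongly stop tracking once
    # max_mau_value is exceeded.
    if not config.limit_usage_by_mau:
        return True
    return current_count < config.max_mau_value

print(should_upsert(Config(False, True), 150))  # True: keep counting
print(should_upsert(Config(True, False), 150))  # False: over the cap
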
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index b364719312..fa36daac52 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -25,7 +25,7 @@ logger = logging.getLogger(__name__)
 
 # Remember to update this number every time a change is made to database
 # schema files, so the users will be informed on server restarts.
-SCHEMA_VERSION = 51
+SCHEMA_VERSION = 53
 
 dir_path = os.path.abspath(os.path.dirname(__file__))
 
@@ -257,7 +257,7 @@ def _upgrade_existing_database(cur, current_version, applied_delta_files,
                 module.run_create(cur, database_engine)
                 if not is_empty:
                     module.run_upgrade(cur, database_engine, config=config)
-            elif ext == ".pyc":
+            elif ext == ".pyc" or file_name == "__pycache__":
                 # Sometimes .pyc files turn up anyway even though we've
                 # disabled their generation; e.g. from distribution package
                 # installers. Silently skip it
diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py
index 2743b52bad..134297e284 100644
--- a/synapse/storage/pusher.py
+++ b/synapse/storage/pusher.py
@@ -215,7 +215,7 @@ class PusherStore(PusherWorkerStore):
         with self._pushers_id_gen.get_next() as stream_id:
             # no need to lock because `pushers` has a unique key on
             # (app_id, pushkey, user_name) so _simple_upsert will retry
-            newly_inserted = yield self._simple_upsert(
+            yield self._simple_upsert(
                 table="pushers",
                 keyvalues={
                     "app_id": app_id,
@@ -238,7 +238,12 @@ class PusherStore(PusherWorkerStore):
                 lock=False,
             )
 
-            if newly_inserted:
+            user_has_pusher = self.get_if_user_has_pusher.cache.get(
+                (user_id,), None, update_metrics=False
+            )
+
+            if user_has_pusher is not True:
+                # invalidate, since the user might not have had a pusher before
                 yield self.runInteraction(
                     "add_pusher",
                     self._invalidate_cache_and_stream,
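
The guard above only skips the cache invalidation when the
get_if_user_has_pusher cache already answers True. A one-function
sketch of that rule, with plain values in place of the real cache
entry:

def needs_invalidation(cached_value):
    # cached_value is the cache entry for (user_id,), or None when the
    # value has never been cached. Only a cached True can safely be
    # left alone: adding a pusher cannot turn it False.
    return cached_value is not True

print(needs_invalidation(True))   # False: cache already correct
print(needs_invalidation(False))  # True: the answer is changing
print(needs_invalidation(None))   # True: unknown, invalidate to be safe
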
diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py
index 65061f4c61..9b9572890b 100644
--- a/synapse/storage/registration.py
+++ b/synapse/storage/registration.py
@@ -19,9 +19,11 @@ from six.moves import range
 
 from twisted.internet import defer
 
+from synapse.api.constants import UserTypes
 from synapse.api.errors import Codes, StoreError
 from synapse.storage import background_updates
 from synapse.storage._base import SQLBaseStore
+from synapse.types import UserID
 from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
 
 
@@ -112,6 +114,187 @@ class RegistrationWorkerStore(SQLBaseStore):
 
         return None
 
+    @cachedInlineCallbacks()
+    def is_support_user(self, user_id):
+        """Determines if the user is of type UserTypes.SUPPORT
+
+        Args:
+            user_id (str): user id to test
+
+        Returns:
+            Deferred[bool]: True if user is of type UserTypes.SUPPORT
+        """
+        res = yield self.runInteraction(
+            "is_support_user", self.is_support_user_txn, user_id
+        )
+        defer.returnValue(res)
+
+    def is_support_user_txn(self, txn, user_id):
+        res = self._simple_select_one_onecol_txn(
+            txn=txn,
+            table="users",
+            keyvalues={"name": user_id},
+            retcol="user_type",
+            allow_none=True,
+        )
+        return res == UserTypes.SUPPORT
+
+    def get_users_by_id_case_insensitive(self, user_id):
+        """Gets users that match user_id case insensitively.
+        Returns a mapping of user_id -> password_hash.
+        """
+        def f(txn):
+            sql = (
+                "SELECT name, password_hash FROM users"
+                " WHERE lower(name) = lower(?)"
+            )
+            txn.execute(sql, (user_id,))
+            return dict(txn)
+
+        return self.runInteraction("get_users_by_id_case_insensitive", f)
+
+    @defer.inlineCallbacks
+    def count_all_users(self):
+        """Counts all users registered on the homeserver."""
+        def _count_users(txn):
+            txn.execute("SELECT COUNT(*) AS users FROM users")
+            rows = self.cursor_to_dict(txn)
+            if rows:
+                return rows[0]["users"]
+            return 0
+
+        ret = yield self.runInteraction("count_users", _count_users)
+        defer.returnValue(ret)
+
+    def count_daily_user_type(self):
+        """
+        Counts 1) native non-guest users
+               2) native guest users
+               3) bridged users
+        who registered on the homeserver in the past 24 hours
+        """
+        def _count_daily_user_type(txn):
+            yesterday = int(self._clock.time()) - (60 * 60 * 24)
+
+            sql = """
+                SELECT user_type, COALESCE(count(*), 0) AS count FROM (
+                    SELECT
+                    CASE
+                        WHEN is_guest=0 AND appservice_id IS NULL THEN 'native'
+                        WHEN is_guest=1 AND appservice_id IS NULL THEN 'guest'
+                        WHEN is_guest=0 AND appservice_id IS NOT NULL THEN 'bridged'
+                    END AS user_type
+                    FROM users
+                    WHERE creation_ts > ?
+                ) AS t GROUP BY user_type
+            """
+            results = {'native': 0, 'guest': 0, 'bridged': 0}
+            txn.execute(sql, (yesterday,))
+            for row in txn:
+                results[row[0]] = row[1]
+            return results
+        return self.runInteraction("count_daily_user_type", _count_daily_user_type)
+
+    @defer.inlineCallbacks
+    def count_nonbridged_users(self):
+        def _count_users(txn):
+            txn.execute("""
+                SELECT COALESCE(COUNT(*), 0) FROM users
+                WHERE appservice_id IS NULL
+            """)
+            count, = txn.fetchone()
+            return count
+
+        ret = yield self.runInteraction("count_users", _count_users)
+        defer.returnValue(ret)
+
+    @defer.inlineCallbacks
+    def find_next_generated_user_id_localpart(self):
+        """
+        Gets the localpart of the next generated user ID.
+
+        Generated user IDs are integers, and we aim for them to be as small as
+        we can. Unfortunately, it's possible some of them are already taken by
+        existing users, and there may be gaps in the already taken range. This
+        function returns the start of the first allocatable gap. This is to
+        avoid the case of ID 10000000 being pre-allocated and us thereby
+        wasting the first (and shortest) generated user IDs.
+        """
+        def _find_next_generated_user_id(txn):
+            txn.execute("SELECT name FROM users")
+
+            regex = re.compile(r"^@(\d+):")
+
+            found = set()
+
+            for user_id, in txn:
+                match = regex.search(user_id)
+                if match:
+                    found.add(int(match.group(1)))
+            for i in range(len(found) + 1):
+                if i not in found:
+                    return i
+
+        defer.returnValue((yield self.runInteraction(
+            "find_next_generated_user_id",
+            _find_next_generated_user_id
+        )))
+
+    @defer.inlineCallbacks
+    def get_3pid_guest_access_token(self, medium, address):
+        ret = yield self._simple_select_one(
+            "threepid_guest_access_tokens",
+            {
+                "medium": medium,
+                "address": address
+            },
+            ["guest_access_token"], True, 'get_3pid_guest_access_token'
+        )
+        if ret:
+            defer.returnValue(ret["guest_access_token"])
+        defer.returnValue(None)
+
+    @defer.inlineCallbacks
+    def get_user_id_by_threepid(self, medium, address):
+        """Returns user id from threepid
+
+        Args:
+            medium (str): threepid medium e.g. email
+            address (str): threepid address e.g. me@example.com
+
+        Returns:
+            Deferred[str|None]: user id or None if no user id/threepid mapping exists
+        """
+        user_id = yield self.runInteraction(
+            "get_user_id_by_threepid", self.get_user_id_by_threepid_txn,
+            medium, address
+        )
+        defer.returnValue(user_id)
+
+    def get_user_id_by_threepid_txn(self, txn, medium, address):
+        """Returns user id from threepid
+
+        Args:
+            txn (cursor):
+            medium (str): threepid medium e.g. email
+            address (str): threepid address e.g. me@example.com
+
+        Returns:
+            str|None: user id or None if no user id/threepid mapping exists
+        """
+        ret = self._simple_select_one_txn(
+            txn,
+            "user_threepids",
+            {
+                "medium": medium,
+                "address": address
+            },
+            ['user_id'], True
+        )
+        if ret:
+            return ret['user_id']
+        return None
+
 
 class RegistrationStore(RegistrationWorkerStore,
                         background_updates.BackgroundUpdateStore):
@@ -167,7 +350,7 @@ class RegistrationStore(RegistrationWorkerStore,
 
     def register(self, user_id, token=None, password_hash=None,
                  was_guest=False, make_guest=False, appservice_id=None,
-                 admin=False):
+                 create_profile_with_displayname=None, admin=False, user_type=None):
         """Attempts to register an account.
 
         Args:
@@ -181,6 +364,12 @@ class RegistrationStore(RegistrationWorkerStore,
             make_guest (boolean): True if the new user should be a guest,
                 false to add a regular user account.
             appservice_id (str): The ID of the appservice registering the user.
+            create_profile_with_displayname (unicode): Optionally create a profile for
+                the user, setting their displayname to the given value
+            admin (boolean): True if the user should be registered as an admin.
+            user_type (str|None): type of user. One of the values from
+                api.constants.UserTypes, or None for a normal user.
+
         Raises:
             StoreError if the user_id could not be registered.
         """
@@ -193,7 +382,9 @@ class RegistrationStore(RegistrationWorkerStore,
             was_guest,
             make_guest,
             appservice_id,
-            admin
+            create_profile_with_displayname,
+            admin,
+            user_type
         )
 
     def _register(
@@ -205,8 +396,12 @@ class RegistrationStore(RegistrationWorkerStore,
         was_guest,
         make_guest,
         appservice_id,
+        create_profile_with_displayname,
         admin,
+        user_type,
     ):
+        user_id_obj = UserID.from_string(user_id)
+
         now = int(self.clock.time())
 
         next_id = self._access_tokens_id_gen.get_next()
@@ -240,6 +435,7 @@ class RegistrationStore(RegistrationWorkerStore,
                         "is_guest": 1 if make_guest else 0,
                         "appservice_id": appservice_id,
                         "admin": 1 if admin else 0,
+                        "user_type": user_type,
                     }
                 )
             else:
@@ -253,6 +449,7 @@ class RegistrationStore(RegistrationWorkerStore,
                         "is_guest": 1 if make_guest else 0,
                         "appservice_id": appservice_id,
                         "admin": 1 if admin else 0,
+                        "user_type": user_type,
                     }
                 )
         except self.database_engine.module.IntegrityError:
@@ -269,25 +466,22 @@ class RegistrationStore(RegistrationWorkerStore,
                 (next_id, user_id, token,)
             )
 
+        if create_profile_with_displayname:
+            # set a default displayname serverside to avoid ugly race
+            # between auto-joins and clients trying to set displaynames
+            #
+            # *obviously* the 'profiles' table uses localpart for user_id
+            # while everything else uses the full mxid.
+            txn.execute(
+                "INSERT INTO profiles(user_id, displayname) VALUES (?,?)",
+                (user_id_obj.localpart, create_profile_with_displayname)
+            )
+
         self._invalidate_cache_and_stream(
             txn, self.get_user_by_id, (user_id,)
         )
         txn.call_after(self.is_guest.invalidate, (user_id,))
 
-    def get_users_by_id_case_insensitive(self, user_id):
-        """Gets users that match user_id case insensitively.
-        Returns a mapping of user_id -> password_hash.
-        """
-        def f(txn):
-            sql = (
-                "SELECT name, password_hash FROM users"
-                " WHERE lower(name) = lower(?)"
-            )
-            txn.execute(sql, (user_id,))
-            return dict(txn)
-
-        return self.runInteraction("get_users_by_id_case_insensitive", f)
-
     def user_set_password_hash(self, user_id, password_hash):
         """
         NB. This does *not* evict any cache because the one use for this
@@ -460,47 +654,6 @@ class RegistrationStore(RegistrationWorkerStore,
         )
         defer.returnValue(ret)
 
-    @defer.inlineCallbacks
-    def get_user_id_by_threepid(self, medium, address):
-        """Returns user id from threepid
-
-        Args:
-            medium (str): threepid medium e.g. email
-            address (str): threepid address e.g. me@example.com
-
-        Returns:
-            Deferred[str|None]: user id or None if no user id/threepid mapping exists
-        """
-        user_id = yield self.runInteraction(
-            "get_user_id_by_threepid", self.get_user_id_by_threepid_txn,
-            medium, address
-        )
-        defer.returnValue(user_id)
-
-    def get_user_id_by_threepid_txn(self, txn, medium, address):
-        """Returns user id from threepid
-
-        Args:
-            txn (cursor):
-            medium (str): threepid medium e.g. email
-            address (str): threepid address e.g. me@example.com
-
-        Returns:
-            str|None: user id or None if no user id/threepid mapping exists
-        """
-        ret = self._simple_select_one_txn(
-            txn,
-            "user_threepids",
-            {
-                "medium": medium,
-                "address": address
-            },
-            ['user_id'], True
-        )
-        if ret:
-            return ret['user_id']
-        return None
-
     def user_delete_threepid(self, user_id, medium, address):
         return self._simple_delete(
             "user_threepids",
@@ -513,107 +666,6 @@ class RegistrationStore(RegistrationWorkerStore,
         )
 
     @defer.inlineCallbacks
-    def count_all_users(self):
-        """Counts all users registered on the homeserver."""
-        def _count_users(txn):
-            txn.execute("SELECT COUNT(*) AS users FROM users")
-            rows = self.cursor_to_dict(txn)
-            if rows:
-                return rows[0]["users"]
-            return 0
-
-        ret = yield self.runInteraction("count_users", _count_users)
-        defer.returnValue(ret)
-
-    def count_daily_user_type(self):
-        """
-        Counts 1) native non guest users
-               2) native guests users
-               3) bridged users
-        who registered on the homeserver in the past 24 hours
-        """
-        def _count_daily_user_type(txn):
-            yesterday = int(self._clock.time()) - (60 * 60 * 24)
-
-            sql = """
-                SELECT user_type, COALESCE(count(*), 0) AS count FROM (
-                    SELECT
-                    CASE
-                        WHEN is_guest=0 AND appservice_id IS NULL THEN 'native'
-                        WHEN is_guest=1 AND appservice_id IS NULL THEN 'guest'
-                        WHEN is_guest=0 AND appservice_id IS NOT NULL THEN 'bridged'
-                    END AS user_type
-                    FROM users
-                    WHERE creation_ts > ?
-                ) AS t GROUP BY user_type
-            """
-            results = {'native': 0, 'guest': 0, 'bridged': 0}
-            txn.execute(sql, (yesterday,))
-            for row in txn:
-                results[row[0]] = row[1]
-            return results
-        return self.runInteraction("count_daily_user_type", _count_daily_user_type)
-
-    @defer.inlineCallbacks
-    def count_nonbridged_users(self):
-        def _count_users(txn):
-            txn.execute("""
-                SELECT COALESCE(COUNT(*), 0) FROM users
-                WHERE appservice_id IS NULL
-            """)
-            count, = txn.fetchone()
-            return count
-
-        ret = yield self.runInteraction("count_users", _count_users)
-        defer.returnValue(ret)
-
-    @defer.inlineCallbacks
-    def find_next_generated_user_id_localpart(self):
-        """
-        Gets the localpart of the next generated user ID.
-
-        Generated user IDs are integers, and we aim for them to be as small as
-        we can. Unfortunately, it's possible some of them are already taken by
-        existing users, and there may be gaps in the already taken range. This
-        function returns the start of the first allocatable gap. This is to
-        avoid the case of ID 10000000 being pre-allocated, so us wasting the
-        first (and shortest) many generated user IDs.
-        """
-        def _find_next_generated_user_id(txn):
-            txn.execute("SELECT name FROM users")
-
-            regex = re.compile(r"^@(\d+):")
-
-            found = set()
-
-            for user_id, in txn:
-                match = regex.search(user_id)
-                if match:
-                    found.add(int(match.group(1)))
-            for i in range(len(found) + 1):
-                if i not in found:
-                    return i
-
-        defer.returnValue((yield self.runInteraction(
-            "find_next_generated_user_id",
-            _find_next_generated_user_id
-        )))
-
-    @defer.inlineCallbacks
-    def get_3pid_guest_access_token(self, medium, address):
-        ret = yield self._simple_select_one(
-            "threepid_guest_access_tokens",
-            {
-                "medium": medium,
-                "address": address
-            },
-            ["guest_access_token"], True, 'get_3pid_guest_access_token'
-        )
-        if ret:
-            defer.returnValue(ret["guest_access_token"])
-        defer.returnValue(None)
-
-    @defer.inlineCallbacks
     def save_or_get_3pid_guest_access_token(
             self, medium, address, access_token, inviter_user_id
     ):
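
A hypothetical usage sketch tying the registration changes together:
registering a user with user_type=UserTypes.SUPPORT and confirming that
is_support_user sees it (which in turn keeps the user out of the MAU
bookkeeping shown earlier). The store wiring is assumed, and the user
ID is invented.

from twisted.internet import defer
from synapse.api.constants import UserTypes

@defer.inlineCallbacks
def register_support_user(store):
    # store is assumed to be a RegistrationStore.
    yield store.register(
        user_id="@support:example.org",
        password_hash=None,
        user_type=UserTypes.SUPPORT,
    )
    is_support = yield store.is_support_user("@support:example.org")
    defer.returnValue(is_support)  # expected: True
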
diff --git a/synapse/storage/room.py b/synapse/storage/room.py
index 61013b8919..41c65e112a 100644
--- a/synapse/storage/room.py
+++ b/synapse/storage/room.py
@@ -47,7 +47,7 @@ class RoomWorkerStore(SQLBaseStore):
         Args:
             room_id (str): The ID of the room to retrieve.
         Returns:
-            A namedtuple containing the room information, or an empty list.
+            A dict containing the room information, or None if the room is unknown.
         """
         return self._simple_select_one(
             table="rooms",
diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py
index 0707f9a86a..592c1bcd33 100644
--- a/synapse/storage/roommember.py
+++ b/synapse/storage/roommember.py
@@ -588,12 +588,12 @@ class RoomMemberStore(RoomMemberWorkerStore):
             )
 
             # We update the local_invites table only if the event is "current",
-            # i.e., its something that has just happened.
-            # The only current event that can also be an outlier is if its an
-            # invite that has come in across federation.
+            # i.e., it's something that has just happened. If the event is an
+            # outlier it is only current if it's an "out of band membership",
+            # like a remote invite or a rejection of a remote invite.
             is_new_state = not backfilled and (
                 not event.internal_metadata.is_outlier()
-                or event.internal_metadata.is_invite_from_remote()
+                or event.internal_metadata.is_out_of_band_membership()
             )
             is_mine = self.hs.is_mine_id(event.state_key)
             if is_new_state and is_mine:
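
The condition above condenses to a small predicate. A stand-alone
sketch, with plain booleans in place of the event metadata accessors:

def is_new_state(backfilled, is_outlier, is_out_of_band_membership):
    # Backfilled events are never current. An outlier is only current
    # when it is an out-of-band membership, e.g. a remote invite or
    # the rejection of one.
    return not backfilled and (not is_outlier or is_out_of_band_membership)

print(is_new_state(False, True, True))   # True: remote invite
print(is_new_state(False, True, False))  # False: ordinary outlier
print(is_new_state(True, False, False))  # False: backfilled
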
diff --git a/synapse/storage/schema/delta/40/device_list_streams.sql b/synapse/storage/schema/delta/40/device_list_streams.sql
index 54841b3843..dd6dcb65f1 100644
--- a/synapse/storage/schema/delta/40/device_list_streams.sql
+++ b/synapse/storage/schema/delta/40/device_list_streams.sql
@@ -20,9 +20,6 @@ CREATE TABLE device_lists_remote_cache (
     content TEXT NOT NULL
 );
 
-CREATE INDEX device_lists_remote_cache_id ON device_lists_remote_cache(user_id, device_id);
-
-
 -- The last update we got for a user. Empty if we're not receiving updates for
 -- that user.
 CREATE TABLE device_lists_remote_extremeties (
@@ -30,7 +27,11 @@ CREATE TABLE device_lists_remote_extremeties (
     stream_id TEXT NOT NULL
 );
 
-CREATE INDEX device_lists_remote_extremeties_id ON device_lists_remote_extremeties(user_id, stream_id);
+-- we used to create non-unique indexes on these tables, but as of update 52 we create
+-- unique indexes concurrently:
+--
+-- CREATE INDEX device_lists_remote_cache_id ON device_lists_remote_cache(user_id, device_id);
+-- CREATE INDEX device_lists_remote_extremeties_id ON device_lists_remote_extremeties(user_id, stream_id);
 
 
 -- Stream of device lists updates. Includes both local and remotes
diff --git a/synapse/storage/schema/delta/52/add_event_to_state_group_index.sql b/synapse/storage/schema/delta/52/add_event_to_state_group_index.sql
new file mode 100644
index 0000000000..91e03d13e1
--- /dev/null
+++ b/synapse/storage/schema/delta/52/add_event_to_state_group_index.sql
@@ -0,0 +1,19 @@
+/* Copyright 2018 New Vector Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- This is needed to efficiently check for unreferenced state groups during
+-- purge. Adds an index on event_to_state_groups(state_group).
+INSERT into background_updates (update_name, progress_json)
+    VALUES ('event_to_state_groups_sg_index', '{}');
diff --git a/synapse/storage/schema/delta/52/device_list_streams_unique_idx.sql b/synapse/storage/schema/delta/52/device_list_streams_unique_idx.sql
new file mode 100644
index 0000000000..bfa49e6f92
--- /dev/null
+++ b/synapse/storage/schema/delta/52/device_list_streams_unique_idx.sql
@@ -0,0 +1,36 @@
+/* Copyright 2018 New Vector Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- register a background update which will create a unique index on
+-- device_lists_remote_cache
+INSERT into background_updates (update_name, progress_json)
+    VALUES ('device_lists_remote_cache_unique_idx', '{}');
+
+-- and one on device_lists_remote_extremeties
+INSERT into background_updates (update_name, progress_json, depends_on)
+    VALUES (
+        'device_lists_remote_extremeties_unique_idx', '{}',
+
+        -- doesn't really depend on this, but we need to make sure both happen
+        -- before we drop the old indexes.
+        'device_lists_remote_cache_unique_idx'
+    );
+
+-- once they complete, we can drop the old indexes.
+INSERT into background_updates (update_name, progress_json, depends_on)
+    VALUES (
+        'drop_device_list_streams_non_unique_indexes', '{}',
+        'device_lists_remote_extremeties_unique_idx'
+    );
diff --git a/synapse/storage/schema/delta/52/e2e_room_keys.sql b/synapse/storage/schema/delta/52/e2e_room_keys.sql
new file mode 100644
index 0000000000..db687cccae
--- /dev/null
+++ b/synapse/storage/schema/delta/52/e2e_room_keys.sql
@@ -0,0 +1,53 @@
+/* Copyright 2018 New Vector Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Change version column to an integer so we can do MAX() sensibly
+ */
+CREATE TABLE e2e_room_keys_versions_new (
+    user_id TEXT NOT NULL,
+    version BIGINT NOT NULL,
+    algorithm TEXT NOT NULL,
+    auth_data TEXT NOT NULL,
+    deleted SMALLINT DEFAULT 0 NOT NULL
+);
+
+INSERT INTO e2e_room_keys_versions_new
+    SELECT user_id, CAST(version as BIGINT), algorithm, auth_data, deleted FROM e2e_room_keys_versions;
+
+DROP TABLE e2e_room_keys_versions;
+ALTER TABLE e2e_room_keys_versions_new RENAME TO e2e_room_keys_versions;
+
+CREATE UNIQUE INDEX e2e_room_keys_versions_idx ON e2e_room_keys_versions(user_id, version);
+
+/* Change e2e_rooms_keys to match
+ */
+CREATE TABLE e2e_room_keys_new (
+    user_id TEXT NOT NULL,
+    room_id TEXT NOT NULL,
+    session_id TEXT NOT NULL,
+    version BIGINT NOT NULL,
+    first_message_index INT,
+    forwarded_count INT,
+    is_verified BOOLEAN,
+    session_data TEXT NOT NULL
+);
+
+INSERT INTO e2e_room_keys_new
+    SELECT user_id, room_id, session_id, CAST(version as BIGINT), first_message_index, forwarded_count, is_verified, session_data FROM e2e_room_keys;
+
+DROP TABLE e2e_room_keys;
+ALTER TABLE e2e_room_keys_new RENAME TO e2e_room_keys;
+
+CREATE UNIQUE INDEX e2e_room_keys_idx ON e2e_room_keys(user_id, room_id, session_id);
diff --git a/synapse/storage/schema/delta/53/add_user_type_to_users.sql b/synapse/storage/schema/delta/53/add_user_type_to_users.sql
new file mode 100644
index 0000000000..88ec2f83e5
--- /dev/null
+++ b/synapse/storage/schema/delta/53/add_user_type_to_users.sql
@@ -0,0 +1,19 @@
+/* Copyright 2018 New Vector Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* The type of the user: NULL for a regular user, or one of the constants in 
+ * synapse.api.constants.UserTypes
+ */
+ALTER TABLE users ADD COLUMN user_type TEXT DEFAULT NULL;
diff --git a/synapse/storage/schema/delta/11/v11.sql b/synapse/storage/schema/delta/53/drop_sent_transactions.sql
index e7b4f90127..e372f5a44a 100644
--- a/synapse/storage/schema/delta/11/v11.sql
+++ b/synapse/storage/schema/delta/53/drop_sent_transactions.sql
@@ -1,4 +1,4 @@
-/* Copyright 2015, 2016 OpenMarket Ltd
+/* Copyright 2018 New Vector Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -13,4 +13,4 @@
  * limitations under the License.
  */
 
-CREATE INDEX IF NOT EXISTS sent_transaction_txn_id ON sent_transactions(transaction_id);
\ No newline at end of file
+DROP TABLE IF EXISTS sent_transactions;
diff --git a/synapse/storage/schema/delta/53/event_format_version.sql b/synapse/storage/schema/delta/53/event_format_version.sql
new file mode 100644
index 0000000000..1d977c2834
--- /dev/null
+++ b/synapse/storage/schema/delta/53/event_format_version.sql
@@ -0,0 +1,16 @@
+/* Copyright 2019 New Vector Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ALTER TABLE event_json ADD COLUMN format_version INTEGER;
diff --git a/synapse/storage/schema/delta/53/user_ips_index.sql b/synapse/storage/schema/delta/53/user_ips_index.sql
new file mode 100644
index 0000000000..b812c5794f
--- /dev/null
+++ b/synapse/storage/schema/delta/53/user_ips_index.sql
@@ -0,0 +1,30 @@
+/* Copyright 2018 New Vector Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- analyze user_ips, to help ensure the correct indices are used
+INSERT INTO background_updates (update_name, progress_json) VALUES
+  ('user_ips_analyze', '{}');
+
+-- delete duplicates
+INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES
+  ('user_ips_remove_dupes', '{}', 'user_ips_analyze');
+
+-- add a new unique index to user_ips table
+INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES
+  ('user_ips_device_unique_index', '{}', 'user_ips_remove_dupes');
+
+-- drop the old original index
+INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES
+  ('user_ips_drop_nonunique_index', '{}', 'user_ips_device_unique_index');
diff --git a/synapse/storage/schema/full_schemas/11/transactions.sql b/synapse/storage/schema/full_schemas/11/transactions.sql
index a3f4a0a790..f6a058832e 100644
--- a/synapse/storage/schema/full_schemas/11/transactions.sql
+++ b/synapse/storage/schema/full_schemas/11/transactions.sql
@@ -25,25 +25,6 @@ CREATE TABLE IF NOT EXISTS received_transactions(
 
 CREATE INDEX transactions_have_ref ON received_transactions(origin, has_been_referenced);-- WHERE has_been_referenced = 0;
 
-
--- Stores what transactions we've sent, what their response was (if we got one) and whether we have
--- since referenced the transaction in another outgoing transaction
-CREATE TABLE IF NOT EXISTS sent_transactions(
-    id INTEGER PRIMARY KEY AUTOINCREMENT, -- This is used to apply insertion ordering
-    transaction_id TEXT,
-    destination TEXT,
-    response_code INTEGER DEFAULT 0,
-    response_json TEXT,
-    ts BIGINT
-);
-
-CREATE INDEX sent_transaction_dest ON sent_transactions(destination);
-CREATE INDEX sent_transaction_txn_id ON sent_transactions(transaction_id);
--- So that we can do an efficient look up of all transactions that have yet to be successfully
--- sent.
-CREATE INDEX sent_transaction_sent ON sent_transactions(response_code);
-
-
 -- For sent transactions only.
 CREATE TABLE IF NOT EXISTS transaction_id_to_pdu(
     transaction_id INTEGER,
diff --git a/synapse/storage/schema/full_schemas/16/transactions.sql b/synapse/storage/schema/full_schemas/16/transactions.sql
index 14b67cce25..17e67bedac 100644
--- a/synapse/storage/schema/full_schemas/16/transactions.sql
+++ b/synapse/storage/schema/full_schemas/16/transactions.sql
@@ -25,25 +25,6 @@ CREATE TABLE IF NOT EXISTS received_transactions(
 
 CREATE INDEX transactions_have_ref ON received_transactions(origin, has_been_referenced);-- WHERE has_been_referenced = 0;
 
-
--- Stores what transactions we've sent, what their response was (if we got one) and whether we have
--- since referenced the transaction in another outgoing transaction
-CREATE TABLE IF NOT EXISTS sent_transactions(
-    id BIGINT PRIMARY KEY, -- This is used to apply insertion ordering
-    transaction_id TEXT,
-    destination TEXT,
-    response_code INTEGER DEFAULT 0,
-    response_json TEXT,
-    ts BIGINT
-);
-
-CREATE INDEX sent_transaction_dest ON sent_transactions(destination);
-CREATE INDEX sent_transaction_txn_id ON sent_transactions(transaction_id);
--- So that we can do an efficient look up of all transactions that have yet to be successfully
--- sent.
-CREATE INDEX sent_transaction_sent ON sent_transactions(response_code);
-
-
 -- For sent transactions only.
 CREATE TABLE IF NOT EXISTS transaction_id_to_pdu(
     transaction_id INTEGER,
diff --git a/synapse/storage/search.py b/synapse/storage/search.py
index d5b5df93e6..c6420b2374 100644
--- a/synapse/storage/search.py
+++ b/synapse/storage/search.py
@@ -45,6 +45,10 @@ class SearchStore(BackgroundUpdateStore):
 
     def __init__(self, db_conn, hs):
         super(SearchStore, self).__init__(db_conn, hs)
+
+        if not hs.config.enable_search:
+            return
+
         self.register_background_update_handler(
             self.EVENT_SEARCH_UPDATE_NAME, self._background_reindex_search
         )
@@ -316,6 +320,8 @@ class SearchStore(BackgroundUpdateStore):
             entries (iterable[SearchEntry]):
                 entries to be added to the table
         """
+        if not self.hs.config.enable_search:
+            return
         if isinstance(self.database_engine, PostgresEngine):
             sql = (
                 "INSERT INTO event_search"
diff --git a/synapse/storage/state.py b/synapse/storage/state.py
index ef65929bb2..6ddc4055d2 100644
--- a/synapse/storage/state.py
+++ b/synapse/storage/state.py
@@ -428,14 +428,54 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
         """
         # for now we do this by looking at the create event. We may want to cache this
         # more intelligently in future.
+
+        # Retrieve the room's create event
+        create_event = yield self.get_create_event_for_room(room_id)
+        defer.returnValue(create_event.content.get("room_version", "1"))
+
+    @defer.inlineCallbacks
+    def get_room_predecessor(self, room_id):
+        """Get the predecessor room of an upgraded room if one exists.
+        Otherwise return None.
+
+        Args:
+            room_id (str)
+
+        Returns:
+            Deferred[dict|None]: the create event's "predecessor" content, if any
+
+        Raises:
+            NotFoundError if the room is unknown
+        """
+        # Retrieve the room's create event
+        create_event = yield self.get_create_event_for_room(room_id)
+
+        # Return predecessor if present
+        defer.returnValue(create_event.content.get("predecessor", None))
+
+    @defer.inlineCallbacks
+    def get_create_event_for_room(self, room_id):
+        """Get the create state event for a room.
+
+        Args:
+            room_id (str)
+
+        Returns:
+            Deferred[EventBase]: The room creation event.
+
+        Raises:
+            NotFoundError if the room is unknown
+        """
         state_ids = yield self.get_current_state_ids(room_id)
         create_id = state_ids.get((EventTypes.Create, ""))
 
+        # If we can't find the create event, assume we've hit a dead end
         if not create_id:
-            raise NotFoundError("Unknown room")
+            raise NotFoundError("Unknown room %s" % (room_id))
 
+        # Retrieve the room's create event and return
         create_event = yield self.get_event(create_id)
-        defer.returnValue(create_event.content.get("room_version", "1"))
+        defer.returnValue(create_event)
 
     @cached(max_entries=100000, iterable=True)
     def get_current_state_ids(self, room_id):
@@ -508,6 +548,31 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
             _get_filtered_current_state_ids_txn,
         )
 
+    @defer.inlineCallbacks
+    def get_canonical_alias_for_room(self, room_id):
+        """Get canonical alias for room, if any
+
+        Args:
+            room_id (str)
+
+        Returns:
+            Deferred[str|None]: The canonical alias, if any
+        """
+
+        state = yield self.get_filtered_current_state_ids(room_id, StateFilter.from_types(
+            [(EventTypes.CanonicalAlias, "")]
+        ))
+
+        event_id = state.get((EventTypes.CanonicalAlias, ""))
+        if not event_id:
+            return
+
+        event = yield self.get_event(event_id, allow_none=True)
+        if not event:
+            return
+
+        defer.returnValue(event.content.get("canonical_alias"))
+
     @cached(max_entries=10000, iterable=True)
     def get_state_group_delta(self, state_group):
         """Given a state group try to return a previous group and a delta between
@@ -1257,6 +1322,7 @@ class StateStore(StateGroupWorkerStore, BackgroundUpdateStore):
     STATE_GROUP_DEDUPLICATION_UPDATE_NAME = "state_group_state_deduplication"
     STATE_GROUP_INDEX_UPDATE_NAME = "state_group_state_type_index"
     CURRENT_STATE_INDEX_UPDATE_NAME = "current_state_members_idx"
+    EVENT_STATE_GROUP_INDEX_UPDATE_NAME = "event_to_state_groups_sg_index"
 
     def __init__(self, db_conn, hs):
         super(StateStore, self).__init__(db_conn, hs)
@@ -1275,6 +1341,12 @@ class StateStore(StateGroupWorkerStore, BackgroundUpdateStore):
             columns=["state_key"],
             where_clause="type='m.room.member'",
         )
+        self.register_background_index_update(
+            self.EVENT_STATE_GROUP_INDEX_UPDATE_NAME,
+            index_name="event_to_state_groups_sg_index",
+            table="event_to_state_groups",
+            columns=["state_group"],
+        )
 
     def _store_event_state_mappings_txn(self, txn, events_and_contexts):
         state_groups = {}
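
The refactor above routes both get_room_version and get_room_predecessor
through the shared get_create_event_for_room helper and then reads
different keys out of the create event's content. A minimal sketch with
an invented content dict:

def room_version_from_create(content):
    # Room versions default to "1" when the create event omits the key.
    return content.get("room_version", "1")

def predecessor_from_create(content):
    # Upgraded rooms record their old room under "predecessor".
    return content.get("predecessor", None)

content = {
    "room_version": "3",
    "predecessor": {"room_id": "!old:example.org", "event_id": "$abc"},
}
print(room_version_from_create(content))  # "3"
print(predecessor_from_create(content))   # {'room_id': ..., 'event_id': ...}
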
diff --git a/synapse/storage/user_directory.py b/synapse/storage/user_directory.py
index a8781b0e5d..fea866c043 100644
--- a/synapse/storage/user_directory.py
+++ b/synapse/storage/user_directory.py
@@ -22,6 +22,7 @@ from twisted.internet import defer
 
 from synapse.api.constants import EventTypes, JoinRules
 from synapse.storage.engines import PostgresEngine, Sqlite3Engine
+from synapse.storage.state import StateFilter
 from synapse.types import get_domain_from_id, get_localpart_from_id
 from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
 
@@ -31,12 +32,19 @@ logger = logging.getLogger(__name__)
 
 
 class UserDirectoryStore(SQLBaseStore):
-    @cachedInlineCallbacks(cache_context=True)
-    def is_room_world_readable_or_publicly_joinable(self, room_id, cache_context):
+    @defer.inlineCallbacks
+    def is_room_world_readable_or_publicly_joinable(self, room_id):
         """Check if the room is either world_readable or publically joinable
         """
-        current_state_ids = yield self.get_current_state_ids(
-            room_id, on_invalidate=cache_context.invalidate
+
+        # Create a state filter that only queries the join rules and history visibility state events
+        types_to_filter = (
+            (EventTypes.JoinRules, ""),
+            (EventTypes.RoomHistoryVisibility, ""),
+        )
+
+        current_state_ids = yield self.get_filtered_current_state_ids(
+            room_id, StateFilter.from_types(types_to_filter)
         )
 
         join_rules_id = current_state_ids.get((EventTypes.JoinRules, ""))
@@ -66,14 +74,8 @@ class UserDirectoryStore(SQLBaseStore):
         """
         yield self._simple_insert_many(
             table="users_in_public_rooms",
-            values=[
-                {
-                    "user_id": user_id,
-                    "room_id": room_id,
-                }
-                for user_id in user_ids
-            ],
-            desc="add_users_to_public_room"
+            values=[{"user_id": user_id, "room_id": room_id} for user_id in user_ids],
+            desc="add_users_to_public_room",
         )
         for user_id in user_ids:
             self.get_user_in_public_room.invalidate((user_id,))
@@ -99,7 +101,9 @@ class UserDirectoryStore(SQLBaseStore):
             """
             args = (
                 (
-                    user_id, get_localpart_from_id(user_id), get_domain_from_id(user_id),
+                    user_id,
+                    get_localpart_from_id(user_id),
+                    get_domain_from_id(user_id),
                     profile.display_name,
                 )
                 for user_id, profile in iteritems(users_with_profile)
@@ -112,7 +116,7 @@ class UserDirectoryStore(SQLBaseStore):
             args = (
                 (
                     user_id,
-                    "%s %s" % (user_id, p.display_name,) if p.display_name else user_id
+                    "%s %s" % (user_id, p.display_name) if p.display_name else user_id,
                 )
                 for user_id, p in iteritems(users_with_profile)
             )
@@ -133,12 +137,10 @@ class UserDirectoryStore(SQLBaseStore):
                         "avatar_url": profile.avatar_url,
                     }
                     for user_id, profile in iteritems(users_with_profile)
-                ]
+                ],
             )
             for user_id in users_with_profile:
-                txn.call_after(
-                    self.get_user_in_directory.invalidate, (user_id,)
-                )
+                txn.call_after(self.get_user_in_directory.invalidate, (user_id,))
 
         return self.runInteraction(
             "add_profiles_to_user_dir", _add_profiles_to_user_dir_txn
@@ -168,39 +170,69 @@ class UserDirectoryStore(SQLBaseStore):
             if isinstance(self.database_engine, PostgresEngine):
                 # We weight the localpart most highly, then display name and finally
                 # server name
-                if new_entry:
+                if self.database_engine.can_native_upsert:
                     sql = """
                         INSERT INTO user_directory_search(user_id, vector)
                         VALUES (?,
                             setweight(to_tsvector('english', ?), 'A')
                             || setweight(to_tsvector('english', ?), 'D')
                             || setweight(to_tsvector('english', COALESCE(?, '')), 'B')
-                        )
+                        ) ON CONFLICT (user_id) DO UPDATE SET vector=EXCLUDED.vector
                     """
                     txn.execute(
                         sql,
                         (
-                            user_id, get_localpart_from_id(user_id),
-                            get_domain_from_id(user_id), display_name,
-                        )
+                            user_id,
+                            get_localpart_from_id(user_id),
+                            get_domain_from_id(user_id),
+                            display_name,
+                        ),
                     )
                 else:
-                    sql = """
-                        UPDATE user_directory_search
-                        SET vector = setweight(to_tsvector('english', ?), 'A')
-                            || setweight(to_tsvector('english', ?), 'D')
-                            || setweight(to_tsvector('english', COALESCE(?, '')), 'B')
-                        WHERE user_id = ?
-                    """
-                    txn.execute(
-                        sql,
-                        (
-                            get_localpart_from_id(user_id), get_domain_from_id(user_id),
-                            display_name, user_id,
+                    # TODO: Remove this code after we've bumped the minimum version
+                    # of postgres to always support upserts, so we can get rid of
+                    # `new_entry` usage
+                    if new_entry is True:
+                        sql = """
+                            INSERT INTO user_directory_search(user_id, vector)
+                            VALUES (?,
+                                setweight(to_tsvector('english', ?), 'A')
+                                || setweight(to_tsvector('english', ?), 'D')
+                                || setweight(to_tsvector('english', COALESCE(?, '')), 'B')
+                            )
+                        """
+                        txn.execute(
+                            sql,
+                            (
+                                user_id,
+                                get_localpart_from_id(user_id),
+                                get_domain_from_id(user_id),
+                                display_name,
+                            ),
+                        )
+                    elif new_entry is False:
+                        sql = """
+                            UPDATE user_directory_search
+                            SET vector = setweight(to_tsvector('english', ?), 'A')
+                                || setweight(to_tsvector('english', ?), 'D')
+                                || setweight(to_tsvector('english', COALESCE(?, '')), 'B')
+                            WHERE user_id = ?
+                        """
+                        txn.execute(
+                            sql,
+                            (
+                                get_localpart_from_id(user_id),
+                                get_domain_from_id(user_id),
+                                display_name,
+                                user_id,
+                            ),
+                        )
+                    else:
+                        raise RuntimeError(
+                            "upsert returned None when 'can_native_upsert' is False"
                         )
-                    )
             elif isinstance(self.database_engine, Sqlite3Engine):
-                value = "%s %s" % (user_id, display_name,) if display_name else user_id
+                value = "%s %s" % (user_id, display_name) if display_name else user_id
                 self._simple_upsert_txn(
                     txn,
                     table="user_directory_search",
@@ -231,29 +263,18 @@ class UserDirectoryStore(SQLBaseStore):
     def remove_from_user_dir(self, user_id):
         def _remove_from_user_dir_txn(txn):
             self._simple_delete_txn(
-                txn,
-                table="user_directory",
-                keyvalues={"user_id": user_id},
+                txn, table="user_directory", keyvalues={"user_id": user_id}
             )
             self._simple_delete_txn(
-                txn,
-                table="user_directory_search",
-                keyvalues={"user_id": user_id},
+                txn, table="user_directory_search", keyvalues={"user_id": user_id}
             )
             self._simple_delete_txn(
-                txn,
-                table="users_in_public_rooms",
-                keyvalues={"user_id": user_id},
-            )
-            txn.call_after(
-                self.get_user_in_directory.invalidate, (user_id,)
+                txn, table="users_in_public_rooms", keyvalues={"user_id": user_id}
             )
-            txn.call_after(
-                self.get_user_in_public_room.invalidate, (user_id,)
-            )
-        return self.runInteraction(
-            "remove_from_user_dir", _remove_from_user_dir_txn,
-        )
+            txn.call_after(self.get_user_in_directory.invalidate, (user_id,))
+            txn.call_after(self.get_user_in_public_room.invalidate, (user_id,))
+
+        return self.runInteraction("remove_from_user_dir", _remove_from_user_dir_txn)
 
     @defer.inlineCallbacks
     def remove_from_user_in_public_room(self, user_id):
@@ -338,6 +359,7 @@ class UserDirectoryStore(SQLBaseStore):
             share_private (bool): Is the room private
             user_id_tuples([(str, str)]): iterable of 2-tuple of user IDs.
         """
+
         def _add_users_who_share_room_txn(txn):
             self._simple_insert_many_txn(
                 txn,
@@ -354,13 +376,12 @@ class UserDirectoryStore(SQLBaseStore):
             )
             for user_id, other_user_id in user_id_tuples:
                 txn.call_after(
-                    self.get_users_who_share_room_from_dir.invalidate,
-                    (user_id,),
+                    self.get_users_who_share_room_from_dir.invalidate, (user_id,)
                 )
                 txn.call_after(
-                    self.get_if_users_share_a_room.invalidate,
-                    (user_id, other_user_id),
+                    self.get_if_users_share_a_room.invalidate, (user_id, other_user_id)
                 )
+
         return self.runInteraction(
             "add_users_who_share_room", _add_users_who_share_room_txn
         )
@@ -374,6 +395,7 @@ class UserDirectoryStore(SQLBaseStore):
             share_private (bool): Is the room private
             user_id_tuples([(str, str)]): iterable of 2-tuple of user IDs.
         """
+
         def _update_users_who_share_room_txn(txn):
             sql = """
                 UPDATE users_who_share_rooms
@@ -381,21 +403,16 @@ class UserDirectoryStore(SQLBaseStore):
                 WHERE user_id = ? AND other_user_id = ?
             """
             txn.executemany(
-                sql,
-                (
-                    (room_id, share_private, uid, oid)
-                    for uid, oid in user_id_sets
-                )
+                sql, ((room_id, share_private, uid, oid) for uid, oid in user_id_sets)
             )
             for user_id, other_user_id in user_id_sets:
                 txn.call_after(
-                    self.get_users_who_share_room_from_dir.invalidate,
-                    (user_id,),
+                    self.get_users_who_share_room_from_dir.invalidate, (user_id,)
                 )
                 txn.call_after(
-                    self.get_if_users_share_a_room.invalidate,
-                    (user_id, other_user_id),
+                    self.get_if_users_share_a_room.invalidate, (user_id, other_user_id)
                 )
+
         return self.runInteraction(
             "update_users_who_share_room", _update_users_who_share_room_txn
         )
@@ -409,22 +426,18 @@ class UserDirectoryStore(SQLBaseStore):
             share_private (bool): Is the room private
             user_id_tuples([(str, str)]): iterable of 2-tuple of user IDs.
         """
+
         def _remove_user_who_share_room_txn(txn):
             self._simple_delete_txn(
                 txn,
                 table="users_who_share_rooms",
-                keyvalues={
-                    "user_id": user_id,
-                    "other_user_id": other_user_id,
-                },
+                keyvalues={"user_id": user_id, "other_user_id": other_user_id},
             )
             txn.call_after(
-                self.get_users_who_share_room_from_dir.invalidate,
-                (user_id,),
+                self.get_users_who_share_room_from_dir.invalidate, (user_id,)
             )
             txn.call_after(
-                self.get_if_users_share_a_room.invalidate,
-                (user_id, other_user_id),
+                self.get_if_users_share_a_room.invalidate, (user_id, other_user_id)
             )
 
         return self.runInteraction(
@@ -445,10 +458,7 @@ class UserDirectoryStore(SQLBaseStore):
         """
         return self._simple_select_one_onecol(
             table="users_who_share_rooms",
-            keyvalues={
-                "user_id": user_id,
-                "other_user_id": other_user_id,
-            },
+            keyvalues={"user_id": user_id, "other_user_id": other_user_id},
             retcol="share_private",
             allow_none=True,
             desc="get_if_users_share_a_room",
@@ -466,17 +476,12 @@ class UserDirectoryStore(SQLBaseStore):
         """
         rows = yield self._simple_select_list(
             table="users_who_share_rooms",
-            keyvalues={
-                "user_id": user_id,
-            },
-            retcols=("other_user_id", "share_private",),
+            keyvalues={"user_id": user_id},
+            retcols=("other_user_id", "share_private"),
             desc="get_users_who_share_room_with_user",
         )
 
-        defer.returnValue({
-            row["other_user_id"]: row["share_private"]
-            for row in rows
-        })
+        defer.returnValue({row["other_user_id"]: row["share_private"] for row in rows})
 
     def get_users_in_share_dir_with_room_id(self, user_id, room_id):
         """Get all user tuples that are in the users_who_share_rooms due to the
@@ -523,6 +528,7 @@ class UserDirectoryStore(SQLBaseStore):
     def delete_all_from_user_dir(self):
         """Delete the entire user directory
         """
+
         def _delete_all_from_user_dir_txn(txn):
             txn.execute("DELETE FROM user_directory")
             txn.execute("DELETE FROM user_directory_search")
@@ -532,6 +538,7 @@ class UserDirectoryStore(SQLBaseStore):
             txn.call_after(self.get_user_in_public_room.invalidate_all)
             txn.call_after(self.get_users_who_share_room_from_dir.invalidate_all)
             txn.call_after(self.get_if_users_share_a_room.invalidate_all)
+
         return self.runInteraction(
             "delete_all_from_user_dir", _delete_all_from_user_dir_txn
         )
@@ -541,7 +548,7 @@ class UserDirectoryStore(SQLBaseStore):
         return self._simple_select_one(
             table="user_directory",
             keyvalues={"user_id": user_id},
-            retcols=("room_id", "display_name", "avatar_url",),
+            retcols=("room_id", "display_name", "avatar_url"),
             allow_none=True,
             desc="get_user_in_directory",
         )
@@ -574,7 +581,9 @@ class UserDirectoryStore(SQLBaseStore):
 
     def get_current_state_deltas(self, prev_stream_id):
         prev_stream_id = int(prev_stream_id)
-        if not self._curr_state_delta_stream_cache.has_any_entity_changed(prev_stream_id):
+        if not self._curr_state_delta_stream_cache.has_any_entity_changed(
+            prev_stream_id
+        ):
             return []
 
         def get_current_state_deltas_txn(txn):
@@ -608,7 +617,7 @@ class UserDirectoryStore(SQLBaseStore):
                 WHERE ? < stream_id AND stream_id <= ?
                 ORDER BY stream_id ASC
             """
-            txn.execute(sql, (prev_stream_id, max_stream_id,))
+            txn.execute(sql, (prev_stream_id, max_stream_id))
             return self.cursor_to_dict(txn)
 
         return self.runInteraction(
@@ -698,8 +707,11 @@ class UserDirectoryStore(SQLBaseStore):
                     display_name IS NULL,
                     avatar_url IS NULL
                 LIMIT ?
-            """ % (join_clause, where_clause)
-            args = join_args + (full_query, exact_query, prefix_query, limit + 1,)
+            """ % (
+                join_clause,
+                where_clause,
+            )
+            args = join_args + (full_query, exact_query, prefix_query, limit + 1)
         elif isinstance(self.database_engine, Sqlite3Engine):
             search_query = _parse_query_sqlite(search_term)
 
@@ -716,7 +728,10 @@ class UserDirectoryStore(SQLBaseStore):
                     display_name IS NULL,
                     avatar_url IS NULL
                 LIMIT ?
-            """ % (join_clause, where_clause)
+            """ % (
+                join_clause,
+                where_clause,
+            )
             args = join_args + (search_query, limit + 1)
         else:
             # This should be unreachable.
@@ -728,10 +743,7 @@ class UserDirectoryStore(SQLBaseStore):
 
         limited = len(results) > limit
 
-        defer.returnValue({
-            "limited": limited,
-            "results": results,
-        })
+        defer.returnValue({"limited": limited, "results": results})
 
 
 def _parse_query_sqlite(search_term):
@@ -746,7 +758,7 @@ def _parse_query_sqlite(search_term):
 
     # Pull out the individual words, discarding any non-word characters.
     results = re.findall(r"([\w\-]+)", search_term, re.UNICODE)
-    return " & ".join("(%s* OR %s)" % (result, result,) for result in results)
+    return " & ".join("(%s* OR %s)" % (result, result) for result in results)
 
 
 def _parse_query_postgres(search_term):
@@ -759,7 +771,7 @@ def _parse_query_postgres(search_term):
     # Pull out the individual words, discarding any non-word characters.
     results = re.findall(r"([\w\-]+)", search_term, re.UNICODE)
 
-    both = " & ".join("(%s:* | %s)" % (result, result,) for result in results)
+    both = " & ".join("(%s:* | %s)" % (result, result) for result in results)
     exact = " & ".join("%s" % (result,) for result in results)
     prefix = " & ".join("%s:*" % (result,) for result in results)
 
diff --git a/synapse/types.py b/synapse/types.py
index 8a77eda357..c6e128c064 100644
--- a/synapse/types.py
+++ b/synapse/types.py
@@ -12,6 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import re
 import string
 from collections import namedtuple
 
@@ -240,6 +241,71 @@ def strip_invalid_mxid_characters(localpart):
     return filter(lambda c: c in mxid_localpart_allowed_characters, localpart)
 
 
+UPPER_CASE_PATTERN = re.compile(b"[A-Z_]")
+
+# the following is a pattern which matches '=', and any byte which is not allowed
+# in an mxid localpart.
+#
+# It works by:
+#  * building a string containing the allowed characters (excluding '=')
+#  * escaping every special character with a backslash (to stop '-' being interpreted as a
+#    range operator)
+#  * wrapping it in a '[^...]' regex
+#  * converting the whole lot to a 'bytes' sequence, so that we can use it to match
+#    bytes rather than strings
+#
+NON_MXID_CHARACTER_PATTERN = re.compile(
+    ("[^%s]" % (
+        re.escape("".join(mxid_localpart_allowed_characters - {"="}),),
+    )).encode("ascii"),
+)
+
+
+def map_username_to_mxid_localpart(username, case_sensitive=False):
+    """Map a username onto a string suitable for a MXID
+
+    This follows the algorithm laid out at
+    https://matrix.org/docs/spec/appendices.html#mapping-from-other-character-sets.
+
+    Args:
+        username (unicode|bytes): username to be mapped
+        case_sensitive (bool): true if TEST and test should be mapped
+            onto different mxids
+
+    Returns:
+        unicode: string suitable for a mxid localpart
+    """
+    if not isinstance(username, bytes):
+        username = username.encode('utf-8')
+
+    # first we sort out upper-case characters
+    if case_sensitive:
+        def f1(m):
+            return b"_" + m.group().lower()
+
+        username = UPPER_CASE_PATTERN.sub(f1, username)
+    else:
+        username = username.lower()
+
+    # then we sort out non-ascii characters
+    def f2(m):
+        g = m.group()[0]
+        if isinstance(g, str):
+            # on python 2, we need to do a ord(). On python 3, the
+            # byte itself will do.
+            g = ord(g)
+        return b"=%02x" % (g,)
+
+    username = NON_MXID_CHARACTER_PATTERN.sub(f2, username)
+
+    # we also apply the =-escaping to usernames starting with an underscore.
+    username = re.sub(b'^_', b'=5f', username)
+
+    # we should now only have ascii bytes left, so we can decode back to a
+    # unicode string.
+    return username.decode('ascii')
+
+
 class StreamToken(
     namedtuple("Token", (
         "room_key",
diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py
index ec7b2c9672..f0e4a0e10c 100644
--- a/synapse/util/async_helpers.py
+++ b/synapse/util/async_helpers.py
@@ -201,7 +201,7 @@ class Linearizer(object):
         if entry[0] >= self.max_count:
             res = self._await_lock(key)
         else:
-            logger.info(
+            logger.debug(
                 "Acquired uncontended linearizer lock %r for key %r", self.name, key,
             )
             entry[0] += 1
@@ -215,7 +215,7 @@ class Linearizer(object):
             try:
                 yield
             finally:
-                logger.info("Releasing linearizer lock %r for key %r", self.name, key)
+                logger.debug("Releasing linearizer lock %r for key %r", self.name, key)
 
                 # We've finished executing so check if there are any things
                 # blocked waiting to execute and start one of them
@@ -247,7 +247,7 @@ class Linearizer(object):
         """
         entry = self.key_to_defer[key]
 
-        logger.info(
+        logger.debug(
             "Waiting to acquire linearizer lock %r for key %r", self.name, key,
         )
 
@@ -255,7 +255,7 @@ class Linearizer(object):
         entry[1][new_defer] = 1
 
         def cb(_r):
-            logger.info("Acquired linearizer lock %r for key %r", self.name, key)
+            logger.debug("Acquired linearizer lock %r for key %r", self.name, key)
             entry[0] += 1
 
             # if the code holding the lock completes synchronously, then it
@@ -273,7 +273,7 @@ class Linearizer(object):
         def eb(e):
             logger.info("defer %r got err %r", new_defer, e)
             if isinstance(e, CancelledError):
-                logger.info(
+                logger.debug(
                     "Cancelling wait for linearizer lock %r for key %r",
                     self.name, key,
                 )
@@ -387,12 +387,14 @@ def timeout_deferred(deferred, timeout, reactor, on_timeout_cancel=None):
     deferred that wraps and times out the given deferred, correctly handling
     the case where the given deferred's canceller throws.
 
+    (See https://twistedmatrix.com/trac/ticket/9534)
+
     NOTE: Unlike `Deferred.addTimeout`, this function returns a new deferred
 
     Args:
         deferred (Deferred)
         timeout (float): Timeout in seconds
-        reactor (twisted.internet.reactor): The twisted reactor to use
+        reactor (twisted.interfaces.IReactorTime): The twisted reactor to use
         on_timeout_cancel (callable): A callable which is called immediately
             after the deferred times out, and not if this deferred is
             otherwise cancelled before the timeout.
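
A hedged usage sketch (the ten-second timeout is illustrative):

    from twisted.internet import defer, reactor

    d = defer.Deferred()
    timed = timeout_deferred(d, timeout=10.0, reactor=reactor)
    # `timed` is a new deferred: it fires with d's result, or errbacks if d has
    # not completed within 10 seconds, so callers should chain onto `timed`
    # rather than `d`.
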
diff --git a/synapse/util/caches/ttlcache.py b/synapse/util/caches/ttlcache.py
new file mode 100644
index 0000000000..5ba1862506
--- /dev/null
+++ b/synapse/util/caches/ttlcache.py
@@ -0,0 +1,161 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015, 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import time
+
+import attr
+from sortedcontainers import SortedList
+
+from synapse.util.caches import register_cache
+
+logger = logging.getLogger(__name__)
+
+SENTINEL = object()
+
+
+class TTLCache(object):
+    """A key/value cache implementation where each entry has its own TTL"""
+
+    def __init__(self, cache_name, timer=time.time):
+        # map from key to _CacheEntry
+        self._data = {}
+
+        # the _CacheEntries, sorted by expiry time
+        self._expiry_list = SortedList()
+
+        self._timer = timer
+
+        self._metrics = register_cache("ttl", cache_name, self)
+
+    def set(self, key, value, ttl):
+        """Add/update an entry in the cache
+
+        Args:
+            key: key for this entry
+            value: value for this entry
+            ttl (float): TTL for this entry, in seconds
+        """
+        expiry = self._timer() + ttl
+
+        self.expire()
+        e = self._data.pop(key, SENTINEL)
+        if e != SENTINEL:
+            self._expiry_list.remove(e)
+
+        entry = _CacheEntry(expiry_time=expiry, key=key, value=value)
+        self._data[key] = entry
+        self._expiry_list.add(entry)
+
+    def get(self, key, default=SENTINEL):
+        """Get a value from the cache
+
+        Args:
+            key: key to look up
+            default: default value to return if the key is not found. If not set
+                and the key is not found, a KeyError will be raised
+
+        Returns:
+            value from the cache, or the default
+        """
+        self.expire()
+        e = self._data.get(key, SENTINEL)
+        if e == SENTINEL:
+            self._metrics.inc_misses()
+            if default == SENTINEL:
+                raise KeyError(key)
+            return default
+        self._metrics.inc_hits()
+        return e.value
+
+    def get_with_expiry(self, key):
+        """Get a value, and its expiry time, from the cache
+
+        Args:
+            key: key to look up
+
+        Returns:
+            Tuple[Any, float]: the value from the cache, and the expiry time
+
+        Raises:
+            KeyError if the entry is not found
+        """
+        self.expire()
+        try:
+            e = self._data[key]
+        except KeyError:
+            self._metrics.inc_misses()
+            raise
+        self._metrics.inc_hits()
+        return e.value, e.expiry_time
+
+    def pop(self, key, default=SENTINEL):
+        """Remove a value from the cache
+
+        If key is in the cache, remove it and return its value, else return default.
+        If default is not given and key is not in the cache, a KeyError is raised.
+
+        Args:
+            key: key to look up
+            default: default value to return if the key is not found. If not set
+                and the key is not found, a KeyError will be raised
+
+        Returns:
+            value from the cache, or the default
+        """
+        self.expire()
+        e = self._data.pop(key, SENTINEL)
+        if e == SENTINEL:
+            self._metrics.inc_misses()
+            if default == SENTINEL:
+                raise KeyError(key)
+            return default
+        self._expiry_list.remove(e)
+        self._metrics.inc_hits()
+        return e.value
+
+    def __getitem__(self, key):
+        return self.get(key)
+
+    def __delitem__(self, key):
+        self.pop(key)
+
+    def __contains__(self, key):
+        return key in self._data
+
+    def __len__(self):
+        self.expire()
+        return len(self._data)
+
+    def expire(self):
+        """Run the expiry on the cache. Any entries whose expiry times are due will
+        be removed
+        """
+        now = self._timer()
+        while self._expiry_list:
+            first_entry = self._expiry_list[0]
+            if first_entry.expiry_time - now > 0.0:
+                break
+            del self._data[first_entry.key]
+            del self._expiry_list[0]
+
+
+@attr.s(frozen=True, slots=True)
+class _CacheEntry(object):
+    """TTLCache entry"""
+    # expiry_time is the first attribute, so that entries are sorted by expiry.
+    expiry_time = attr.ib()
+    key = attr.ib()
+    value = attr.ib()
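
A minimal usage sketch of TTLCache, driving expiry with a fake timer (the
list-based clock is purely illustrative):

    now = [100.0]
    cache = TTLCache("example", timer=lambda: now[0])

    cache.set("key", "value", ttl=10)                # entry expires at t=110
    assert cache.get("key") == "value"

    now[0] = 120.0                                   # advance past the expiry
    assert cache.get("key", default=None) is None    # expired on next access
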
diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py
index 4c6e92beb8..311b49e18a 100644
--- a/synapse/util/logcontext.py
+++ b/synapse/util/logcontext.py
@@ -285,7 +285,10 @@ class LoggingContext(object):
         self.alive = False
 
         # if we have a parent, pass our CPU usage stats on
-        if self.parent_context is not None:
+        if (
+            self.parent_context is not None
+            and hasattr(self.parent_context, '_resource_usage')
+        ):
             self.parent_context._resource_usage += self._resource_usage
 
             # reset them in case we get entered again
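
A minimal illustration of why the hasattr() check is needed; the _Sentinel
stand-in is an assumption about the sentinel logging context, which carries no
usage accumulator:

    class _Sentinel(object):
        pass

    parent = _Sentinel()
    # Without the hasattr() guard, this would raise AttributeError:
    if parent is not None and hasattr(parent, "_resource_usage"):
        parent._resource_usage += 1
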
diff --git a/synapse/util/stringutils.py b/synapse/util/stringutils.py
index 6f318c6a29..fdcb375f95 100644
--- a/synapse/util/stringutils.py
+++ b/synapse/util/stringutils.py
@@ -16,7 +16,8 @@
 import random
 import string
 
-from six import PY3
+import six
+from six import PY2, PY3
 from six.moves import range
 
 _string_with_symbols = (
@@ -71,3 +72,39 @@ def to_ascii(s):
         return s.encode("ascii")
     except UnicodeEncodeError:
         return s
+
+
+def exception_to_unicode(e):
+    """Helper function to extract the text of an exception as a unicode string
+
+    Args:
+        e (Exception): exception to be stringified
+
+    Returns:
+        unicode
+    """
+    # urgh, this is a mess. The basic problem here is that psycopg2 constructs its
+    # exceptions with PyErr_SetString, with a (possibly non-ascii) argument. str() will
+    # then produce the raw byte sequence. Under Python 2, this will then cause another
+    # error if it gets mixed with a `unicode` object, as per
+    # https://github.com/matrix-org/synapse/issues/4252
+
+    # First of all, if we're under python3, everything is fine because it will sort this
+    # nonsense out for us.
+    if not PY2:
+        return str(e)
+
+    # otherwise let's have a stab at decoding the exception message. We'll circumvent
+    # Exception.__str__(), which would explode if someone raised Exception(u'non-ascii'),
+    # and instead look at what is in the args member.
+
+    if len(e.args) == 0:
+        return u""
+    elif len(e.args) > 1:
+        return six.text_type(repr(e.args))
+
+    msg = e.args[0]
+    if isinstance(msg, bytes):
+        return msg.decode('utf-8', errors='replace')
+    else:
+        return msg
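
Illustrative behaviour (the byte string stands in for what psycopg2 might
produce under Python 2):

    e = Exception(b"d\xc3\xa9j\xc3\xa0 vu")
    exception_to_unicode(e)
    # -> u'd\xe9j\xe0 vu' ("deja vu" with accents) on Python 2, with args[0]
    #    decoded as UTF-8; on Python 3 the function simply returns str(e).
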
diff --git a/synctl b/synctl
index 7e79b05c39..816c898b36 100755
--- a/synctl
+++ b/synctl
@@ -156,7 +156,9 @@ def main():
         write(
             "No config file found\n"
             "To generate a config file, run '%s -c %s --generate-config"
-            " --server-name=<server name>'\n" % (" ".join(SYNAPSE), options.configfile),
+            " --server-name=<server name> --report-stats=<yes/no>'\n" % (
+                " ".join(SYNAPSE), options.configfile,
+            ),
             stream=sys.stderr,
         )
         sys.exit(1)
diff --git a/tests/__init__.py b/tests/__init__.py
index 9d9ca22829..d3181f9403 100644
--- a/tests/__init__.py
+++ b/tests/__init__.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,7 +16,9 @@
 
 from twisted.trial import util
 
-from tests import utils
+import tests.patch_inline_callbacks
+
+# attempt to do the patch before we load any synapse code
+tests.patch_inline_callbacks.do_patch()
 
 util.DEFAULT_TIMEOUT_DURATION = 10
-utils.setupdb()
diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py
index 379e9c4ab1..d77f20e876 100644
--- a/tests/api/test_auth.py
+++ b/tests/api/test_auth.py
@@ -50,6 +50,8 @@ class AuthTestCase(unittest.TestCase):
         # this is overridden for the appservice tests
         self.store.get_app_service_by_token = Mock(return_value=None)
 
+        self.store.is_support_user = Mock(return_value=defer.succeed(False))
+
     @defer.inlineCallbacks
     def test_get_user_by_req_user_valid_token(self):
         user_info = {"name": self.test_user, "token_id": "ditto", "device_id": "device"}
@@ -192,8 +194,6 @@ class AuthTestCase(unittest.TestCase):
 
     @defer.inlineCallbacks
     def test_get_user_from_macaroon(self):
-        # TODO(danielwh): Remove this mock when we remove the
-        # get_user_by_access_token fallback.
         self.store.get_user_by_access_token = Mock(
             return_value={"name": "@baldrick:matrix.org", "device_id": "device"}
         )
@@ -218,6 +218,7 @@ class AuthTestCase(unittest.TestCase):
     @defer.inlineCallbacks
     def test_get_guest_user_from_macaroon(self):
         self.store.get_user_by_id = Mock(return_value={"is_guest": True})
+        self.store.get_user_by_access_token = Mock(return_value=None)
 
         user_id = "@baldrick:matrix.org"
         macaroon = pymacaroons.Macaroon(
@@ -239,158 +240,6 @@ class AuthTestCase(unittest.TestCase):
         self.store.get_user_by_id.assert_called_with(user_id)
 
     @defer.inlineCallbacks
-    def test_get_user_from_macaroon_user_db_mismatch(self):
-        self.store.get_user_by_access_token = Mock(
-            return_value={"name": "@percy:matrix.org"}
-        )
-
-        user = "@baldrick:matrix.org"
-        macaroon = pymacaroons.Macaroon(
-            location=self.hs.config.server_name,
-            identifier="key",
-            key=self.hs.config.macaroon_secret_key,
-        )
-        macaroon.add_first_party_caveat("gen = 1")
-        macaroon.add_first_party_caveat("type = access")
-        macaroon.add_first_party_caveat("user_id = %s" % (user,))
-        with self.assertRaises(AuthError) as cm:
-            yield self.auth.get_user_by_access_token(macaroon.serialize())
-        self.assertEqual(401, cm.exception.code)
-        self.assertIn("User mismatch", cm.exception.msg)
-
-    @defer.inlineCallbacks
-    def test_get_user_from_macaroon_missing_caveat(self):
-        # TODO(danielwh): Remove this mock when we remove the
-        # get_user_by_access_token fallback.
-        self.store.get_user_by_access_token = Mock(
-            return_value={"name": "@baldrick:matrix.org"}
-        )
-
-        macaroon = pymacaroons.Macaroon(
-            location=self.hs.config.server_name,
-            identifier="key",
-            key=self.hs.config.macaroon_secret_key,
-        )
-        macaroon.add_first_party_caveat("gen = 1")
-        macaroon.add_first_party_caveat("type = access")
-
-        with self.assertRaises(AuthError) as cm:
-            yield self.auth.get_user_by_access_token(macaroon.serialize())
-        self.assertEqual(401, cm.exception.code)
-        self.assertIn("No user caveat", cm.exception.msg)
-
-    @defer.inlineCallbacks
-    def test_get_user_from_macaroon_wrong_key(self):
-        # TODO(danielwh): Remove this mock when we remove the
-        # get_user_by_access_token fallback.
-        self.store.get_user_by_access_token = Mock(
-            return_value={"name": "@baldrick:matrix.org"}
-        )
-
-        user = "@baldrick:matrix.org"
-        macaroon = pymacaroons.Macaroon(
-            location=self.hs.config.server_name,
-            identifier="key",
-            key=self.hs.config.macaroon_secret_key + "wrong",
-        )
-        macaroon.add_first_party_caveat("gen = 1")
-        macaroon.add_first_party_caveat("type = access")
-        macaroon.add_first_party_caveat("user_id = %s" % (user,))
-
-        with self.assertRaises(AuthError) as cm:
-            yield self.auth.get_user_by_access_token(macaroon.serialize())
-        self.assertEqual(401, cm.exception.code)
-        self.assertIn("Invalid macaroon", cm.exception.msg)
-
-    @defer.inlineCallbacks
-    def test_get_user_from_macaroon_unknown_caveat(self):
-        # TODO(danielwh): Remove this mock when we remove the
-        # get_user_by_access_token fallback.
-        self.store.get_user_by_access_token = Mock(
-            return_value={"name": "@baldrick:matrix.org"}
-        )
-
-        user = "@baldrick:matrix.org"
-        macaroon = pymacaroons.Macaroon(
-            location=self.hs.config.server_name,
-            identifier="key",
-            key=self.hs.config.macaroon_secret_key,
-        )
-        macaroon.add_first_party_caveat("gen = 1")
-        macaroon.add_first_party_caveat("type = access")
-        macaroon.add_first_party_caveat("user_id = %s" % (user,))
-        macaroon.add_first_party_caveat("cunning > fox")
-
-        with self.assertRaises(AuthError) as cm:
-            yield self.auth.get_user_by_access_token(macaroon.serialize())
-        self.assertEqual(401, cm.exception.code)
-        self.assertIn("Invalid macaroon", cm.exception.msg)
-
-    @defer.inlineCallbacks
-    def test_get_user_from_macaroon_expired(self):
-        # TODO(danielwh): Remove this mock when we remove the
-        # get_user_by_access_token fallback.
-        self.store.get_user_by_access_token = Mock(
-            return_value={"name": "@baldrick:matrix.org"}
-        )
-
-        self.store.get_user_by_access_token = Mock(
-            return_value={"name": "@baldrick:matrix.org"}
-        )
-
-        user = "@baldrick:matrix.org"
-        macaroon = pymacaroons.Macaroon(
-            location=self.hs.config.server_name,
-            identifier="key",
-            key=self.hs.config.macaroon_secret_key,
-        )
-        macaroon.add_first_party_caveat("gen = 1")
-        macaroon.add_first_party_caveat("type = access")
-        macaroon.add_first_party_caveat("user_id = %s" % (user,))
-        macaroon.add_first_party_caveat("time < -2000")  # ms
-
-        self.hs.clock.now = 5000  # seconds
-        self.hs.config.expire_access_token = True
-        # yield self.auth.get_user_by_access_token(macaroon.serialize())
-        # TODO(daniel): Turn on the check that we validate expiration, when we
-        # validate expiration (and remove the above line, which will start
-        # throwing).
-        with self.assertRaises(AuthError) as cm:
-            yield self.auth.get_user_by_access_token(macaroon.serialize())
-        self.assertEqual(401, cm.exception.code)
-        self.assertIn("Invalid macaroon", cm.exception.msg)
-
-    @defer.inlineCallbacks
-    def test_get_user_from_macaroon_with_valid_duration(self):
-        # TODO(danielwh): Remove this mock when we remove the
-        # get_user_by_access_token fallback.
-        self.store.get_user_by_access_token = Mock(
-            return_value={"name": "@baldrick:matrix.org"}
-        )
-
-        self.store.get_user_by_access_token = Mock(
-            return_value={"name": "@baldrick:matrix.org"}
-        )
-
-        user_id = "@baldrick:matrix.org"
-        macaroon = pymacaroons.Macaroon(
-            location=self.hs.config.server_name,
-            identifier="key",
-            key=self.hs.config.macaroon_secret_key,
-        )
-        macaroon.add_first_party_caveat("gen = 1")
-        macaroon.add_first_party_caveat("type = access")
-        macaroon.add_first_party_caveat("user_id = %s" % (user_id,))
-        macaroon.add_first_party_caveat("time < 900000000")  # ms
-
-        self.hs.clock.now = 5000  # seconds
-        self.hs.config.expire_access_token = True
-
-        user_info = yield self.auth.get_user_by_access_token(macaroon.serialize())
-        user = user_info["user"]
-        self.assertEqual(UserID.from_string(user_id), user)
-
-    @defer.inlineCallbacks
     def test_cannot_use_regular_token_as_guest(self):
         USER_ID = "@percy:matrix.org"
         self.store.add_access_token_to_user = Mock()
diff --git a/tests/app/test_frontend_proxy.py b/tests/app/test_frontend_proxy.py
index a83f567ebd..8bdbc608a9 100644
--- a/tests/app/test_frontend_proxy.py
+++ b/tests/app/test_frontend_proxy.py
@@ -59,7 +59,7 @@ class FrontendProxyTests(HomeserverTestCase):
 
     def test_listen_http_with_presence_disabled(self):
         """
-        When presence is on, the stub servlet will register.
+        When presence is off, the stub servlet will register.
         """
         # Presence is off
         self.hs.config.use_presence = False
diff --git a/tests/app/test_openid_listener.py b/tests/app/test_openid_listener.py
new file mode 100644
index 0000000000..590abc1e92
--- /dev/null
+++ b/tests/app/test_openid_listener.py
@@ -0,0 +1,119 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from mock import Mock, patch
+
+from parameterized import parameterized
+
+from synapse.app.federation_reader import FederationReaderServer
+from synapse.app.homeserver import SynapseHomeServer
+
+from tests.unittest import HomeserverTestCase
+
+
+class FederationReaderOpenIDListenerTests(HomeserverTestCase):
+    def make_homeserver(self, reactor, clock):
+        hs = self.setup_test_homeserver(
+            http_client=None, homeserverToUse=FederationReaderServer,
+        )
+        return hs
+
+    @parameterized.expand([
+        (["federation"], "auth_fail"),
+        ([], "no_resource"),
+        (["openid", "federation"], "auth_fail"),
+        (["openid"], "auth_fail"),
+    ])
+    def test_openid_listener(self, names, expectation):
+        """
+        Test different openid listener configurations.
+
+        401 is success here since it means we hit the handler and auth failed.
+        """
+        config = {
+            "port": 8080,
+            "bind_addresses": ["0.0.0.0"],
+            "resources": [{"names": names}],
+        }
+
+        # Listen with the config
+        self.hs._listen_http(config)
+
+        # Grab the resource from the site that was told to listen
+        site = self.reactor.tcpServers[0][1]
+        try:
+            self.resource = (
+                site.resource.children[b"_matrix"].children[b"federation"]
+            )
+        except KeyError:
+            if expectation == "no_resource":
+                return
+            raise
+
+        request, channel = self.make_request(
+            "GET",
+            "/_matrix/federation/v1/openid/userinfo",
+        )
+        self.render(request)
+
+        self.assertEqual(channel.code, 401)
+
+
+@patch("synapse.app.homeserver.KeyApiV2Resource", new=Mock())
+class SynapseHomeserverOpenIDListenerTests(HomeserverTestCase):
+    def make_homeserver(self, reactor, clock):
+        hs = self.setup_test_homeserver(
+            http_client=None, homeserverToUse=SynapseHomeServer,
+        )
+        return hs
+
+    @parameterized.expand([
+        (["federation"], "auth_fail"),
+        ([], "no_resource"),
+        (["openid", "federation"], "auth_fail"),
+        (["openid"], "auth_fail"),
+    ])
+    def test_openid_listener(self, names, expectation):
+        """
+        Test different openid listener configurations.
+
+        401 is success here since it means we hit the handler and auth failed.
+        """
+        config = {
+            "port": 8080,
+            "bind_addresses": ["0.0.0.0"],
+            "resources": [{"names": names}],
+        }
+
+        # Listen with the config
+        self.hs._listener_http(config, config)
+
+        # Grab the resource from the site that was told to listen
+        site = self.reactor.tcpServers[0][1]
+        try:
+            self.resource = (
+                site.resource.children[b"_matrix"].children[b"federation"]
+            )
+        except KeyError:
+            if expectation == "no_resource":
+                return
+            raise
+
+        request, channel = self.make_request(
+            "GET",
+            "/_matrix/federation/v1/openid/userinfo",
+        )
+        self.render(request)
+
+        self.assertEqual(channel.code, 401)
diff --git a/tests/config/test_generate.py b/tests/config/test_generate.py
index 0c23068bcf..795b4c298d 100644
--- a/tests/config/test_generate.py
+++ b/tests/config/test_generate.py
@@ -50,9 +50,6 @@ class ConfigGenerationTestCase(unittest.TestCase):
                     "homeserver.yaml",
                     "lemurs.win.log.config",
                     "lemurs.win.signing.key",
-                    "lemurs.win.tls.crt",
-                    "lemurs.win.tls.dh",
-                    "lemurs.win.tls.key",
                 ]
             ),
             set(os.listdir(self.dir)),
diff --git a/tests/config/test_room_directory.py b/tests/config/test_room_directory.py
index f37a17d618..3dc2631523 100644
--- a/tests/config/test_room_directory.py
+++ b/tests/config/test_room_directory.py
@@ -36,6 +36,8 @@ class RoomDirectoryConfigTestCase(unittest.TestCase):
             - user_id: "@gah:example.com"
               alias: "#goo:example.com"
               action: "allow"
+
+        room_list_publication_rules: []
         """)
 
         rd_config = RoomDirectoryConfig()
@@ -43,25 +45,102 @@ class RoomDirectoryConfigTestCase(unittest.TestCase):
 
         self.assertFalse(rd_config.is_alias_creation_allowed(
             user_id="@bob:example.com",
+            room_id="!test",
             alias="#test:example.com",
         ))
 
         self.assertTrue(rd_config.is_alias_creation_allowed(
             user_id="@test:example.com",
+            room_id="!test",
             alias="#unofficial_st:example.com",
         ))
 
         self.assertTrue(rd_config.is_alias_creation_allowed(
             user_id="@foobar:example.com",
+            room_id="!test",
             alias="#test:example.com",
         ))
 
         self.assertTrue(rd_config.is_alias_creation_allowed(
             user_id="@gah:example.com",
+            room_id="!test",
             alias="#goo:example.com",
         ))
 
         self.assertFalse(rd_config.is_alias_creation_allowed(
             user_id="@test:example.com",
+            room_id="!test",
             alias="#test:example.com",
         ))
+
+    def test_room_publish_acl(self):
+        config = yaml.load("""
+        alias_creation_rules: []
+
+        room_list_publication_rules:
+            - user_id: "*bob*"
+              alias: "*"
+              action: "deny"
+            - user_id: "*"
+              alias: "#unofficial_*"
+              action: "allow"
+            - user_id: "@foo*:example.com"
+              alias: "*"
+              action: "allow"
+            - user_id: "@gah:example.com"
+              alias: "#goo:example.com"
+              action: "allow"
+            - room_id: "!test-deny"
+              action: "deny"
+        """)
+
+        rd_config = RoomDirectoryConfig()
+        rd_config.read_config(config)
+
+        self.assertFalse(rd_config.is_publishing_room_allowed(
+            user_id="@bob:example.com",
+            room_id="!test",
+            aliases=["#test:example.com"],
+        ))
+
+        self.assertTrue(rd_config.is_publishing_room_allowed(
+            user_id="@test:example.com",
+            room_id="!test",
+            aliases=["#unofficial_st:example.com"],
+        ))
+
+        self.assertTrue(rd_config.is_publishing_room_allowed(
+            user_id="@foobar:example.com",
+            room_id="!test",
+            aliases=[],
+        ))
+
+        self.assertTrue(rd_config.is_publishing_room_allowed(
+            user_id="@gah:example.com",
+            room_id="!test",
+            aliases=["#goo:example.com"],
+        ))
+
+        self.assertFalse(rd_config.is_publishing_room_allowed(
+            user_id="@test:example.com",
+            room_id="!test",
+            aliases=["#test:example.com"],
+        ))
+
+        self.assertTrue(rd_config.is_publishing_room_allowed(
+            user_id="@foobar:example.com",
+            room_id="!test-deny",
+            aliases=[],
+        ))
+
+        self.assertFalse(rd_config.is_publishing_room_allowed(
+            user_id="@gah:example.com",
+            room_id="!test-deny",
+            aliases=[],
+        ))
+
+        self.assertTrue(rd_config.is_publishing_room_allowed(
+            user_id="@test:example.com",
+            room_id="!test",
+            aliases=["#unofficial_st:example.com", "#blah:example.com"],
+        ))
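
The assertions above are consistent with first-match-wins evaluation: rules are
tried in order and the first matching rule's action applies (an inference from
the expected results, not quoted from the implementation). For instance:

    # The "*bob*" deny rule matches before the "#unofficial_*" allow rule, so
    # bob is denied even for an unofficial alias:
    assert not rd_config.is_publishing_room_allowed(
        user_id="@bob:example.com",
        room_id="!test",
        aliases=["#unofficial_x:example.com"],
    )
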
diff --git a/tests/config/test_tls.py b/tests/config/test_tls.py
new file mode 100644
index 0000000000..c260d3359f
--- /dev/null
+++ b/tests/config/test_tls.py
@@ -0,0 +1,79 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+from synapse.config.tls import TlsConfig
+
+from tests.unittest import TestCase
+
+
+class TestConfig(TlsConfig):
+    def has_tls_listener(self):
+        return False
+
+
+class TLSConfigTests(TestCase):
+
+    def test_warn_self_signed(self):
+        """
+        Synapse will give a warning when it loads a self-signed certificate.
+        """
+        config_dir = self.mktemp()
+        os.mkdir(config_dir)
+        with open(os.path.join(config_dir, "cert.pem"), 'w') as f:
+            f.write("""-----BEGIN CERTIFICATE-----
+MIID6DCCAtACAws9CjANBgkqhkiG9w0BAQUFADCBtzELMAkGA1UEBhMCVFIxDzAN
+BgNVBAgMBsOHb3J1bTEUMBIGA1UEBwwLQmHFn21ha8OnxLExEjAQBgNVBAMMCWxv
+Y2FsaG9zdDEcMBoGA1UECgwTVHdpc3RlZCBNYXRyaXggTGFiczEkMCIGA1UECwwb
+QXV0b21hdGVkIFRlc3RpbmcgQXV0aG9yaXR5MSkwJwYJKoZIhvcNAQkBFhpzZWN1
+cml0eUB0d2lzdGVkbWF0cml4LmNvbTAgFw0xNzA3MTIxNDAxNTNaGA8yMTE3MDYx
+ODE0MDE1M1owgbcxCzAJBgNVBAYTAlRSMQ8wDQYDVQQIDAbDh29ydW0xFDASBgNV
+BAcMC0JhxZ9tYWvDp8SxMRIwEAYDVQQDDAlsb2NhbGhvc3QxHDAaBgNVBAoME1R3
+aXN0ZWQgTWF0cml4IExhYnMxJDAiBgNVBAsMG0F1dG9tYXRlZCBUZXN0aW5nIEF1
+dGhvcml0eTEpMCcGCSqGSIb3DQEJARYac2VjdXJpdHlAdHdpc3RlZG1hdHJpeC5j
+b20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDwT6kbqtMUI0sMkx4h
+I+L780dA59KfksZCqJGmOsMD6hte9EguasfkZzvCF3dk3NhwCjFSOvKx6rCwiteo
+WtYkVfo+rSuVNmt7bEsOUDtuTcaxTzIFB+yHOYwAaoz3zQkyVW0c4pzioiLCGCmf
+FLdiDBQGGp74tb+7a0V6kC3vMLFoM3L6QWq5uYRB5+xLzlPJ734ltyvfZHL3Us6p
+cUbK+3WTWvb4ER0W2RqArAj6Bc/ERQKIAPFEiZi9bIYTwvBH27OKHRz+KoY/G8zY
++l+WZoJqDhupRAQAuh7O7V/y6bSP+KNxJRie9QkZvw1PSaGSXtGJI3WWdO12/Ulg
+epJpAgMBAAEwDQYJKoZIhvcNAQEFBQADggEBAJXEq5P9xwvP9aDkXIqzcD0L8sf8
+ewlhlxTQdeqt2Nace0Yk18lIo2oj1t86Y8jNbpAnZJeI813Rr5M7FbHCXoRc/SZG
+I8OtG1xGwcok53lyDuuUUDexnK4O5BkjKiVlNPg4HPim5Kuj2hRNFfNt/F2BVIlj
+iZupikC5MT1LQaRwidkSNxCku1TfAyueiBwhLnFwTmIGNnhuDCutEVAD9kFmcJN2
+SznugAcPk4doX2+rL+ila+ThqgPzIkwTUHtnmjI0TI6xsDUlXz5S3UyudrE2Qsfz
+s4niecZKPBizL6aucT59CsunNmmb5Glq8rlAcU+1ZTZZzGYqVYhF6axB9Qg=
+-----END CERTIFICATE-----""")
+
+        config = {
+            "tls_certificate_path": os.path.join(config_dir, "cert.pem"),
+            "tls_fingerprints": []
+        }
+
+        t = TestConfig()
+        t.read_config(config)
+        t.read_certificate_from_disk(require_cert_and_key=False)
+
+        warnings = self.flushWarnings()
+        self.assertEqual(len(warnings), 1)
+        self.assertEqual(
+            warnings[0]["message"],
+            (
+                "Self-signed TLS certificates will not be accepted by "
+                "Synapse 1.0. Please either provide a valid certificate, "
+                "or use Synapse's ACME support to provision one."
+            )
+        )
diff --git a/tests/crypto/test_event_signing.py b/tests/crypto/test_event_signing.py
index b2536c1e69..71aa731439 100644
--- a/tests/crypto/test_event_signing.py
+++ b/tests/crypto/test_event_signing.py
@@ -18,7 +18,7 @@ import nacl.signing
 from unpaddedbase64 import decode_base64
 
 from synapse.crypto.event_signing import add_hashes_and_signatures
-from synapse.events.builder import EventBuilder
+from synapse.events import FrozenEvent
 
 from tests import unittest
 
@@ -40,20 +40,18 @@ class EventSigningTestCase(unittest.TestCase):
         self.signing_key.version = KEY_VER
 
     def test_sign_minimal(self):
-        builder = EventBuilder(
-            {
-                'event_id': "$0:domain",
-                'origin': "domain",
-                'origin_server_ts': 1000000,
-                'signatures': {},
-                'type': "X",
-                'unsigned': {'age_ts': 1000000},
-            }
-        )
+        event_dict = {
+            'event_id': "$0:domain",
+            'origin': "domain",
+            'origin_server_ts': 1000000,
+            'signatures': {},
+            'type': "X",
+            'unsigned': {'age_ts': 1000000},
+        }
 
-        add_hashes_and_signatures(builder, HOSTNAME, self.signing_key)
+        add_hashes_and_signatures(event_dict, HOSTNAME, self.signing_key)
 
-        event = builder.build()
+        event = FrozenEvent(event_dict)
 
         self.assertTrue(hasattr(event, 'hashes'))
         self.assertIn('sha256', event.hashes)
@@ -71,23 +69,21 @@ class EventSigningTestCase(unittest.TestCase):
         )
 
     def test_sign_message(self):
-        builder = EventBuilder(
-            {
-                'content': {'body': "Here is the message content"},
-                'event_id': "$0:domain",
-                'origin': "domain",
-                'origin_server_ts': 1000000,
-                'type': "m.room.message",
-                'room_id': "!r:domain",
-                'sender': "@u:domain",
-                'signatures': {},
-                'unsigned': {'age_ts': 1000000},
-            }
-        )
-
-        add_hashes_and_signatures(builder, HOSTNAME, self.signing_key)
-
-        event = builder.build()
+        event_dict = {
+            'content': {'body': "Here is the message content"},
+            'event_id': "$0:domain",
+            'origin': "domain",
+            'origin_server_ts': 1000000,
+            'type': "m.room.message",
+            'room_id': "!r:domain",
+            'sender': "@u:domain",
+            'signatures': {},
+            'unsigned': {'age_ts': 1000000},
+        }
+
+        add_hashes_and_signatures(event_dict, HOSTNAME, self.signing_key)
+
+        event = FrozenEvent(event_dict)
 
         self.assertTrue(hasattr(event, 'hashes'))
         self.assertIn('sha256', event.hashes)
diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py
index 8299dc72c8..d643bec887 100644
--- a/tests/crypto/test_keyring.py
+++ b/tests/crypto/test_keyring.py
@@ -63,6 +63,14 @@ class KeyringTestCase(unittest.TestCase):
         keys = self.mock_perspective_server.get_verify_keys()
         self.hs.config.perspectives = {self.mock_perspective_server.server_name: keys}
 
+    def assert_sentinel_context(self):
+        if LoggingContext.current_context() != LoggingContext.sentinel:
+            self.fail(
+                "Expected sentinel context but got %s" % (
+                    LoggingContext.current_context(),
+                )
+            )
+
     def check_context(self, _, expected):
         self.assertEquals(
             getattr(LoggingContext.current_context(), "request", None), expected
@@ -70,8 +78,6 @@ class KeyringTestCase(unittest.TestCase):
 
     @defer.inlineCallbacks
     def test_wait_for_previous_lookups(self):
-        sentinel_context = LoggingContext.current_context()
-
         kr = keyring.Keyring(self.hs)
 
         lookup_1_deferred = defer.Deferred()
@@ -99,8 +105,10 @@ class KeyringTestCase(unittest.TestCase):
                 ["server1"], {"server1": lookup_2_deferred}
             )
             self.assertFalse(wait_2_deferred.called)
+
             # ... so we should have reset the LoggingContext.
-            self.assertIs(LoggingContext.current_context(), sentinel_context)
+            self.assert_sentinel_context()
+
             wait_2_deferred.addBoth(self.check_context, "two")
 
             # let the first lookup complete (in the sentinel context)
@@ -198,8 +206,6 @@ class KeyringTestCase(unittest.TestCase):
         json1 = {}
         signedjson.sign.sign_json(json1, "server9", key1)
 
-        sentinel_context = LoggingContext.current_context()
-
         with LoggingContext("one") as context_one:
             context_one.request = "one"
 
@@ -213,7 +219,7 @@ class KeyringTestCase(unittest.TestCase):
 
             defer = kr.verify_json_for_server("server9", json1)
             self.assertFalse(defer.called)
-            self.assertIs(LoggingContext.current_context(), sentinel_context)
+            self.assert_sentinel_context()
             yield defer
 
             self.assertIs(LoggingContext.current_context(), context_one)
diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py
index 8ae6556c0a..9bf395e923 100644
--- a/tests/handlers/test_directory.py
+++ b/tests/handlers/test_directory.py
@@ -121,6 +121,7 @@ class TestCreateAliasACL(unittest.HomeserverTestCase):
                 "action": "allow",
             }
         ]
+        config["room_list_publication_rules"] = []
 
         rd_config = RoomDirectoryConfig()
         rd_config.read_config(config)
diff --git a/tests/handlers/test_e2e_room_keys.py b/tests/handlers/test_e2e_room_keys.py
index 9e08eac0a5..1c49bbbc3c 100644
--- a/tests/handlers/test_e2e_room_keys.py
+++ b/tests/handlers/test_e2e_room_keys.py
@@ -126,6 +126,78 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase):
         })
 
     @defer.inlineCallbacks
+    def test_update_version(self):
+        """Check that we can update versions.
+        """
+        version = yield self.handler.create_version(self.local_user, {
+            "algorithm": "m.megolm_backup.v1",
+            "auth_data": "first_version_auth_data",
+        })
+        self.assertEqual(version, "1")
+
+        res = yield self.handler.update_version(self.local_user, version, {
+            "algorithm": "m.megolm_backup.v1",
+            "auth_data": "revised_first_version_auth_data",
+            "version": version
+        })
+        self.assertDictEqual(res, {})
+
+        # check we can retrieve it as the current version
+        res = yield self.handler.get_version_info(self.local_user)
+        self.assertDictEqual(res, {
+            "algorithm": "m.megolm_backup.v1",
+            "auth_data": "revised_first_version_auth_data",
+            "version": version
+        })
+
+    @defer.inlineCallbacks
+    def test_update_missing_version(self):
+        """Check that we get a 404 on updating nonexistent versions
+        """
+        res = None
+        try:
+            yield self.handler.update_version(self.local_user, "1", {
+                "algorithm": "m.megolm_backup.v1",
+                "auth_data": "revised_first_version_auth_data",
+                "version": "1"
+            })
+        except errors.SynapseError as e:
+            res = e.code
+        self.assertEqual(res, 404)
+
+    @defer.inlineCallbacks
+    def test_update_bad_version(self):
+        """Check that we get a 400 if the version in the body is missing or
+        doesn't match
+        """
+        version = yield self.handler.create_version(self.local_user, {
+            "algorithm": "m.megolm_backup.v1",
+            "auth_data": "first_version_auth_data",
+        })
+        self.assertEqual(version, "1")
+
+        res = None
+        try:
+            yield self.handler.update_version(self.local_user, version, {
+                "algorithm": "m.megolm_backup.v1",
+                "auth_data": "revised_first_version_auth_data"
+            })
+        except errors.SynapseError as e:
+            res = e.code
+        self.assertEqual(res, 400)
+
+        res = None
+        try:
+            yield self.handler.update_version(self.local_user, version, {
+                "algorithm": "m.megolm_backup.v1",
+                "auth_data": "revised_first_version_auth_data",
+                "version": "incorrect"
+            })
+        except errors.SynapseError as e:
+            res = e.code
+        self.assertEqual(res, 400)
+
+    @defer.inlineCallbacks
     def test_delete_missing_version(self):
         """Check that we get a 404 on deleting nonexistent versions
         """
@@ -169,8 +241,8 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase):
         self.assertEqual(res, 404)
 
     @defer.inlineCallbacks
-    def test_get_missing_room_keys(self):
-        """Check that we get a 404 on querying missing room_keys
+    def test_get_missing_backup(self):
+        """Check that we get a 404 on querying missing backup
         """
         res = None
         try:
@@ -179,19 +251,20 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase):
             res = e.code
         self.assertEqual(res, 404)
 
-        # check we also get a 404 even if the version is valid
+    @defer.inlineCallbacks
+    def test_get_missing_room_keys(self):
+        """Check we get an empty response from an empty backup
+        """
         version = yield self.handler.create_version(self.local_user, {
             "algorithm": "m.megolm_backup.v1",
             "auth_data": "first_version_auth_data",
         })
         self.assertEqual(version, "1")
 
-        res = None
-        try:
-            yield self.handler.get_room_keys(self.local_user, version)
-        except errors.SynapseError as e:
-            res = e.code
-        self.assertEqual(res, 404)
+        res = yield self.handler.get_room_keys(self.local_user, version)
+        self.assertDictEqual(res, {
+            "rooms": {}
+        })
 
     # TODO: test the locking semantics when uploading room_keys,
     # although this is probably best done in sytest
@@ -345,17 +418,15 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase):
         # check for bulk-delete
         yield self.handler.upload_room_keys(self.local_user, version, room_keys)
         yield self.handler.delete_room_keys(self.local_user, version)
-        res = None
-        try:
-            yield self.handler.get_room_keys(
-                self.local_user,
-                version,
-                room_id="!abc:matrix.org",
-                session_id="c0ff33",
-            )
-        except errors.SynapseError as e:
-            res = e.code
-        self.assertEqual(res, 404)
+        res = yield self.handler.get_room_keys(
+            self.local_user,
+            version,
+            room_id="!abc:matrix.org",
+            session_id="c0ff33",
+        )
+        self.assertDictEqual(res, {
+            "rooms": {}
+        })
 
         # check for bulk-delete per room
         yield self.handler.upload_room_keys(self.local_user, version, room_keys)
@@ -364,17 +435,15 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase):
             version,
             room_id="!abc:matrix.org",
         )
-        res = None
-        try:
-            yield self.handler.get_room_keys(
-                self.local_user,
-                version,
-                room_id="!abc:matrix.org",
-                session_id="c0ff33",
-            )
-        except errors.SynapseError as e:
-            res = e.code
-        self.assertEqual(res, 404)
+        res = yield self.handler.get_room_keys(
+            self.local_user,
+            version,
+            room_id="!abc:matrix.org",
+            session_id="c0ff33",
+        )
+        self.assertDictEqual(res, {
+            "rooms": {}
+        })
 
         # check for bulk-delete per session
         yield self.handler.upload_room_keys(self.local_user, version, room_keys)
@@ -384,14 +453,12 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase):
             room_id="!abc:matrix.org",
             session_id="c0ff33",
         )
-        res = None
-        try:
-            yield self.handler.get_room_keys(
-                self.local_user,
-                version,
-                room_id="!abc:matrix.org",
-                session_id="c0ff33",
-            )
-        except errors.SynapseError as e:
-            res = e.code
-        self.assertEqual(res, 404)
+        res = yield self.handler.get_room_keys(
+            self.local_user,
+            version,
+            room_id="!abc:matrix.org",
+            session_id="c0ff33",
+        )
+        self.assertDictEqual(res, {
+            "rooms": {}
+        })
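Note: the behavioural change pinned down here is that querying a backup version which exists but holds no keys now returns an empty {"rooms": {}} response instead of a 404; only a missing backup version itself 404s. A sketch of caller-side code distinguishing the two, under those assumptions (count_backed_up_keys is a hypothetical helper, not part of the patch):

    from twisted.internet import defer

    from synapse.api import errors

    @defer.inlineCallbacks
    def count_backed_up_keys(handler, user_id, version):
        try:
            res = yield handler.get_room_keys(user_id, version)
        except errors.SynapseError as e:
            if e.code == 404:
                # the backup version itself does not exist
                defer.returnValue(None)
            raise
        # an existing-but-empty backup yields {"rooms": {}}
        total = sum(
            len(room.get("sessions", {})) for room in res["rooms"].values()
        )
        defer.returnValue(total)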
diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py
index 3e9a190727..c9c1506273 100644
--- a/tests/handlers/test_register.py
+++ b/tests/handlers/test_register.py
@@ -17,7 +17,8 @@ from mock import Mock
 
 from twisted.internet import defer
 
-from synapse.api.errors import ResourceLimitError
+from synapse.api.constants import UserTypes
+from synapse.api.errors import ResourceLimitError, SynapseError
 from synapse.handlers.register import RegistrationHandler
 from synapse.types import RoomAlias, UserID, create_requester
 
@@ -47,7 +48,7 @@ class RegistrationTestCase(unittest.TestCase):
             generate_access_token=Mock(return_value='secret')
         )
         self.hs.get_macaroon_generator = Mock(return_value=self.macaroon_generator)
-        self.handler = self.hs.get_handlers().registration_handler
+        self.handler = self.hs.get_registration_handler()
         self.store = self.hs.get_datastore()
         self.hs.config.max_mau_value = 50
         self.lots_of_users = 100
@@ -64,6 +65,7 @@ class RegistrationTestCase(unittest.TestCase):
             requester, frank.localpart, "Frankie"
         )
         self.assertEquals(result_user_id, user_id)
+        self.assertIsNotNone(result_token)
         self.assertEquals(result_token, 'secret')
 
     @defer.inlineCallbacks
@@ -82,7 +84,7 @@ class RegistrationTestCase(unittest.TestCase):
             requester, local_part, None
         )
         self.assertEquals(result_user_id, user_id)
-        self.assertEquals(result_token, 'secret')
+        self.assertIsNotNone(result_token)
 
     @defer.inlineCallbacks
     def test_mau_limits_when_disabled(self):
@@ -130,27 +132,11 @@ class RegistrationTestCase(unittest.TestCase):
             yield self.handler.register(localpart="local_part")
 
     @defer.inlineCallbacks
-    def test_register_saml2_mau_blocked(self):
-        self.hs.config.limit_usage_by_mau = True
-        self.store.get_monthly_active_count = Mock(
-            return_value=defer.succeed(self.lots_of_users)
-        )
-        with self.assertRaises(ResourceLimitError):
-            yield self.handler.register_saml2(localpart="local_part")
-
-        self.store.get_monthly_active_count = Mock(
-            return_value=defer.succeed(self.hs.config.max_mau_value)
-        )
-        with self.assertRaises(ResourceLimitError):
-            yield self.handler.register_saml2(localpart="local_part")
-
-    @defer.inlineCallbacks
     def test_auto_create_auto_join_rooms(self):
         room_alias_str = "#room:test"
         self.hs.config.auto_join_rooms = [room_alias_str]
         res = yield self.handler.register(localpart='jeff')
         rooms = yield self.store.get_rooms_for_user(res[0])
-
         directory_handler = self.hs.get_handlers().directory_handler
         room_alias = RoomAlias.from_string(room_alias_str)
         room_id = yield directory_handler.get_association(room_alias)
@@ -184,3 +170,38 @@ class RegistrationTestCase(unittest.TestCase):
         res = yield self.handler.register(localpart='jeff')
         rooms = yield self.store.get_rooms_for_user(res[0])
         self.assertEqual(len(rooms), 0)
+
+    @defer.inlineCallbacks
+    def test_auto_create_auto_join_rooms_when_support_user_exists(self):
+        room_alias_str = "#room:test"
+        self.hs.config.auto_join_rooms = [room_alias_str]
+
+        self.store.is_support_user = Mock(return_value=True)
+        res = yield self.handler.register(localpart='support')
+        rooms = yield self.store.get_rooms_for_user(res[0])
+        self.assertEqual(len(rooms), 0)
+        directory_handler = self.hs.get_handlers().directory_handler
+        room_alias = RoomAlias.from_string(room_alias_str)
+        with self.assertRaises(SynapseError):
+            yield directory_handler.get_association(room_alias)
+
+    @defer.inlineCallbacks
+    def test_auto_create_auto_join_where_no_consent(self):
+        self.hs.config.user_consent_at_registration = True
+        self.hs.config.block_events_without_consent_error = "Error"
+        room_alias_str = "#room:test"
+        self.hs.config.auto_join_rooms = [room_alias_str]
+        res = yield self.handler.register(localpart='jeff')
+        yield self.handler.post_consent_actions(res[0])
+        rooms = yield self.store.get_rooms_for_user(res[0])
+        self.assertEqual(len(rooms), 0)
+
+    @defer.inlineCallbacks
+    def test_register_support_user(self):
+        res = yield self.handler.register(localpart='user', user_type=UserTypes.SUPPORT)
+        self.assertTrue(self.store.is_support_user(res[0]))
+
+    @defer.inlineCallbacks
+    def test_register_not_support_user(self):
+        res = yield self.handler.register(localpart='user')
+        self.assertFalse(self.store.is_support_user(res[0]))
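Note: the new tests above exercise the user_type parameter to register(): accounts created with UserTypes.SUPPORT are flagged as support users, skip the auto-join rooms, and stay out of the public room directory. A minimal sketch of creating one (make_support_user is a hypothetical wrapper, not part of the patch):

    from twisted.internet import defer

    from synapse.api.constants import UserTypes

    @defer.inlineCallbacks
    def make_support_user(registration_handler, localpart):
        # register() returns (user_id, access_token)
        user_id, access_token = yield registration_handler.register(
            localpart=localpart,
            user_type=UserTypes.SUPPORT,
        )
        defer.returnValue((user_id, access_token))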
diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py
new file mode 100644
index 0000000000..11f2bae698
--- /dev/null
+++ b/tests/handlers/test_user_directory.py
@@ -0,0 +1,91 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from mock import Mock
+
+from twisted.internet import defer
+
+from synapse.api.constants import UserTypes
+from synapse.handlers.user_directory import UserDirectoryHandler
+from synapse.storage.roommember import ProfileInfo
+
+from tests import unittest
+from tests.utils import setup_test_homeserver
+
+
+class UserDirectoryHandlers(object):
+    def __init__(self, hs):
+        self.user_directory_handler = UserDirectoryHandler(hs)
+
+
+class UserDirectoryTestCase(unittest.TestCase):
+    """ Tests the UserDirectoryHandler. """
+
+    @defer.inlineCallbacks
+    def setUp(self):
+        hs = yield setup_test_homeserver(self.addCleanup)
+        self.store = hs.get_datastore()
+        hs.handlers = UserDirectoryHandlers(hs)
+
+        self.handler = hs.get_handlers().user_directory_handler
+
+    @defer.inlineCallbacks
+    def test_handle_local_profile_change_with_support_user(self):
+        support_user_id = "@support:test"
+        yield self.store.register(
+            user_id=support_user_id,
+            token="123",
+            password_hash=None,
+            user_type=UserTypes.SUPPORT
+        )
+
+        yield self.handler.handle_local_profile_change(support_user_id, None)
+        profile = yield self.store.get_user_in_directory(support_user_id)
+        self.assertIsNone(profile)
+        display_name = 'display_name'
+
+        profile_info = ProfileInfo(
+            avatar_url='avatar_url',
+            display_name=display_name,
+        )
+        regular_user_id = '@regular:test'
+        yield self.handler.handle_local_profile_change(regular_user_id, profile_info)
+        profile = yield self.store.get_user_in_directory(regular_user_id)
+        self.assertEqual(profile['display_name'], display_name)
+
+    @defer.inlineCallbacks
+    def test_handle_user_deactivated_support_user(self):
+        s_user_id = "@support:test"
+        yield self.store.register(
+            user_id=s_user_id,
+            token="123",
+            password_hash=None,
+            user_type=UserTypes.SUPPORT
+        )
+
+        self.store.remove_from_user_dir = Mock()
+        self.store.remove_from_user_in_public_room = Mock()
+        yield self.handler.handle_user_deactivated(s_user_id)
+        self.store.remove_from_user_dir.assert_not_called()
+        self.store.remove_from_user_in_public_room.assert_not_called()
+
+    @defer.inlineCallbacks
+    def test_handle_user_deactivated_regular_user(self):
+        r_user_id = "@regular:test"
+        yield self.store.register(user_id=r_user_id, token="123", password_hash=None)
+        self.store.remove_from_user_dir = Mock()
+        self.store.remove_from_user_in_public_room = Mock()
+        yield self.handler.handle_user_deactivated(r_user_id)
+        self.store.remove_from_user_dir.assert_called_once_with(r_user_id)
+        self.store.remove_from_user_in_public_room.assert_called_once_with(r_user_id)
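Note: as fixed above, not_called() and called_once_with() are not Mock assertion methods: attribute access on a Mock silently creates a child mock, so such calls always pass regardless of what happened. Only the assert_* variants actually check anything. A short demonstration:

    from mock import Mock

    m = Mock()
    m("hello")

    m.not_called()  # no-op: just invokes an auto-created child mock

    try:
        m.assert_not_called()  # the real assertion: raises, since m was called
    except AssertionError:
        pass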
diff --git a/tests/http/__init__.py b/tests/http/__init__.py
index e69de29bb2..ee8010f598 100644
--- a/tests/http/__init__.py
+++ b/tests/http/__init__.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os.path
+
+from OpenSSL import SSL
+
+
+def get_test_cert_file():
+    """get the path to the test cert"""
+
+    # the cert file itself is made with:
+    #
+    # openssl req -x509 -newkey rsa:4096 -keyout server.pem  -out server.pem -days 36500 \
+    #     -nodes -subj '/CN=testserv'
+    return os.path.join(
+        os.path.dirname(__file__),
+        'server.pem',
+    )
+
+
+class ServerTLSContext(object):
+    """A TLS Context which presents our test cert."""
+    def __init__(self):
+        self.filename = get_test_cert_file()
+
+    def getContext(self):
+        ctx = SSL.Context(SSL.TLSv1_METHOD)
+        ctx.use_certificate_file(self.filename)
+        ctx.use_privatekey_file(self.filename)
+        return ctx
diff --git a/tests/http/federation/__init__.py b/tests/http/federation/__init__.py
new file mode 100644
index 0000000000..1453d04571
--- /dev/null
+++ b/tests/http/federation/__init__.py
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py
new file mode 100644
index 0000000000..dcf184d3cf
--- /dev/null
+++ b/tests/http/federation/test_matrix_federation_agent.py
@@ -0,0 +1,991 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+
+from mock import Mock
+
+import treq
+from zope.interface import implementer
+
+from twisted.internet import defer
+from twisted.internet._sslverify import ClientTLSOptions, OpenSSLCertificateOptions
+from twisted.internet.protocol import Factory
+from twisted.protocols.tls import TLSMemoryBIOFactory
+from twisted.web.http import HTTPChannel
+from twisted.web.http_headers import Headers
+from twisted.web.iweb import IPolicyForHTTPS
+
+from synapse.crypto.context_factory import ClientTLSOptionsFactory
+from synapse.http.federation.matrix_federation_agent import (
+    MatrixFederationAgent,
+    _cache_period_from_headers,
+)
+from synapse.http.federation.srv_resolver import Server
+from synapse.util.caches.ttlcache import TTLCache
+from synapse.util.logcontext import LoggingContext
+
+from tests.http import ServerTLSContext
+from tests.server import FakeTransport, ThreadedMemoryReactorClock
+from tests.unittest import TestCase
+
+logger = logging.getLogger(__name__)
+
+
+class MatrixFederationAgentTests(TestCase):
+    def setUp(self):
+        self.reactor = ThreadedMemoryReactorClock()
+
+        self.mock_resolver = Mock()
+
+        self.well_known_cache = TTLCache("test_cache", timer=self.reactor.seconds)
+
+        self.agent = MatrixFederationAgent(
+            reactor=self.reactor,
+            tls_client_options_factory=ClientTLSOptionsFactory(None),
+            _well_known_tls_policy=TrustingTLSPolicyForHTTPS(),
+            _srv_resolver=self.mock_resolver,
+            _well_known_cache=self.well_known_cache,
+        )
+
+    def _make_connection(self, client_factory, expected_sni):
+        """Builds a test server, and completes the outgoing client connection
+
+        Returns:
+            HTTPChannel: the test server
+        """
+
+        # build the test server
+        server_tls_protocol = _build_test_server()
+
+        # now, tell the client protocol factory to build the client protocol (it will be a
+        # _WrappingProtocol, around a TLSMemoryBIOProtocol, around an
+        # HTTP11ClientProtocol) and wire the output of said protocol up to the server via
+        # a FakeTransport.
+        #
+        # Normally this would be done by the TCP socket code in Twisted, but we are
+        # stubbing that out here.
+        client_protocol = client_factory.buildProtocol(None)
+        client_protocol.makeConnection(
+            FakeTransport(server_tls_protocol, self.reactor, client_protocol),
+        )
+
+        # tell the server tls protocol to send its stuff back to the client, too
+        server_tls_protocol.makeConnection(
+            FakeTransport(client_protocol, self.reactor, server_tls_protocol),
+        )
+
+        # give the reactor a pump to get the TLS juices flowing.
+        self.reactor.pump((0.1,))
+
+        # check the SNI
+        server_name = server_tls_protocol._tlsConnection.get_servername()
+        self.assertEqual(
+            server_name,
+            expected_sni,
+            "Expected SNI %s but got %s" % (expected_sni, server_name),
+        )
+
+        # fish the test server back out of the server-side TLS protocol.
+        return server_tls_protocol.wrappedProtocol
+
+    @defer.inlineCallbacks
+    def _make_get_request(self, uri):
+        """
+        Sends a simple GET request via the agent, and checks its logcontext management
+        """
+        with LoggingContext("one") as context:
+            fetch_d = self.agent.request(b'GET', uri)
+
+            # Nothing happened yet
+            self.assertNoResult(fetch_d)
+
+            # should have reset logcontext to the sentinel
+            _check_logcontext(LoggingContext.sentinel)
+
+            try:
+                fetch_res = yield fetch_d
+                defer.returnValue(fetch_res)
+            except Exception as e:
+                logger.info("Fetch of %s failed: %s", uri.decode("ascii"), e)
+                raise
+            finally:
+                _check_logcontext(context)
+
+    def _handle_well_known_connection(
+        self, client_factory, expected_sni, content, response_headers={},
+    ):
+        """Handle an outgoing HTTPs connection: wire it up to a server, check that the
+        request is for a .well-known, and send the response.
+
+        Args:
+            client_factory (IProtocolFactory): outgoing connection
+            expected_sni (bytes): SNI that we expect the outgoing connection to send
+            content (bytes): content to send back as the .well-known
+            response_headers (dict): extra headers to add to the response
+        Returns:
+            HTTPChannel: server impl
+        """
+        # make the connection for .well-known
+        well_known_server = self._make_connection(
+            client_factory,
+            expected_sni=expected_sni,
+        )
+        # check the .well-known request and send a response
+        self.assertEqual(len(well_known_server.requests), 1)
+        request = well_known_server.requests[0]
+        self._send_well_known_response(request, content, headers=response_headers)
+        return well_known_server
+
+    def _send_well_known_response(self, request, content, headers={}):
+        """Check that an incoming request looks like a valid .well-known request, and
+        send back the response.
+        """
+        self.assertEqual(request.method, b'GET')
+        self.assertEqual(request.path, b'/.well-known/matrix/server')
+        self.assertEqual(
+            request.requestHeaders.getRawHeaders(b'host'),
+            [b'testserv'],
+        )
+        # send back a response
+        for k, v in headers.items():
+            request.setHeader(k, v)
+        request.write(content)
+        request.finish()
+
+        self.reactor.pump((0.1, ))
+
+    def test_get(self):
+        """
+        happy-path test of a GET request with an explicit port
+        """
+        self.reactor.lookups["testserv"] = "1.2.3.4"
+        test_d = self._make_get_request(b"matrix://testserv:8448/foo/bar")
+
+        # Nothing happened yet
+        self.assertNoResult(test_d)
+
+        # Make sure treq is trying to connect
+        clients = self.reactor.tcpClients
+        self.assertEqual(len(clients), 1)
+        (host, port, client_factory, _timeout, _bindAddress) = clients[0]
+        self.assertEqual(host, '1.2.3.4')
+        self.assertEqual(port, 8448)
+
+        # make a test server, and wire up the client
+        http_server = self._make_connection(
+            client_factory,
+            expected_sni=b"testserv",
+        )
+
+        self.assertEqual(len(http_server.requests), 1)
+        request = http_server.requests[0]
+        self.assertEqual(request.method, b'GET')
+        self.assertEqual(request.path, b'/foo/bar')
+        self.assertEqual(
+            request.requestHeaders.getRawHeaders(b'host'),
+            [b'testserv:8448']
+        )
+        content = request.content.read()
+        self.assertEqual(content, b'')
+
+        # Deferred is still without a result
+        self.assertNoResult(test_d)
+
+        # send the headers
+        request.responseHeaders.setRawHeaders(b'Content-Type', [b'application/json'])
+        request.write(b'')
+
+        self.reactor.pump((0.1,))
+
+        response = self.successResultOf(test_d)
+
+        # that should give us a Response object
+        self.assertEqual(response.code, 200)
+
+        # Send the body
+        request.write('{ "a": 1 }'.encode('ascii'))
+        request.finish()
+
+        self.reactor.pump((0.1,))
+
+        # check it can be read
+        json = self.successResultOf(treq.json_content(response))
+        self.assertEqual(json, {"a": 1})
+
+    def test_get_ip_address(self):
+        """
+        Test the behaviour when the server name contains an explicit IP (with no port)
+        """
+        # there will be a getaddrinfo on the IP
+        self.reactor.lookups["1.2.3.4"] = "1.2.3.4"
+
+        test_d = self._make_get_request(b"matrix://1.2.3.4/foo/bar")
+
+        # Nothing happened yet
+        self.assertNoResult(test_d)
+
+        # Make sure treq is trying to connect
+        clients = self.reactor.tcpClients
+        self.assertEqual(len(clients), 1)
+        (host, port, client_factory, _timeout, _bindAddress) = clients[0]
+        self.assertEqual(host, '1.2.3.4')
+        self.assertEqual(port, 8448)
+
+        # make a test server, and wire up the client
+        http_server = self._make_connection(
+            client_factory,
+            expected_sni=None,
+        )
+
+        self.assertEqual(len(http_server.requests), 1)
+        request = http_server.requests[0]
+        self.assertEqual(request.method, b'GET')
+        self.assertEqual(request.path, b'/foo/bar')
+        self.assertEqual(
+            request.requestHeaders.getRawHeaders(b'host'),
+            [b'1.2.3.4'],
+        )
+
+        # finish the request
+        request.finish()
+        self.reactor.pump((0.1,))
+        self.successResultOf(test_d)
+
+    def test_get_ipv6_address(self):
+        """
+        Test the behaviour when the server name contains an explicit IPv6 address
+        (with no port)
+        """
+
+        # there will be a getaddrinfo on the IP
+        self.reactor.lookups["::1"] = "::1"
+
+        test_d = self._make_get_request(b"matrix://[::1]/foo/bar")
+
+        # Nothing happened yet
+        self.assertNoResult(test_d)
+
+        # Make sure treq is trying to connect
+        clients = self.reactor.tcpClients
+        self.assertEqual(len(clients), 1)
+        (host, port, client_factory, _timeout, _bindAddress) = clients[0]
+        self.assertEqual(host, '::1')
+        self.assertEqual(port, 8448)
+
+        # make a test server, and wire up the client
+        http_server = self._make_connection(
+            client_factory,
+            expected_sni=None,
+        )
+
+        self.assertEqual(len(http_server.requests), 1)
+        request = http_server.requests[0]
+        self.assertEqual(request.method, b'GET')
+        self.assertEqual(request.path, b'/foo/bar')
+        self.assertEqual(
+            request.requestHeaders.getRawHeaders(b'host'),
+            [b'[::1]'],
+        )
+
+        # finish the request
+        request.finish()
+        self.reactor.pump((0.1,))
+        self.successResultOf(test_d)
+
+    def test_get_ipv6_address_with_port(self):
+        """
+        Test the behaviour when the server name contains an explicit IPv6 address
+        (with explicit port)
+        """
+
+        # there will be a getaddrinfo on the IP
+        self.reactor.lookups["::1"] = "::1"
+
+        test_d = self._make_get_request(b"matrix://[::1]:80/foo/bar")
+
+        # Nothing happened yet
+        self.assertNoResult(test_d)
+
+        # Make sure treq is trying to connect
+        clients = self.reactor.tcpClients
+        self.assertEqual(len(clients), 1)
+        (host, port, client_factory, _timeout, _bindAddress) = clients[0]
+        self.assertEqual(host, '::1')
+        self.assertEqual(port, 80)
+
+        # make a test server, and wire up the client
+        http_server = self._make_connection(
+            client_factory,
+            expected_sni=None,
+        )
+
+        self.assertEqual(len(http_server.requests), 1)
+        request = http_server.requests[0]
+        self.assertEqual(request.method, b'GET')
+        self.assertEqual(request.path, b'/foo/bar')
+        self.assertEqual(
+            request.requestHeaders.getRawHeaders(b'host'),
+            [b'[::1]:80'],
+        )
+
+        # finish the request
+        request.finish()
+        self.reactor.pump((0.1,))
+        self.successResultOf(test_d)
+
+    def test_get_no_srv_no_well_known(self):
+        """
+        Test the behaviour when the server name has no port, no SRV, and no well-known
+        """
+
+        self.mock_resolver.resolve_service.side_effect = lambda _: []
+        self.reactor.lookups["testserv"] = "1.2.3.4"
+
+        test_d = self._make_get_request(b"matrix://testserv/foo/bar")
+
+        # Nothing happened yet
+        self.assertNoResult(test_d)
+
+        # No SRV record lookup yet
+        self.mock_resolver.resolve_service.assert_not_called()
+
+        # there should be an attempt to connect on port 443 for the .well-known
+        clients = self.reactor.tcpClients
+        self.assertEqual(len(clients), 1)
+        (host, port, client_factory, _timeout, _bindAddress) = clients[0]
+        self.assertEqual(host, '1.2.3.4')
+        self.assertEqual(port, 443)
+
+        # fonx the connection
+        client_factory.clientConnectionFailed(None, Exception("nope"))
+
+        # the attempt delay on the HostnameEndpoint is 0.3, so it takes that long
+        # before the .well-known request fails.
+        self.reactor.pump((0.4,))
+
+        # now there should be an SRV lookup
+        self.mock_resolver.resolve_service.assert_called_once_with(
+            b"_matrix._tcp.testserv",
+        )
+
+        # we should fall back to a direct connection
+        self.assertEqual(len(clients), 2)
+        (host, port, client_factory, _timeout, _bindAddress) = clients[1]
+        self.assertEqual(host, '1.2.3.4')
+        self.assertEqual(port, 8448)
+
+        # make a test server, and wire up the client
+        http_server = self._make_connection(
+            client_factory,
+            expected_sni=b'testserv',
+        )
+
+        self.assertEqual(len(http_server.requests), 1)
+        request = http_server.requests[0]
+        self.assertEqual(request.method, b'GET')
+        self.assertEqual(request.path, b'/foo/bar')
+        self.assertEqual(
+            request.requestHeaders.getRawHeaders(b'host'),
+            [b'testserv'],
+        )
+
+        # finish the request
+        request.finish()
+        self.reactor.pump((0.1,))
+        self.successResultOf(test_d)
+
+    def test_get_well_known(self):
+        """Test the behaviour when the .well-known delegates elsewhere
+        """
+
+        self.mock_resolver.resolve_service.side_effect = lambda _: []
+        self.reactor.lookups["testserv"] = "1.2.3.4"
+        self.reactor.lookups["target-server"] = "1::f"
+
+        test_d = self._make_get_request(b"matrix://testserv/foo/bar")
+
+        # Nothing happened yet
+        self.assertNoResult(test_d)
+
+        # there should be an attempt to connect on port 443 for the .well-known
+        clients = self.reactor.tcpClients
+        self.assertEqual(len(clients), 1)
+        (host, port, client_factory, _timeout, _bindAddress) = clients[0]
+        self.assertEqual(host, '1.2.3.4')
+        self.assertEqual(port, 443)
+
+        self._handle_well_known_connection(
+            client_factory, expected_sni=b"testserv",
+            content=b'{ "m.server": "target-server" }',
+        )
+
+        # there should be an SRV lookup
+        self.mock_resolver.resolve_service.assert_called_once_with(
+            b"_matrix._tcp.target-server",
+        )
+
+        # now we should get a connection to the target server
+        self.assertEqual(len(clients), 2)
+        (host, port, client_factory, _timeout, _bindAddress) = clients[1]
+        self.assertEqual(host, '1::f')
+        self.assertEqual(port, 8448)
+
+        # make a test server, and wire up the client
+        http_server = self._make_connection(
+            client_factory,
+            expected_sni=b'target-server',
+        )
+
+        self.assertEqual(len(http_server.requests), 1)
+        request = http_server.requests[0]
+        self.assertEqual(request.method, b'GET')
+        self.assertEqual(request.path, b'/foo/bar')
+        self.assertEqual(
+            request.requestHeaders.getRawHeaders(b'host'),
+            [b'target-server'],
+        )
+
+        # finish the request
+        request.finish()
+        self.reactor.pump((0.1,))
+        self.successResultOf(test_d)
+
+        self.assertEqual(self.well_known_cache[b"testserv"], b"target-server")
+
+        # check the cache expires
+        self.reactor.pump((25 * 3600,))
+        self.well_known_cache.expire()
+        self.assertNotIn(b"testserv", self.well_known_cache)
+
+    def test_get_well_known_redirect(self):
+        """Test the behaviour when the server name has no port and no SRV record, but
+        the .well-known has a 300 redirect
+        """
+        self.mock_resolver.resolve_service.side_effect = lambda _: []
+        self.reactor.lookups["testserv"] = "1.2.3.4"
+        self.reactor.lookups["target-server"] = "1::f"
+
+        test_d = self._make_get_request(b"matrix://testserv/foo/bar")
+
+        # Nothing happened yet
+        self.assertNoResult(test_d)
+
+        # there should be an attempt to connect on port 443 for the .well-known
+        clients = self.reactor.tcpClients
+        self.assertEqual(len(clients), 1)
+        (host, port, client_factory, _timeout, _bindAddress) = clients.pop()
+        self.assertEqual(host, '1.2.3.4')
+        self.assertEqual(port, 443)
+
+        redirect_server = self._make_connection(
+            client_factory,
+            expected_sni=b"testserv",
+        )
+
+        # send a 302 redirect
+        self.assertEqual(len(redirect_server.requests), 1)
+        request = redirect_server.requests[0]
+        request.redirect(b'https://testserv/even_better_known')
+        request.finish()
+
+        self.reactor.pump((0.1, ))
+
+        # now there should be another connection
+        clients = self.reactor.tcpClients
+        self.assertEqual(len(clients), 1)
+        (host, port, client_factory, _timeout, _bindAddress) = clients.pop()
+        self.assertEqual(host, '1.2.3.4')
+        self.assertEqual(port, 443)
+
+        well_known_server = self._make_connection(
+            client_factory,
+            expected_sni=b"testserv",
+        )
+
+        self.assertEqual(len(well_known_server.requests), 1, "No request after 302")
+        request = well_known_server.requests[0]
+        self.assertEqual(request.method, b'GET')
+        self.assertEqual(request.path, b'/even_better_known')
+        request.write(b'{ "m.server": "target-server" }')
+        request.finish()
+
+        self.reactor.pump((0.1, ))
+
+        # there should be an SRV lookup
+        self.mock_resolver.resolve_service.assert_called_once_with(
+            b"_matrix._tcp.target-server",
+        )
+
+        # now we should get a connection to the target server
+        self.assertEqual(len(clients), 1)
+        (host, port, client_factory, _timeout, _bindAddress) = clients[0]
+        self.assertEqual(host, '1::f')
+        self.assertEqual(port, 8448)
+
+        # make a test server, and wire up the client
+        http_server = self._make_connection(
+            client_factory,
+            expected_sni=b'target-server',
+        )
+
+        self.assertEqual(len(http_server.requests), 1)
+        request = http_server.requests[0]
+        self.assertEqual(request.method, b'GET')
+        self.assertEqual(request.path, b'/foo/bar')
+        self.assertEqual(
+            request.requestHeaders.getRawHeaders(b'host'),
+            [b'target-server'],
+        )
+
+        # finish the request
+        request.finish()
+        self.reactor.pump((0.1,))
+        self.successResultOf(test_d)
+
+        self.assertEqual(self.well_known_cache[b"testserv"], b"target-server")
+
+        # check the cache expires
+        self.reactor.pump((25 * 3600,))
+        self.well_known_cache.expire()
+        self.assertNotIn(b"testserv", self.well_known_cache)
+
+    def test_get_invalid_well_known(self):
+        """
+        Test the behaviour when the server name has an *invalid* well-known (and no SRV)
+        """
+
+        self.mock_resolver.resolve_service.side_effect = lambda _: []
+        self.reactor.lookups["testserv"] = "1.2.3.4"
+
+        test_d = self._make_get_request(b"matrix://testserv/foo/bar")
+
+        # Nothing happened yet
+        self.assertNoResult(test_d)
+
+        # No SRV record lookup yet
+        self.mock_resolver.resolve_service.assert_not_called()
+
+        # there should be an attempt to connect on port 443 for the .well-known
+        clients = self.reactor.tcpClients
+        self.assertEqual(len(clients), 1)
+        (host, port, client_factory, _timeout, _bindAddress) = clients.pop()
+        self.assertEqual(host, '1.2.3.4')
+        self.assertEqual(port, 443)
+
+        self._handle_well_known_connection(
+            client_factory, expected_sni=b"testserv", content=b'NOT JSON',
+        )
+
+        # now there should be an SRV lookup
+        self.mock_resolver.resolve_service.assert_called_once_with(
+            b"_matrix._tcp.testserv",
+        )
+
+        # we should fall back to a direct connection
+        self.assertEqual(len(clients), 1)
+        (host, port, client_factory, _timeout, _bindAddress) = clients.pop()
+        self.assertEqual(host, '1.2.3.4')
+        self.assertEqual(port, 8448)
+
+        # make a test server, and wire up the client
+        http_server = self._make_connection(
+            client_factory,
+            expected_sni=b'testserv',
+        )
+
+        self.assertEqual(len(http_server.requests), 1)
+        request = http_server.requests[0]
+        self.assertEqual(request.method, b'GET')
+        self.assertEqual(request.path, b'/foo/bar')
+        self.assertEqual(
+            request.requestHeaders.getRawHeaders(b'host'),
+            [b'testserv'],
+        )
+
+        # finish the request
+        request.finish()
+        self.reactor.pump((0.1,))
+        self.successResultOf(test_d)
+
+    def test_get_hostname_srv(self):
+        """
+        Test the behaviour when there is a single SRV record
+        """
+        self.mock_resolver.resolve_service.side_effect = lambda _: [
+            Server(host=b"srvtarget", port=8443)
+        ]
+        self.reactor.lookups["srvtarget"] = "1.2.3.4"
+
+        test_d = self._make_get_request(b"matrix://testserv/foo/bar")
+
+        # Nothing happened yet
+        self.assertNoResult(test_d)
+
+        # the request for a .well-known will have failed with a DNS lookup error.
+        self.mock_resolver.resolve_service.assert_called_once_with(
+            b"_matrix._tcp.testserv",
+        )
+
+        # Make sure treq is trying to connect
+        clients = self.reactor.tcpClients
+        self.assertEqual(len(clients), 1)
+        (host, port, client_factory, _timeout, _bindAddress) = clients[0]
+        self.assertEqual(host, '1.2.3.4')
+        self.assertEqual(port, 8443)
+
+        # make a test server, and wire up the client
+        http_server = self._make_connection(
+            client_factory,
+            expected_sni=b'testserv',
+        )
+
+        self.assertEqual(len(http_server.requests), 1)
+        request = http_server.requests[0]
+        self.assertEqual(request.method, b'GET')
+        self.assertEqual(request.path, b'/foo/bar')
+        self.assertEqual(
+            request.requestHeaders.getRawHeaders(b'host'),
+            [b'testserv'],
+        )
+
+        # finish the request
+        request.finish()
+        self.reactor.pump((0.1,))
+        self.successResultOf(test_d)
+
+    def test_get_well_known_srv(self):
+        """Test the behaviour when the .well-known redirects to a place where there
+        is a SRV.
+        """
+        self.reactor.lookups["testserv"] = "1.2.3.4"
+        self.reactor.lookups["srvtarget"] = "5.6.7.8"
+
+        test_d = self._make_get_request(b"matrix://testserv/foo/bar")
+
+        # Nothing happened yet
+        self.assertNoResult(test_d)
+
+        # there should be an attempt to connect on port 443 for the .well-known
+        clients = self.reactor.tcpClients
+        self.assertEqual(len(clients), 1)
+        (host, port, client_factory, _timeout, _bindAddress) = clients[0]
+        self.assertEqual(host, '1.2.3.4')
+        self.assertEqual(port, 443)
+
+        self.mock_resolver.resolve_service.side_effect = lambda _: [
+            Server(host=b"srvtarget", port=8443),
+        ]
+
+        self._handle_well_known_connection(
+            client_factory, expected_sni=b"testserv",
+            content=b'{ "m.server": "target-server" }',
+        )
+
+        # there should be an SRV lookup
+        self.mock_resolver.resolve_service.assert_called_once_with(
+            b"_matrix._tcp.target-server",
+        )
+
+        # now we should get a connection to the target of the SRV record
+        self.assertEqual(len(clients), 2)
+        (host, port, client_factory, _timeout, _bindAddress) = clients[1]
+        self.assertEqual(host, '5.6.7.8')
+        self.assertEqual(port, 8443)
+
+        # make a test server, and wire up the client
+        http_server = self._make_connection(
+            client_factory,
+            expected_sni=b'target-server',
+        )
+
+        self.assertEqual(len(http_server.requests), 1)
+        request = http_server.requests[0]
+        self.assertEqual(request.method, b'GET')
+        self.assertEqual(request.path, b'/foo/bar')
+        self.assertEqual(
+            request.requestHeaders.getRawHeaders(b'host'),
+            [b'target-server'],
+        )
+
+        # finish the request
+        request.finish()
+        self.reactor.pump((0.1,))
+        self.successResultOf(test_d)
+
+    def test_idna_servername(self):
+        """test the behaviour when the server name has idna chars in"""
+
+        self.mock_resolver.resolve_service.side_effect = lambda _: []
+
+        # the resolver is always called with the IDNA hostname as a native string.
+        self.reactor.lookups["xn--bcher-kva.com"] = "1.2.3.4"
+
+        # this is idna for bücher.com
+        test_d = self._make_get_request(b"matrix://xn--bcher-kva.com/foo/bar")
+
+        # Nothing happened yet
+        self.assertNoResult(test_d)
+
+        # No SRV record lookup yet
+        self.mock_resolver.resolve_service.assert_not_called()
+
+        # there should be an attempt to connect on port 443 for the .well-known
+        clients = self.reactor.tcpClients
+        self.assertEqual(len(clients), 1)
+        (host, port, client_factory, _timeout, _bindAddress) = clients[0]
+        self.assertEqual(host, '1.2.3.4')
+        self.assertEqual(port, 443)
+
+        # fonx the connection
+        client_factory.clientConnectionFailed(None, Exception("nope"))
+
+        # the attempt delay on the HostnameEndpoint is 0.3, so it takes that long
+        # before the .well-known request fails.
+        self.reactor.pump((0.4,))
+
+        # now there should have been an SRV lookup
+        self.mock_resolver.resolve_service.assert_called_once_with(
+            b"_matrix._tcp.xn--bcher-kva.com",
+        )
+
+        # We should fall back to port 8448
+        clients = self.reactor.tcpClients
+        self.assertEqual(len(clients), 2)
+        (host, port, client_factory, _timeout, _bindAddress) = clients[1]
+        self.assertEqual(host, '1.2.3.4')
+        self.assertEqual(port, 8448)
+
+        # make a test server, and wire up the client
+        http_server = self._make_connection(
+            client_factory,
+            expected_sni=b'xn--bcher-kva.com',
+        )
+
+        self.assertEqual(len(http_server.requests), 1)
+        request = http_server.requests[0]
+        self.assertEqual(request.method, b'GET')
+        self.assertEqual(request.path, b'/foo/bar')
+        self.assertEqual(
+            request.requestHeaders.getRawHeaders(b'host'),
+            [b'xn--bcher-kva.com'],
+        )
+
+        # finish the request
+        request.finish()
+        self.reactor.pump((0.1,))
+        self.successResultOf(test_d)
+
+    def test_idna_srv_target(self):
+        """test the behaviour when the target of a SRV record has idna chars"""
+
+        self.mock_resolver.resolve_service.side_effect = lambda _: [
+            Server(host=b"xn--trget-3qa.com", port=8443)  # târget.com
+        ]
+        self.reactor.lookups["xn--trget-3qa.com"] = "1.2.3.4"
+
+        test_d = self._make_get_request(b"matrix://xn--bcher-kva.com/foo/bar")
+
+        # Nothing happened yet
+        self.assertNoResult(test_d)
+
+        self.mock_resolver.resolve_service.assert_called_once_with(
+            b"_matrix._tcp.xn--bcher-kva.com",
+        )
+
+        # Make sure treq is trying to connect
+        clients = self.reactor.tcpClients
+        self.assertEqual(len(clients), 1)
+        (host, port, client_factory, _timeout, _bindAddress) = clients[0]
+        self.assertEqual(host, '1.2.3.4')
+        self.assertEqual(port, 8443)
+
+        # make a test server, and wire up the client
+        http_server = self._make_connection(
+            client_factory,
+            expected_sni=b'xn--bcher-kva.com',
+        )
+
+        self.assertEqual(len(http_server.requests), 1)
+        request = http_server.requests[0]
+        self.assertEqual(request.method, b'GET')
+        self.assertEqual(request.path, b'/foo/bar')
+        self.assertEqual(
+            request.requestHeaders.getRawHeaders(b'host'),
+            [b'xn--bcher-kva.com'],
+        )
+
+        # finish the request
+        request.finish()
+        self.reactor.pump((0.1,))
+        self.successResultOf(test_d)
+
+    @defer.inlineCallbacks
+    def do_get_well_known(self, serv):
+        try:
+            result = yield self.agent._get_well_known(serv)
+            logger.info("Result from well-known fetch: %s", result)
+        except Exception as e:
+            logger.warning("Error fetching well-known: %s", e)
+            raise
+        defer.returnValue(result)
+
+    def test_well_known_cache(self):
+        self.reactor.lookups["testserv"] = "1.2.3.4"
+
+        fetch_d = self.do_get_well_known(b'testserv')
+
+        # there should be an attempt to connect on port 443 for the .well-known
+        clients = self.reactor.tcpClients
+        self.assertEqual(len(clients), 1)
+        (host, port, client_factory, _timeout, _bindAddress) = clients.pop(0)
+        self.assertEqual(host, '1.2.3.4')
+        self.assertEqual(port, 443)
+
+        well_known_server = self._handle_well_known_connection(
+            client_factory,
+            expected_sni=b"testserv",
+            response_headers={b'Cache-Control': b'max-age=10'},
+            content=b'{ "m.server": "target-server" }',
+        )
+
+        r = self.successResultOf(fetch_d)
+        self.assertEqual(r, b'target-server')
+
+        # close the tcp connection
+        well_known_server.loseConnection()
+
+        # repeat the request: it should hit the cache
+        fetch_d = self.do_get_well_known(b'testserv')
+        r = self.successResultOf(fetch_d)
+        self.assertEqual(r, b'target-server')
+
+        # expire the cache
+        self.reactor.pump((10.0,))
+
+        # now it should connect again
+        fetch_d = self.do_get_well_known(b'testserv')
+
+        self.assertEqual(len(clients), 1)
+        (host, port, client_factory, _timeout, _bindAddress) = clients.pop(0)
+        self.assertEqual(host, '1.2.3.4')
+        self.assertEqual(port, 443)
+
+        self._handle_well_known_connection(
+            client_factory,
+            expected_sni=b"testserv",
+            content=b'{ "m.server": "other-server" }',
+        )
+
+        r = self.successResultOf(fetch_d)
+        self.assertEqual(r, b'other-server')
+
+
+class TestCachePeriodFromHeaders(TestCase):
+    def test_cache_control(self):
+        # uppercase
+        self.assertEqual(
+            _cache_period_from_headers(
+                Headers({b'Cache-Control': [b'foo, Max-Age = 100, bar']}),
+            ), 100,
+        )
+
+        # missing value
+        self.assertIsNone(_cache_period_from_headers(
+            Headers({b'Cache-Control': [b'max-age=, bar']}),
+        ))
+
+        # hackernews-style header: bogus due to the semicolon separator
+        self.assertIsNone(_cache_period_from_headers(
+            Headers({b'Cache-Control': [b'private; max-age=0']}),
+        ))
+
+        # github
+        self.assertEqual(
+            _cache_period_from_headers(
+                Headers({b'Cache-Control': [b'max-age=0, private, must-revalidate']}),
+            ), 0,
+        )
+
+        # google
+        self.assertEqual(
+            _cache_period_from_headers(
+                Headers({b'cache-control': [b'private, max-age=0']}),
+            ), 0,
+        )
+
+    def test_expires(self):
+        self.assertEqual(
+            _cache_period_from_headers(
+                Headers({b'Expires': [b'Wed, 30 Jan 2019 07:35:33 GMT']}),
+                time_now=lambda: 1548833700
+            ), 33,
+        )
+
+        # cache-control overrides expires
+        self.assertEqual(
+            _cache_period_from_headers(
+                Headers({
+                    b'cache-control': [b'max-age=10'],
+                    b'Expires': [b'Wed, 30 Jan 2019 07:35:33 GMT']
+                }),
+                time_now=lambda: 1548833700
+            ), 10,
+        )
+
+        # invalid expires means immediate expiry
+        self.assertEqual(
+            _cache_period_from_headers(
+                Headers({b'Expires': [b'0']}),
+            ), 0,
+        )
+
+
+def _check_logcontext(context):
+    current = LoggingContext.current_context()
+    if current is not context:
+        raise AssertionError(
+            "Expected logcontext %s but was %s" % (context, current),
+        )
+
+
+def _build_test_server():
+    """Construct a test server
+
+    This builds an HTTP channel, wrapped with a TLSMemoryBIOProtocol
+
+    Returns:
+        TLSMemoryBIOProtocol
+    """
+    server_factory = Factory.forProtocol(HTTPChannel)
+    # Request.finish expects the factory to have a 'log' method.
+    server_factory.log = _log_request
+
+    server_tls_factory = TLSMemoryBIOFactory(
+        ServerTLSContext(), isClient=False, wrappedFactory=server_factory,
+    )
+
+    return server_tls_factory.buildProtocol(None)
+
+
+def _log_request(request):
+    """Implements Factory.log, which is expected by Request.finish"""
+    logger.info("Completed request %s", request)
+
+
+@implementer(IPolicyForHTTPS)
+class TrustingTLSPolicyForHTTPS(object):
+    """An IPolicyForHTTPS which doesn't do any certificate verification"""
+    def creatorForNetloc(self, hostname, port):
+        certificateOptions = OpenSSLCertificateOptions()
+        return ClientTLSOptions(hostname, certificateOptions.getContext())
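Note: taken together, the tests in this file pin down the routing order MatrixFederationAgent applies to a server name with no explicit port: first the https://<host>/.well-known/matrix/server lookup (cached per its Cache-Control/Expires headers), then a _matrix._tcp SRV lookup on the possibly-delegated name, then a direct connection on port 8448. A simplified sketch of that order, where get_well_known and srv_resolver are injected stand-ins for the agent's internals, not its real API:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def pick_endpoint(host, explicit_port, get_well_known, srv_resolver):
        """Sketch only, not the real MatrixFederationAgent logic.

        get_well_known: callable returning a deferred delegated name or None
        srv_resolver: an object with resolve_service(), like SrvResolver
        """
        if explicit_port is not None:
            # an explicit port (or, in the real agent, an IP literal)
            # short-circuits all delegation
            defer.returnValue((host, explicit_port))

        # 1. .well-known delegation (the agent caches this per the
        #    response's Cache-Control / Expires headers)
        delegated = yield get_well_known(host)
        if delegated is not None:
            host = delegated

        # 2. SRV lookup on the delegated-or-original name
        servers = yield srv_resolver.resolve_service(b"_matrix._tcp." + host)
        if servers:
            defer.returnValue((servers[0].host, servers[0].port))

        # 3. otherwise, a direct connection on port 8448
        defer.returnValue((host, 8448))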
diff --git a/tests/http/federation/test_srv_resolver.py b/tests/http/federation/test_srv_resolver.py
new file mode 100644
index 0000000000..a872e2441e
--- /dev/null
+++ b/tests/http/federation/test_srv_resolver.py
@@ -0,0 +1,207 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from mock import Mock
+
+from twisted.internet import defer
+from twisted.internet.defer import Deferred
+from twisted.internet.error import ConnectError
+from twisted.names import dns, error
+
+from synapse.http.federation.srv_resolver import SrvResolver
+from synapse.util.logcontext import LoggingContext
+
+from tests import unittest
+from tests.utils import MockClock
+
+
+class SrvResolverTestCase(unittest.TestCase):
+    def test_resolve(self):
+        dns_client_mock = Mock()
+
+        service_name = b"test_service.example.com"
+        host_name = b"example.com"
+
+        answer_srv = dns.RRHeader(
+            type=dns.SRV, payload=dns.Record_SRV(target=host_name)
+        )
+
+        result_deferred = Deferred()
+        dns_client_mock.lookupService.return_value = result_deferred
+
+        cache = {}
+        resolver = SrvResolver(dns_client=dns_client_mock, cache=cache)
+
+        @defer.inlineCallbacks
+        def do_lookup():
+
+            with LoggingContext("one") as ctx:
+                resolve_d = resolver.resolve_service(service_name)
+
+                self.assertNoResult(resolve_d)
+
+                # should have reset to the sentinel context
+                self.assertIs(LoggingContext.current_context(), LoggingContext.sentinel)
+
+                result = yield resolve_d
+
+                # should have restored our context
+                self.assertIs(LoggingContext.current_context(), ctx)
+
+                defer.returnValue(result)
+
+        test_d = do_lookup()
+        self.assertNoResult(test_d)
+
+        dns_client_mock.lookupService.assert_called_once_with(service_name)
+
+        result_deferred.callback(
+            ([answer_srv], None, None)
+        )
+
+        servers = self.successResultOf(test_d)
+
+        self.assertEquals(len(servers), 1)
+        self.assertEquals(servers, cache[service_name])
+        self.assertEquals(servers[0].host, host_name)
+
+    @defer.inlineCallbacks
+    def test_from_cache_expired_and_dns_fail(self):
+        dns_client_mock = Mock()
+        dns_client_mock.lookupService.return_value = defer.fail(error.DNSServerError())
+
+        service_name = b"test_service.example.com"
+
+        entry = Mock(spec_set=["expires"])
+        entry.expires = 0
+
+        cache = {service_name: [entry]}
+        resolver = SrvResolver(dns_client=dns_client_mock, cache=cache)
+
+        servers = yield resolver.resolve_service(service_name)
+
+        dns_client_mock.lookupService.assert_called_once_with(service_name)
+
+        self.assertEquals(len(servers), 1)
+        self.assertEquals(servers, cache[service_name])
+
+    @defer.inlineCallbacks
+    def test_from_cache(self):
+        clock = MockClock()
+
+        dns_client_mock = Mock(spec_set=['lookupService'])
+        dns_client_mock.lookupService = Mock(spec_set=[])
+
+        service_name = b"test_service.example.com"
+
+        entry = Mock(spec_set=["expires"])
+        entry.expires = 999999999
+
+        cache = {service_name: [entry]}
+        resolver = SrvResolver(
+            dns_client=dns_client_mock, cache=cache, get_time=clock.time,
+        )
+
+        servers = yield resolver.resolve_service(service_name)
+
+        self.assertFalse(dns_client_mock.lookupService.called)
+
+        self.assertEquals(len(servers), 1)
+        self.assertEquals(servers, cache[service_name])
+
+    @defer.inlineCallbacks
+    def test_empty_cache(self):
+        dns_client_mock = Mock()
+
+        dns_client_mock.lookupService.return_value = defer.fail(error.DNSServerError())
+
+        service_name = b"test_service.example.com"
+
+        cache = {}
+        resolver = SrvResolver(dns_client=dns_client_mock, cache=cache)
+
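+        # with no cached servers to fall back on, the DNS error propagates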
+        with self.assertRaises(error.DNSServerError):
+            yield resolver.resolve_service(service_name)
+
+    @defer.inlineCallbacks
+    def test_name_error(self):
+        dns_client_mock = Mock()
+
+        dns_client_mock.lookupService.return_value = defer.fail(error.DNSNameError())
+
+        service_name = b"test_service.example.com"
+
+        cache = {}
+        resolver = SrvResolver(dns_client=dns_client_mock, cache=cache)
+
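+        # DNSNameError (no such name) means "no servers", not a failure, and
+        # nothing should be cached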
+        servers = yield resolver.resolve_service(service_name)
+
+        self.assertEquals(len(servers), 0)
+        self.assertEquals(len(cache), 0)
+
+    def test_disabled_service(self):
+        """
+        Test the behaviour when the lookup returns a single SRV record whose
+        target is ".", indicating that the service is disabled.
+        """
+        service_name = b"test_service.example.com"
+
+        lookup_deferred = Deferred()
+        dns_client_mock = Mock()
+        dns_client_mock.lookupService.return_value = lookup_deferred
+        cache = {}
+        resolver = SrvResolver(dns_client=dns_client_mock, cache=cache)
+
+        resolve_d = resolver.resolve_service(service_name)
+        self.assertNoResult(resolve_d)
+
+        # returning a single "." should make the lookup fail with a ConenctError
+        lookup_deferred.callback((
+            [dns.RRHeader(type=dns.SRV, payload=dns.Record_SRV(target=b"."))],
+            None,
+            None,
+        ))
+
+        self.failureResultOf(resolve_d, ConnectError)
+
+    def test_non_srv_answer(self):
+        """
+        Test the behaviour when the DNS response includes a spurious non-SRV
+        record alongside the SRV record.
+        """
+        service_name = b"test_service.example.com"
+
+        lookup_deferred = Deferred()
+        dns_client_mock = Mock()
+        dns_client_mock.lookupService.return_value = lookup_deferred
+        cache = {}
+        resolver = SrvResolver(dns_client=dns_client_mock, cache=cache)
+
+        resolve_d = resolver.resolve_service(service_name)
+        self.assertNoResult(resolve_d)
+
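+        # the response mixes a spurious A record in with the SRV record we
+        # asked for; only the SRV record should be returned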
+        lookup_deferred.callback((
+            [
+                dns.RRHeader(type=dns.A, payload=dns.Record_A()),
+                dns.RRHeader(type=dns.SRV, payload=dns.Record_SRV(target=b"host")),
+            ],
+            None,
+            None,
+        ))
+
+        servers = self.successResultOf(resolve_d)
+
+        self.assertEquals(len(servers), 1)
+        self.assertEquals(servers, cache[service_name])
+        self.assertEquals(servers[0].host, b"host")
diff --git a/tests/http/server.pem b/tests/http/server.pem
new file mode 100644
index 0000000000..0584cf1a80
--- /dev/null
+++ b/tests/http/server.pem
@@ -0,0 +1,81 @@
+-----BEGIN PRIVATE KEY-----
+MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQCgF43/3lAgJ+p0
+x7Rn8UcL8a4fctvdkikvZrCngw96LkB34Evfq8YGWlOVjU+f9naUJLAKMatmAfEN
+r+rMX4VOXmpTwuu6iLtqwreUrRFMESyrmvQxa15p+y85gkY0CFmXMblv6ORbxHTG
+ncBGwST4WK4Poewcgt6jcISFCESTUKu1zc3cw1ANIDRyDLB5K44KwIe36dcKckyN
+Kdtv4BJ+3fcIZIkPJH62zqCypgFF1oiFt40uJzClxgHdJZlKYpgkfnDTckw4Y/Mx
+9k8BbE310KAzUNMV9H7I1eEolzrNr66FQj1eN64X/dqO8lTbwCqAd4diCT4sIUk0
+0SVsAUjNd3g8j651hx+Qb1t8fuOjrny8dmeMxtUgIBHoQcpcj76R55Fs7KZ9uar0
+8OFTyGIze51W1jG2K/7/5M1zxIqrA+7lsXu5OR81s7I+Ng/UUAhiHA/z+42/aiNa
+qEuk6tqj3rHfLctnCbtZ+JrRNqSSwEi8F0lMA021ivEd2eJV+284OyJjhXOmKHrX
+QADHrmS7Sh4syTZvRNm9n+qWID0KdDr2Sji/KnS3Enp44HDQ4xriT6/xhwEGsyuX
+oH5aAkdLznulbWkHBbyx1SUQSTLpOqzaioF9m1vRrLsFvrkrY3D253mPJ5eU9HM/
+dilduFcUgj4rz+6cdXUAh+KK/v95zwIDAQABAoICAFG5tJPaOa0ws0/KYx5s3YgL
+aIhFalhCNSQtmCDrlwsYcXDA3/rfBchYdDL0YKGYgBBAal3J3WXFt/j0xThvyu2m
+5UC9UPl4s7RckrsjXqEmY1d3UxGnbhtMT19cUdpeKN42VCP9EBaIw9Rg07dLAkSF
+gNYaIx6q8F0fI4eGIPvTQtUcqur4CfWpaxyNvckdovV6M85/YXfDwbCOnacPDGIX
+jfSK3i0MxGMuOHr6o8uzKR6aBUh6WStHWcw7VXXTvzdiFNbckmx3Gb93rf1b/LBw
+QFfx+tBKcC62gKroCOzXso/0sL9YTVeSD/DJZOiJwSiz3Dj/3u1IUMbVvfTU8wSi
+CYS7Z+jHxwSOCSSNTXm1wO/MtDsNKbI1+R0cohr/J9pOMQvrVh1+2zSDOFvXAQ1S
+yvjn+uqdmijRoV2VEGVHd+34C+ci7eJGAhL/f92PohuuFR2shUETgGWzpACZSJwg
+j1d90Hs81hj07vWRb+xCeDh00vimQngz9AD8vYvv/S4mqRGQ6TZdfjLoUwSTg0JD
+6sQgRXX026gQhLhn687vLKZfHwzQPZkpQdxOR0dTZ/ho/RyGGRJXH4kN4cA2tPr+
+AKYQ29YXGlEzGG7OqikaZcprNWG6UFgEpuXyBxCgp9r4ladZo3J+1Rhgus8ZYatd
+uO98q3WEBmP6CZ2n32mBAoIBAQDS/c/ybFTos0YpGHakwdmSfj5OOQJto2y8ywfG
+qDHwO0ebcpNnS1+MA+7XbKUQb/3Iq7iJljkkzJG2DIJ6rpKynYts1ViYpM7M/t0T
+W3V1gvUcUL62iqkgws4pnpWmubFkqV31cPSHcfIIclnzeQ1aOEGsGHNAvhty0ciC
+DnkJACbqApvopFLOR5f6UFTtKExE+hDH0WqgpsCAKJ1L4g6pBzZatI32/CN9JEVU
+tDbxLV75hHlFFjUrG7nT1rPyr/gI8Ceh9/2xeXPfjJUR0PrG3U1nwLqUCZkvFzO6
+XpN2+A+/v4v5xqMjKDKDFy1oq6SCMomwv/viw6wl/84TMbolAoIBAQDCPiMecnR8
+REik6tqVzQO/uSe9ZHjz6J15t5xdwaI6HpSwLlIkQPkLTjyXtFpemK5DOYRxrJvQ
+remfrZrN2qtLlb/DKpuGPWRsPOvWCrSuNEp48ivUehtclljrzxAFfy0sM+fWeJ48
+nTnR+td9KNhjNtZixzWdAy/mE+jdaMsXVnk66L73Uz+2WsnvVMW2R6cpCR0F2eP/
+B4zDWRqlT2w47sePAB81mFYSQLvPC6Xcgg1OqMubfiizJI49c8DO6Jt+FFYdsxhd
+kG52Eqa/Net6rN3ueiS6yXL5TU3Y6g96bPA2KyNCypucGcddcBfqaiVx/o4AH6yT
+NrdsrYtyvk/jAoIBAQDHUwKVeeRJJbvdbQAArCV4MI155n+1xhMe1AuXkCQFWGtQ
+nlBE4D72jmyf1UKnIbW2Uwv15xY6/ouVWYIWlj9+QDmMaozVP7Uiko+WDuwLRNl8
+k4dn+dzHV2HejbPBG2JLv3lFOx23q1zEwArcaXrExaq9Ayg2fKJ/uVHcFAIiD6Oz
+pR1XDY4w1A/uaN+iYFSVQUyDCQLbnEz1hej73CaPZoHh9Pq83vxD5/UbjVjuRTeZ
+L55FNzKpc/r89rNvTPBcuUwnxplDhYKDKVNWzn9rSXwrzTY2Tk8J3rh+k4RqevSd
+6D47jH1n5Dy7/TRn0ueKHGZZtTUnyEUkbOJo3ayFAoIBAHKDyZaQqaX9Z8p6fwWj
+yVsFoK0ih8BcWkLBAdmwZ6DWGJjJpjmjaG/G3ygc9s4gO1R8m12dAnuDnGE8KzDD
+gwtbrKM2Alyg4wyA2hTlWOH/CAzH0RlCJ9Fs/d1/xJVJBeuyajLiB3/6vXTS6qnq
+I7BSSxAPG8eGcn21LSsjNeB7ZZtaTgNnu/8ZBUYo9yrgkWc67TZe3/ChldYxOOlO
+qqHh/BqNWtjxB4VZTp/g4RbgQVInZ2ozdXEv0v/dt0UEk29ANAjsZif7F3RayJ2f
+/0TilzCaJ/9K9pKNhaClVRy7Dt8QjYg6BIWCGSw4ApF7pLnQ9gySn95mersCkVzD
+YDsCggEAb0E/TORjQhKfNQvahyLfQFm151e+HIoqBqa4WFyfFxe/IJUaLH/JSSFw
+VohbQqPdCmaAeuQ8ERL564DdkcY5BgKcax79fLLCOYP5bT11aQx6uFpfl2Dcm6Z9
+QdCRI4jzPftsd5fxLNH1XtGyC4t6vTic4Pji2O71WgWzx0j5v4aeDY4sZQeFxqCV
+/q7Ee8hem1Rn5RFHu14FV45RS4LAWl6wvf5pQtneSKzx8YL0GZIRRytOzdEfnGKr
+FeUlAj5uL+5/p0ZEgM7gPsEBwdm8scF79qSUn8UWSoXNeIauF9D4BDg8RZcFFxka
+KILVFsq3cQC+bEnoM4eVbjEQkGs1RQ==
+-----END PRIVATE KEY-----
+-----BEGIN CERTIFICATE-----
+MIIE/jCCAuagAwIBAgIJANFtVaGvJWZlMA0GCSqGSIb3DQEBCwUAMBMxETAPBgNV
+BAMMCHRlc3RzZXJ2MCAXDTE5MDEyNzIyMDIzNloYDzIxMTkwMTAzMjIwMjM2WjAT
+MREwDwYDVQQDDAh0ZXN0c2VydjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoC
+ggIBAKAXjf/eUCAn6nTHtGfxRwvxrh9y292SKS9msKeDD3ouQHfgS9+rxgZaU5WN
+T5/2dpQksAoxq2YB8Q2v6sxfhU5ealPC67qIu2rCt5StEUwRLKua9DFrXmn7LzmC
+RjQIWZcxuW/o5FvEdMadwEbBJPhYrg+h7ByC3qNwhIUIRJNQq7XNzdzDUA0gNHIM
+sHkrjgrAh7fp1wpyTI0p22/gEn7d9whkiQ8kfrbOoLKmAUXWiIW3jS4nMKXGAd0l
+mUpimCR+cNNyTDhj8zH2TwFsTfXQoDNQ0xX0fsjV4SiXOs2vroVCPV43rhf92o7y
+VNvAKoB3h2IJPiwhSTTRJWwBSM13eDyPrnWHH5BvW3x+46OufLx2Z4zG1SAgEehB
+ylyPvpHnkWzspn25qvTw4VPIYjN7nVbWMbYr/v/kzXPEiqsD7uWxe7k5HzWzsj42
+D9RQCGIcD/P7jb9qI1qoS6Tq2qPesd8ty2cJu1n4mtE2pJLASLwXSUwDTbWK8R3Z
+4lX7bzg7ImOFc6YoetdAAMeuZLtKHizJNm9E2b2f6pYgPQp0OvZKOL8qdLcSenjg
+cNDjGuJPr/GHAQazK5egfloCR0vOe6VtaQcFvLHVJRBJMuk6rNqKgX2bW9GsuwW+
+uStjcPbneY8nl5T0cz92KV24VxSCPivP7px1dQCH4or+/3nPAgMBAAGjUzBRMB0G
+A1UdDgQWBBQcQZpzLzTk5KdS/Iz7sGCV7gTd/zAfBgNVHSMEGDAWgBQcQZpzLzTk
+5KdS/Iz7sGCV7gTd/zAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IC
+AQAr/Pgha57jqYsDDX1LyRrVdqoVBpLBeB7x/p9dKYm7S6tBTDFNMZ0SZyQP8VEG
+7UoC9/OQ9nCdEMoR7ZKpQsmipwcIqpXHS6l4YOkf5EEq5jpMgvlEesHmBJJeJew/
+FEPDl1bl8d0tSrmWaL3qepmwzA+2lwAAouWk2n+rLiP8CZ3jZeoTXFqYYrUlEqO9
+fHMvuWqTV4KCSyNY+GWCrnHetulgKHlg+W2J1mZnrCKcBhWf9C2DesTJO+JldIeM
+ornTFquSt21hZi+k3aySuMn2N3MWiNL8XsZVsAnPSs0zA+2fxjJkShls8Gc7cCvd
+a6XrNC+PY6pONguo7rEU4HiwbvnawSTngFFglmH/ImdA/HkaAekW6o82aI8/UxFx
+V9fFMO3iKDQdOrg77hI1bx9RlzKNZZinE2/Pu26fWd5d2zqDWCjl8ykGQRAfXgYN
+H3BjgyXLl+ao5/pOUYYtzm3ruTXTgRcy5hhL6hVTYhSrf9vYh4LNIeXNKnZ78tyG
+TX77/kU2qXhBGCFEUUMqUNV/+ITir2lmoxVjknt19M07aGr8C7SgYt6Rs+qDpMiy
+JurgvRh8LpVq4pHx1efxzxCFmo58DMrG40I0+CF3y/niNpOb1gp2wAqByRiORkds
+f0ytW6qZ0TpHbD6gOtQLYDnhx3ISuX+QYSekVwQUpffeWQ==
+-----END CERTIFICATE-----
diff --git a/tests/http/test_fedclient.py b/tests/http/test_fedclient.py
index f3cb1423f0..b03b37affe 100644
--- a/tests/http/test_fedclient.py
+++ b/tests/http/test_fedclient.py
@@ -15,41 +15,138 @@
 
 from mock import Mock
 
+from twisted.internet import defer
 from twisted.internet.defer import TimeoutError
 from twisted.internet.error import ConnectingCancelledError, DNSLookupError
+from twisted.test.proto_helpers import StringTransport
 from twisted.web.client import ResponseNeverReceived
 from twisted.web.http import HTTPChannel
 
+from synapse.api.errors import RequestSendFailed
 from synapse.http.matrixfederationclient import (
     MatrixFederationHttpClient,
     MatrixFederationRequest,
 )
+from synapse.util.logcontext import LoggingContext
 
 from tests.server import FakeTransport
 from tests.unittest import HomeserverTestCase
 
 
+def check_logcontext(context):
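+    """Raise AssertionError unless the current logcontext is `context`."""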
+    current = LoggingContext.current_context()
+    if current is not context:
+        raise AssertionError(
+            "Expected logcontext %s but was %s" % (context, current),
+        )
+
+
 class FederationClientTests(HomeserverTestCase):
     def make_homeserver(self, reactor, clock):
-
         hs = self.setup_test_homeserver(reactor=reactor, clock=clock)
-        hs.tls_client_options_factory = None
         return hs
 
     def prepare(self, reactor, clock, homeserver):
-
-        self.cl = MatrixFederationHttpClient(self.hs)
+        self.cl = MatrixFederationHttpClient(self.hs, None)
         self.reactor.lookups["testserv"] = "1.2.3.4"
 
+    def test_client_get(self):
+        """
+        happy-path test of a GET request
+        """
+        @defer.inlineCallbacks
+        def do_request():
+            with LoggingContext("one") as context:
+                fetch_d = self.cl.get_json("testserv:8008", "foo/bar")
+
+                # Nothing happened yet
+                self.assertNoResult(fetch_d)
+
+                # should have reset logcontext to the sentinel
+                check_logcontext(LoggingContext.sentinel)
+
+                try:
+                    fetch_res = yield fetch_d
+                    defer.returnValue(fetch_res)
+                finally:
+                    check_logcontext(context)
+
+        test_d = do_request()
+
+        self.pump()
+
+        # Nothing happened yet
+        self.assertNoResult(test_d)
+
+        # Make sure treq is trying to connect
+        clients = self.reactor.tcpClients
+        self.assertEqual(len(clients), 1)
+        (host, port, factory, _timeout, _bindAddress) = clients[0]
+        self.assertEqual(host, '1.2.3.4')
+        self.assertEqual(port, 8008)
+
+        # complete the connection and wire it up to a fake transport
+        protocol = factory.buildProtocol(None)
+        transport = StringTransport()
+        protocol.makeConnection(transport)
+
+        # that should have made it send the request to the transport
+        self.assertRegex(transport.value(), b"^GET /foo/bar")
+        self.assertRegex(transport.value(), b"Host: testserv:8008")
+
+        # Deferred is still without a result
+        self.assertNoResult(test_d)
+
+        # Send it the HTTP response
+        res_json = '{ "a": 1 }'.encode('ascii')
+        protocol.dataReceived(
+            b"HTTP/1.1 200 OK\r\n"
+            b"Server: Fake\r\n"
+            b"Content-Type: application/json\r\n"
+            b"Content-Length: %i\r\n"
+            b"\r\n"
+            b"%s" % (len(res_json), res_json)
+        )
+
+        self.pump()
+
+        res = self.successResultOf(test_d)
+
+        # check the response is as expected
+        self.assertEqual(res, {"a": 1})
+
     def test_dns_error(self):
         """
-        If the DNS raising returns an error, it will bubble up.
+        If the DNS lookup returns an error, it will bubble up.
         """
         d = self.cl.get_json("testserv2:8008", "foo/bar", timeout=10000)
         self.pump()
 
         f = self.failureResultOf(d)
-        self.assertIsInstance(f.value, DNSLookupError)
+        self.assertIsInstance(f.value, RequestSendFailed)
+        self.assertIsInstance(f.value.inner_exception, DNSLookupError)
+
+    def test_client_connection_refused(self):
+        d = self.cl.get_json("testserv:8008", "foo/bar", timeout=10000)
+
+        self.pump()
+
+        # Nothing happened yet
+        self.assertNoResult(d)
+
+        clients = self.reactor.tcpClients
+        self.assertEqual(len(clients), 1)
+        (host, port, factory, _timeout, _bindAddress) = clients[0]
+        self.assertEqual(host, '1.2.3.4')
+        self.assertEqual(port, 8008)
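+        # simulate the TCP connection attempt failing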
+        e = Exception("go away")
+        factory.clientConnectionFailed(None, e)
+        self.pump(0.5)
+
+        f = self.failureResultOf(d)
+
+        self.assertIsInstance(f.value, RequestSendFailed)
+        self.assertIs(f.value.inner_exception, e)
 
     def test_client_never_connect(self):
         """
@@ -61,7 +158,7 @@ class FederationClientTests(HomeserverTestCase):
         self.pump()
 
         # Nothing happened yet
-        self.assertFalse(d.called)
+        self.assertNoResult(d)
 
         # Make sure treq is trying to connect
         clients = self.reactor.tcpClients
@@ -70,13 +167,17 @@ class FederationClientTests(HomeserverTestCase):
         self.assertEqual(clients[0][1], 8008)
 
         # Deferred is still without a result
-        self.assertFalse(d.called)
+        self.assertNoResult(d)
 
         # Push by enough to time it out
         self.reactor.advance(10.5)
         f = self.failureResultOf(d)
 
-        self.assertIsInstance(f.value, (ConnectingCancelledError, TimeoutError))
+        self.assertIsInstance(f.value, RequestSendFailed)
+        self.assertIsInstance(
+            f.value.inner_exception,
+            (ConnectingCancelledError, TimeoutError),
+        )
 
     def test_client_connect_no_response(self):
         """
@@ -88,7 +189,7 @@ class FederationClientTests(HomeserverTestCase):
         self.pump()
 
         # Nothing happened yet
-        self.assertFalse(d.called)
+        self.assertNoResult(d)
 
         # Make sure treq is trying to connect
         clients = self.reactor.tcpClients
@@ -101,13 +202,14 @@ class FederationClientTests(HomeserverTestCase):
         client.makeConnection(conn)
 
         # Deferred is still without a result
-        self.assertFalse(d.called)
+        self.assertNoResult(d)
 
         # Push by enough to time it out
         self.reactor.advance(10.5)
         f = self.failureResultOf(d)
 
-        self.assertIsInstance(f.value, ResponseNeverReceived)
+        self.assertIsInstance(f.value, RequestSendFailed)
+        self.assertIsInstance(f.value.inner_exception, ResponseNeverReceived)
 
     def test_client_gets_headers(self):
         """
@@ -128,7 +230,7 @@ class FederationClientTests(HomeserverTestCase):
         client.makeConnection(conn)
 
         # Deferred does not have a result
-        self.assertFalse(d.called)
+        self.assertNoResult(d)
 
         # Send it the HTTP response
         client.dataReceived(b"HTTP/1.1 200 OK\r\nServer: Fake\r\n\r\n")
@@ -152,7 +254,7 @@ class FederationClientTests(HomeserverTestCase):
         client.makeConnection(conn)
 
         # Deferred does not have a result
-        self.assertFalse(d.called)
+        self.assertNoResult(d)
 
         # Send it the HTTP response
         client.dataReceived(
@@ -188,3 +290,42 @@ class FederationClientTests(HomeserverTestCase):
         request = server.requests[0]
         content = request.content.read()
         self.assertEqual(content, b'{"a":"b"}')
+
+    def test_closes_connection(self):
+        """Check that the client closes unused HTTP connections"""
+        d = self.cl.get_json("testserv:8008", "foo/bar")
+
+        self.pump()
+
+        # there should have been a call to connectTCP
+        clients = self.reactor.tcpClients
+        self.assertEqual(len(clients), 1)
+        (_host, _port, factory, _timeout, _bindAddress) = clients[0]
+
+        # complete the connection and wire it up to a fake transport
+        client = factory.buildProtocol(None)
+        conn = StringTransport()
+        client.makeConnection(conn)
+
+        # that should have made it send the request to the connection
+        self.assertRegex(conn.value(), b"^GET /foo/bar")
+
+        # Send the HTTP response
+        client.dataReceived(
+            b"HTTP/1.1 200 OK\r\n"
+            b"Content-Type: application/json\r\n"
+            b"Content-Length: 2\r\n"
+            b"\r\n"
+            b"{}"
+        )
+
+        # We should get a successful response
+        r = self.successResultOf(d)
+        self.assertEqual(r, {})
+
+        self.assertFalse(conn.disconnecting)
+
+        # advance the clock; the now-idle connection should get closed
+        self.pump(120)
+
+        self.assertTrue(conn.disconnecting)
diff --git a/tests/patch_inline_callbacks.py b/tests/patch_inline_callbacks.py
new file mode 100644
index 0000000000..0f613945c8
--- /dev/null
+++ b/tests/patch_inline_callbacks.py
@@ -0,0 +1,90 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import functools
+import sys
+
+from twisted.internet import defer
+from twisted.internet.defer import Deferred
+from twisted.python.failure import Failure
+
+
+def do_patch():
+    """
+    Patch defer.inlineCallbacks so that it checks the state of the logcontext on exit
+    """
+
+    from synapse.util.logcontext import LoggingContext
+
+    orig_inline_callbacks = defer.inlineCallbacks
+
+    def new_inline_callbacks(f):
+
+        orig = orig_inline_callbacks(f)
+
+        @functools.wraps(f)
+        def wrapped(*args, **kwargs):
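+            # capture the logcontext on entry so each exit path below can
+            # check that it has been left in the expected state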
+            start_context = LoggingContext.current_context()
+
+            try:
+                res = orig(*args, **kwargs)
+            except Exception:
+                if LoggingContext.current_context() != start_context:
+                    err = "%s changed context from %s to %s on exception" % (
+                        f, start_context, LoggingContext.current_context()
+                    )
+                    print(err, file=sys.stderr)
+                    raise Exception(err)
+                raise
+
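+            # the function completed synchronously (or returned a non-Deferred):
+            # the logcontext must be unchanged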
+            if not isinstance(res, Deferred) or res.called:
+                if LoggingContext.current_context() != start_context:
+                    err = "%s changed context from %s to %s" % (
+                        f, start_context, LoggingContext.current_context()
+                    )
+                    # print the error to stderr because otherwise all we
+                    # see in travis-ci is the 500 error
+                    print(err, file=sys.stderr)
+                    raise Exception(err)
+                return res
+
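+            # the function returned an incomplete Deferred: under the logcontext
+            # rules it should have reset to the sentinel context before returning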
+            if LoggingContext.current_context() != LoggingContext.sentinel:
+                err = (
+                    "%s returned incomplete deferred in non-sentinel context "
+                    "%s (start was %s)"
+                ) % (
+                    f, LoggingContext.current_context(), start_context,
+                )
+                print(err, file=sys.stderr)
+                raise Exception(err)
+
+            def check_ctx(r):
+                if LoggingContext.current_context() != start_context:
+                    err = "%s completion of %s changed context from %s to %s" % (
+                        "Failure" if isinstance(r, Failure) else "Success",
+                        f, start_context, LoggingContext.current_context(),
+                    )
+                    print(err, file=sys.stderr)
+                    raise Exception(err)
+                return r
+
+            res.addBoth(check_ctx)
+            return res
+
+        return wrapped
+
+    defer.inlineCallbacks = new_inline_callbacks
diff --git a/tests/push/__init__.py b/tests/push/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/push/__init__.py
diff --git a/tests/push/test_email.py b/tests/push/test_email.py
new file mode 100644
index 0000000000..50ee6910d1
--- /dev/null
+++ b/tests/push/test_email.py
@@ -0,0 +1,148 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+import pkg_resources
+
+from twisted.internet.defer import Deferred
+
+from synapse.rest.client.v1 import admin, login, room
+
+from tests.unittest import HomeserverTestCase
+
+try:
+    from synapse.push.mailer import load_jinja2_templates
+except Exception:
+    load_jinja2_templates = None
+
+
+class EmailPusherTests(HomeserverTestCase):
+
+    skip = "No Jinja installed" if not load_jinja2_templates else None
+    servlets = [
+        admin.register_servlets,
+        room.register_servlets,
+        login.register_servlets,
+    ]
+    user_id = True
+    hijack_auth = False
+
+    def make_homeserver(self, reactor, clock):
+
+        # List[Tuple[Deferred, args, kwargs]]
+        self.email_attempts = []
+
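+        # a sendmail stub which records each attempt and hands back an
+        # unresolved Deferred, so the test decides when "sending" completes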
+        def sendmail(*args, **kwargs):
+            d = Deferred()
+            self.email_attempts.append((d, args, kwargs))
+            return d
+
+        config = self.default_config()
+        config.email_enable_notifs = True
+        config.start_pushers = True
+
+        config.email_template_dir = os.path.abspath(
+            pkg_resources.resource_filename('synapse', 'res/templates')
+        )
+        config.email_notif_template_html = "notif_mail.html"
+        config.email_notif_template_text = "notif_mail.txt"
+        config.email_smtp_host = "127.0.0.1"
+        config.email_smtp_port = 20
+        config.require_transport_security = False
+        config.email_smtp_user = None
+        config.email_app_name = "Matrix"
+        config.email_notif_from = "test@example.com"
+
+        hs = self.setup_test_homeserver(config=config, sendmail=sendmail)
+
+        return hs
+
+    def test_sends_email(self):
+
+        # Register the user who gets notified
+        user_id = self.register_user("user", "pass")
+        access_token = self.login("user", "pass")
+
+        # Register the user who sends the message
+        other_user_id = self.register_user("otheruser", "pass")
+        other_access_token = self.login("otheruser", "pass")
+
+        # Register the pusher
+        user_tuple = self.get_success(
+            self.hs.get_datastore().get_user_by_access_token(access_token)
+        )
+        token_id = user_tuple["token_id"]
+
+        self.get_success(
+            self.hs.get_pusherpool().add_pusher(
+                user_id=user_id,
+                access_token=token_id,
+                kind="email",
+                app_id="m.email",
+                app_display_name="Email Notifications",
+                device_display_name="a@example.com",
+                pushkey="a@example.com",
+                lang=None,
+                data={},
+            )
+        )
+
+        # Create a room
+        room = self.helper.create_room_as(user_id, tok=access_token)
+
+        # Invite the other person
+        self.helper.invite(room=room, src=user_id, tok=access_token, targ=other_user_id)
+
+        # The other user joins
+        self.helper.join(room=room, user=other_user_id, tok=other_access_token)
+
+        # The other user sends some messages
+        self.helper.send(room, body="Hi!", tok=other_access_token)
+        self.helper.send(room, body="There!", tok=other_access_token)
+
+        # Get the stream ordering before it gets sent
+        pushers = self.get_success(
+            self.hs.get_datastore().get_pushers_by(dict(user_name=user_id))
+        )
+        self.assertEqual(len(pushers), 1)
+        last_stream_ordering = pushers[0]["last_stream_ordering"]
+
+        # Advance time a bit, so the pusher will register that something has happened
+        self.pump(100)
+
+        # It hasn't succeeded yet, so the stream ordering shouldn't have moved
+        pushers = self.get_success(
+            self.hs.get_datastore().get_pushers_by(dict(user_name=user_id))
+        )
+        self.assertEqual(len(pushers), 1)
+        self.assertEqual(last_stream_ordering, pushers[0]["last_stream_ordering"])
+
+        # One attempt should have been made to send the email
+        self.assertEqual(len(self.email_attempts), 1)
+
+        # Make the email succeed
+        self.email_attempts[0][0].callback(True)
+        self.pump()
+
+        # Still only the one email attempt; success should not trigger a resend
+        self.assertEqual(len(self.email_attempts), 1)
+
+        # The stream ordering has increased
+        pushers = self.get_success(
+            self.hs.get_datastore().get_pushers_by(dict(user_name=user_id))
+        )
+        self.assertEqual(len(pushers), 1)
+        self.assertTrue(pushers[0]["last_stream_ordering"] > last_stream_ordering)
diff --git a/tests/push/test_http.py b/tests/push/test_http.py
new file mode 100644
index 0000000000..6dc45e8506
--- /dev/null
+++ b/tests/push/test_http.py
@@ -0,0 +1,160 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from mock import Mock
+
+from twisted.internet.defer import Deferred
+
+from synapse.rest.client.v1 import admin, login, room
+from synapse.util.logcontext import make_deferred_yieldable
+
+from tests.unittest import HomeserverTestCase
+
+try:
+    from synapse.push.mailer import load_jinja2_templates
+except Exception:
+    load_jinja2_templates = None
+
+
+class HTTPPusherTests(HomeserverTestCase):
+
+    skip = "No Jinja installed" if not load_jinja2_templates else None
+    servlets = [
+        admin.register_servlets,
+        room.register_servlets,
+        login.register_servlets,
+    ]
+    user_id = True
+    hijack_auth = False
+
+    def make_homeserver(self, reactor, clock):
+
+        self.push_attempts = []
+
+        m = Mock()
+
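+        # stub out the HTTP client so that pushes are captured rather than sent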
+        def post_json_get_json(url, body):
+            d = Deferred()
+            self.push_attempts.append((d, url, body))
+            return make_deferred_yieldable(d)
+
+        m.post_json_get_json = post_json_get_json
+
+        config = self.default_config()
+        config.start_pushers = True
+
+        hs = self.setup_test_homeserver(config=config, simple_http_client=m)
+
+        return hs
+
+    def test_sends_http(self):
+        """
+        The HTTP pusher will send pushes for each message to an HTTP endpoint
+        when configured to do so.
+        """
+        # Register the user who gets notified
+        user_id = self.register_user("user", "pass")
+        access_token = self.login("user", "pass")
+
+        # Register the user who sends the message
+        other_user_id = self.register_user("otheruser", "pass")
+        other_access_token = self.login("otheruser", "pass")
+
+        # Register the pusher
+        user_tuple = self.get_success(
+            self.hs.get_datastore().get_user_by_access_token(access_token)
+        )
+        token_id = user_tuple["token_id"]
+
+        self.get_success(
+            self.hs.get_pusherpool().add_pusher(
+                user_id=user_id,
+                access_token=token_id,
+                kind="http",
+                app_id="m.http",
+                app_display_name="HTTP Push Notifications",
+                device_display_name="pushy push",
+                pushkey="a@example.com",
+                lang=None,
+                data={"url": "example.com"},
+            )
+        )
+
+        # Create a room
+        room = self.helper.create_room_as(user_id, tok=access_token)
+
+        # Invite the other person
+        self.helper.invite(room=room, src=user_id, tok=access_token, targ=other_user_id)
+
+        # The other user joins
+        self.helper.join(room=room, user=other_user_id, tok=other_access_token)
+
+        # The other user sends some messages
+        self.helper.send(room, body="Hi!", tok=other_access_token)
+        self.helper.send(room, body="There!", tok=other_access_token)
+
+        # Get the stream ordering before it gets sent
+        pushers = self.get_success(
+            self.hs.get_datastore().get_pushers_by(dict(user_name=user_id))
+        )
+        self.assertEqual(len(pushers), 1)
+        last_stream_ordering = pushers[0]["last_stream_ordering"]
+
+        # Advance time a bit, so the pusher will register that something has happened
+        self.pump()
+
+        # It hasn't succeeded yet, so the stream ordering shouldn't have moved
+        pushers = self.get_success(
+            self.hs.get_datastore().get_pushers_by(dict(user_name=user_id))
+        )
+        self.assertEqual(len(pushers), 1)
+        self.assertEqual(last_stream_ordering, pushers[0]["last_stream_ordering"])
+
+        # One push attempt should have been made, for the first message
+        self.assertEqual(len(self.push_attempts), 1)
+        self.assertEqual(self.push_attempts[0][1], "example.com")
+        self.assertEqual(
+            self.push_attempts[0][2]["notification"]["content"]["body"], "Hi!"
+        )
+
+        # Make the push succeed
+        self.push_attempts[0][0].callback({})
+        self.pump()
+
+        # The stream ordering has increased
+        pushers = self.get_success(
+            self.hs.get_datastore().get_pushers_by(dict(user_name=user_id))
+        )
+        self.assertEqual(len(pushers), 1)
+        self.assertTrue(pushers[0]["last_stream_ordering"] > last_stream_ordering)
+        last_stream_ordering = pushers[0]["last_stream_ordering"]
+
+        # Now it'll try to send the push for the second message
+        self.assertEqual(len(self.push_attempts), 2)
+        self.assertEqual(self.push_attempts[1][1], "example.com")
+        self.assertEqual(
+            self.push_attempts[1][2]["notification"]["content"]["body"], "There!"
+        )
+
+        # Make the second push succeed
+        self.push_attempts[1][0].callback({})
+        self.pump()
+
+        # The stream ordering has increased, again
+        pushers = self.get_success(
+            self.hs.get_datastore().get_pushers_by(dict(user_name=user_id))
+        )
+        self.assertEqual(len(pushers), 1)
+        self.assertTrue(pushers[0]["last_stream_ordering"] > last_stream_ordering)
diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py
index 41be5d5a1a..1688a741d1 100644
--- a/tests/replication/slave/storage/test_events.py
+++ b/tests/replication/slave/storage/test_events.py
@@ -28,8 +28,8 @@ ROOM_ID = "!room:blue"
 
 
 def dict_equals(self, other):
-    me = encode_canonical_json(self._event_dict)
-    them = encode_canonical_json(other._event_dict)
+    me = encode_canonical_json(self.get_pdu_json())
+    them = encode_canonical_json(other.get_pdu_json())
     return me == them
 
 
diff --git a/tests/rest/client/test_consent.py b/tests/rest/client/test_consent.py
new file mode 100644
index 0000000000..4294bbec2a
--- /dev/null
+++ b/tests/rest/client/test_consent.py
@@ -0,0 +1,118 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+from synapse.api.urls import ConsentURIBuilder
+from synapse.rest.client.v1 import admin, login, room
+from synapse.rest.consent import consent_resource
+
+from tests import unittest
+from tests.server import render
+
+try:
+    from synapse.push.mailer import load_jinja2_templates
+except Exception:
+    load_jinja2_templates = None
+
+
+class ConsentResourceTestCase(unittest.HomeserverTestCase):
+    skip = "No Jinja installed" if not load_jinja2_templates else None
+    servlets = [
+        admin.register_servlets,
+        room.register_servlets,
+        login.register_servlets,
+    ]
+    user_id = True
+    hijack_auth = False
+
+    def make_homeserver(self, reactor, clock):
+
+        config = self.default_config()
+        config.user_consent_version = "1"
+        config.public_baseurl = ""
+        config.form_secret = "123abc"
+
+        # Make some temporary templates...
+        temp_consent_path = self.mktemp()
+        os.mkdir(temp_consent_path)
+        os.mkdir(os.path.join(temp_consent_path, 'en'))
+        config.user_consent_template_dir = os.path.abspath(temp_consent_path)
+
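+        # the "1.html" template renders just the consent version and the
+        # consent status, which the tests parse back out of the response body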
+        with open(os.path.join(temp_consent_path, "en/1.html"), 'w') as f:
+            f.write("{{version}},{{has_consented}}")
+
+        with open(os.path.join(temp_consent_path, "en/success.html"), 'w') as f:
+            f.write("yay!")
+
+        hs = self.setup_test_homeserver(config=config)
+        return hs
+
+    def test_render_public_consent(self):
+        """You can observe the terms form without specifying a user"""
+        resource = consent_resource.ConsentResource(self.hs)
+        request, channel = self.make_request("GET", "/consent?v=1", shorthand=False)
+        render(request, resource, self.reactor)
+        self.assertEqual(channel.code, 200)
+
+    def test_accept_consent(self):
+        """
+        A user can use the consent form to accept the terms.
+        """
+        uri_builder = ConsentURIBuilder(self.hs.config)
+        resource = consent_resource.ConsentResource(self.hs)
+
+        # Register a user
+        user_id = self.register_user("user", "pass")
+        access_token = self.login("user", "pass")
+
+        # Fetch the consent page, to get the consent version
+        consent_uri = (
+            uri_builder.build_user_consent_uri(user_id).replace("_matrix/", "")
+            + "&u=user"
+        )
+        request, channel = self.make_request(
+            "GET", consent_uri, access_token=access_token, shorthand=False
+        )
+        render(request, resource, self.reactor)
+        self.assertEqual(channel.code, 200)
+
+        # Get the version from the body, and whether we've consented
+        version, consented = channel.result["body"].decode('ascii').split(",")
+        self.assertEqual(consented, "False")
+
+        # POST to the consent page, saying we've agreed
+        request, channel = self.make_request(
+            "POST",
+            consent_uri + "&v=" + version,
+            access_token=access_token,
+            shorthand=False,
+        )
+        render(request, resource, self.reactor)
+        self.assertEqual(channel.code, 200)
+
+        # Fetch the consent page again -- the consent status it reports should
+        # have changed
+        request, channel = self.make_request(
+            "GET", consent_uri, access_token=access_token, shorthand=False
+        )
+        render(request, resource, self.reactor)
+        self.assertEqual(channel.code, 200)
+
+        # Get the version from the body, and check that it's the version we
+        # agreed to, and that we've consented to it.
+        version, consented = channel.result["body"].decode('ascii').split(",")
+        self.assertEqual(consented, "True")
+        self.assertEqual(version, "1")
diff --git a/tests/rest/client/v1/test_admin.py b/tests/rest/client/v1/test_admin.py
index 1a553fa3f9..407bf0ac4c 100644
--- a/tests/rest/client/v1/test_admin.py
+++ b/tests/rest/client/v1/test_admin.py
@@ -19,24 +19,18 @@ import json
 
 from mock import Mock
 
-from synapse.http.server import JsonResource
+from synapse.api.constants import UserTypes
 from synapse.rest.client.v1.admin import register_servlets
-from synapse.util import Clock
 
 from tests import unittest
-from tests.server import (
-    ThreadedMemoryReactorClock,
-    make_request,
-    render,
-    setup_test_homeserver,
-)
 
 
-class UserRegisterTestCase(unittest.TestCase):
-    def setUp(self):
+class UserRegisterTestCase(unittest.HomeserverTestCase):
+
+    servlets = [register_servlets]
+
+    def make_homeserver(self, reactor, clock):
 
-        self.clock = ThreadedMemoryReactorClock()
-        self.hs_clock = Clock(self.clock)
         self.url = "/_matrix/client/r0/admin/register"
 
         self.registration_handler = Mock()
@@ -50,17 +44,14 @@ class UserRegisterTestCase(unittest.TestCase):
 
         self.secrets = Mock()
 
-        self.hs = setup_test_homeserver(
-            self.addCleanup, http_client=None, clock=self.hs_clock, reactor=self.clock
-        )
+        self.hs = self.setup_test_homeserver()
 
         self.hs.config.registration_shared_secret = u"shared"
 
         self.hs.get_media_repository = Mock()
         self.hs.get_deactivate_account_handler = Mock()
 
-        self.resource = JsonResource(self.hs)
-        register_servlets(self.hs, self.resource)
+        return self.hs
 
     def test_disabled(self):
         """
@@ -69,8 +60,8 @@ class UserRegisterTestCase(unittest.TestCase):
         """
         self.hs.config.registration_shared_secret = None
 
-        request, channel = make_request("POST", self.url, b'{}')
-        render(request, self.resource, self.clock)
+        request, channel = self.make_request("POST", self.url, b'{}')
+        self.render(request)
 
         self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
         self.assertEqual(
@@ -87,8 +78,8 @@ class UserRegisterTestCase(unittest.TestCase):
 
         self.hs.get_secrets = Mock(return_value=secrets)
 
-        request, channel = make_request("GET", self.url)
-        render(request, self.resource, self.clock)
+        request, channel = self.make_request("GET", self.url)
+        self.render(request)
 
         self.assertEqual(channel.json_body, {"nonce": "abcd"})
 
@@ -97,25 +88,25 @@ class UserRegisterTestCase(unittest.TestCase):
         Calling GET on the endpoint will return a randomised nonce, which will
         only last for SALT_TIMEOUT (60s).
         """
-        request, channel = make_request("GET", self.url)
-        render(request, self.resource, self.clock)
+        request, channel = self.make_request("GET", self.url)
+        self.render(request)
         nonce = channel.json_body["nonce"]
 
         # 59 seconds
-        self.clock.advance(59)
+        self.reactor.advance(59)
 
         body = json.dumps({"nonce": nonce})
-        request, channel = make_request("POST", self.url, body.encode('utf8'))
-        render(request, self.resource, self.clock)
+        request, channel = self.make_request("POST", self.url, body.encode('utf8'))
+        self.render(request)
 
         self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
         self.assertEqual('username must be specified', channel.json_body["error"])
 
         # 61 seconds
-        self.clock.advance(2)
+        self.reactor.advance(2)
 
-        request, channel = make_request("POST", self.url, body.encode('utf8'))
-        render(request, self.resource, self.clock)
+        request, channel = self.make_request("POST", self.url, body.encode('utf8'))
+        self.render(request)
 
         self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
         self.assertEqual('unrecognised nonce', channel.json_body["error"])
@@ -124,8 +115,8 @@ class UserRegisterTestCase(unittest.TestCase):
         """
         Only the provided nonce can be used, as it's checked in the MAC.
         """
-        request, channel = make_request("GET", self.url)
-        render(request, self.resource, self.clock)
+        request, channel = self.make_request("GET", self.url)
+        self.render(request)
         nonce = channel.json_body["nonce"]
 
         want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1)
@@ -141,8 +132,8 @@ class UserRegisterTestCase(unittest.TestCase):
                 "mac": want_mac,
             }
         )
-        request, channel = make_request("POST", self.url, body.encode('utf8'))
-        render(request, self.resource, self.clock)
+        request, channel = self.make_request("POST", self.url, body.encode('utf8'))
+        self.render(request)
 
         self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
         self.assertEqual("HMAC incorrect", channel.json_body["error"])
@@ -152,12 +143,14 @@ class UserRegisterTestCase(unittest.TestCase):
         When the correct nonce is provided, and the right key is provided, the
         user is registered.
         """
-        request, channel = make_request("GET", self.url)
-        render(request, self.resource, self.clock)
+        request, channel = self.make_request("GET", self.url)
+        self.render(request)
         nonce = channel.json_body["nonce"]
 
         want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1)
-        want_mac.update(nonce.encode('ascii') + b"\x00bob\x00abc123\x00admin")
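+        # the MAC covers nonce, username, password, admin flag and the new
+        # user type, joined with NUL bytes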
+        want_mac.update(
+            nonce.encode('ascii') + b"\x00bob\x00abc123\x00admin\x00support"
+        )
         want_mac = want_mac.hexdigest()
 
         body = json.dumps(
@@ -166,11 +159,12 @@ class UserRegisterTestCase(unittest.TestCase):
                 "username": "bob",
                 "password": "abc123",
                 "admin": True,
+                "user_type": UserTypes.SUPPORT,
                 "mac": want_mac,
             }
         )
-        request, channel = make_request("POST", self.url, body.encode('utf8'))
-        render(request, self.resource, self.clock)
+        request, channel = self.make_request("POST", self.url, body.encode('utf8'))
+        self.render(request)
 
         self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
         self.assertEqual("@bob:test", channel.json_body["user_id"])
@@ -179,12 +173,14 @@ class UserRegisterTestCase(unittest.TestCase):
         """
         A valid unrecognised nonce.
         """
-        request, channel = make_request("GET", self.url)
-        render(request, self.resource, self.clock)
+        request, channel = self.make_request("GET", self.url)
+        self.render(request)
         nonce = channel.json_body["nonce"]
 
         want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1)
-        want_mac.update(nonce.encode('ascii') + b"\x00bob\x00abc123\x00admin")
+        want_mac.update(
+            nonce.encode('ascii') + b"\x00bob\x00abc123\x00admin"
+        )
         want_mac = want_mac.hexdigest()
 
         body = json.dumps(
@@ -196,15 +192,15 @@ class UserRegisterTestCase(unittest.TestCase):
                 "mac": want_mac,
             }
         )
-        request, channel = make_request("POST", self.url, body.encode('utf8'))
-        render(request, self.resource, self.clock)
+        request, channel = self.make_request("POST", self.url, body.encode('utf8'))
+        self.render(request)
 
         self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
         self.assertEqual("@bob:test", channel.json_body["user_id"])
 
         # Now, try and reuse it
-        request, channel = make_request("POST", self.url, body.encode('utf8'))
-        render(request, self.resource, self.clock)
+        request, channel = self.make_request("POST", self.url, body.encode('utf8'))
+        self.render(request)
 
         self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
         self.assertEqual('unrecognised nonce', channel.json_body["error"])
@@ -212,13 +208,13 @@ class UserRegisterTestCase(unittest.TestCase):
     def test_missing_parts(self):
         """
         Synapse will complain if you don't give nonce, username, password, and
-        mac.  Admin is optional.  Additional checks are done for length and
-        type.
+        mac.  Admin and user_type are optional.  Additional checks are done for length
+        and type.
         """
 
         def nonce():
-            request, channel = make_request("GET", self.url)
-            render(request, self.resource, self.clock)
+            request, channel = self.make_request("GET", self.url)
+            self.render(request)
             return channel.json_body["nonce"]
 
         #
@@ -227,8 +223,8 @@ class UserRegisterTestCase(unittest.TestCase):
 
         # Must be present
         body = json.dumps({})
-        request, channel = make_request("POST", self.url, body.encode('utf8'))
-        render(request, self.resource, self.clock)
+        request, channel = self.make_request("POST", self.url, body.encode('utf8'))
+        self.render(request)
 
         self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
         self.assertEqual('nonce must be specified', channel.json_body["error"])
@@ -239,52 +235,52 @@ class UserRegisterTestCase(unittest.TestCase):
 
         # Must be present
         body = json.dumps({"nonce": nonce()})
-        request, channel = make_request("POST", self.url, body.encode('utf8'))
-        render(request, self.resource, self.clock)
+        request, channel = self.make_request("POST", self.url, body.encode('utf8'))
+        self.render(request)
 
         self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
         self.assertEqual('username must be specified', channel.json_body["error"])
 
         # Must be a string
         body = json.dumps({"nonce": nonce(), "username": 1234})
-        request, channel = make_request("POST", self.url, body.encode('utf8'))
-        render(request, self.resource, self.clock)
+        request, channel = self.make_request("POST", self.url, body.encode('utf8'))
+        self.render(request)
 
         self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
         self.assertEqual('Invalid username', channel.json_body["error"])
 
         # Must not have null bytes
         body = json.dumps({"nonce": nonce(), "username": u"abcd\u0000"})
-        request, channel = make_request("POST", self.url, body.encode('utf8'))
-        render(request, self.resource, self.clock)
+        request, channel = self.make_request("POST", self.url, body.encode('utf8'))
+        self.render(request)
 
         self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
         self.assertEqual('Invalid username', channel.json_body["error"])
 
         # Super long
         body = json.dumps({"nonce": nonce(), "username": "a" * 1000})
-        request, channel = make_request("POST", self.url, body.encode('utf8'))
-        render(request, self.resource, self.clock)
+        request, channel = self.make_request("POST", self.url, body.encode('utf8'))
+        self.render(request)
 
         self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
         self.assertEqual('Invalid username', channel.json_body["error"])
 
         #
-        # Username checks
+        # Password checks
         #
 
         # Must be present
         body = json.dumps({"nonce": nonce(), "username": "a"})
-        request, channel = make_request("POST", self.url, body.encode('utf8'))
-        render(request, self.resource, self.clock)
+        request, channel = self.make_request("POST", self.url, body.encode('utf8'))
+        self.render(request)
 
         self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
         self.assertEqual('password must be specified', channel.json_body["error"])
 
         # Must be a string
         body = json.dumps({"nonce": nonce(), "username": "a", "password": 1234})
-        request, channel = make_request("POST", self.url, body.encode('utf8'))
-        render(request, self.resource, self.clock)
+        request, channel = self.make_request("POST", self.url, body.encode('utf8'))
+        self.render(request)
 
         self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
         self.assertEqual('Invalid password', channel.json_body["error"])
@@ -293,16 +289,33 @@ class UserRegisterTestCase(unittest.TestCase):
         body = json.dumps(
             {"nonce": nonce(), "username": "a", "password": u"abcd\u0000"}
         )
-        request, channel = make_request("POST", self.url, body.encode('utf8'))
-        render(request, self.resource, self.clock)
+        request, channel = self.make_request("POST", self.url, body.encode('utf8'))
+        self.render(request)
 
         self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
         self.assertEqual('Invalid password', channel.json_body["error"])
 
         # Super long
         body = json.dumps({"nonce": nonce(), "username": "a", "password": "A" * 1000})
-        request, channel = make_request("POST", self.url, body.encode('utf8'))
-        render(request, self.resource, self.clock)
+        request, channel = self.make_request("POST", self.url, body.encode('utf8'))
+        self.render(request)
 
         self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
         self.assertEqual('Invalid password', channel.json_body["error"])
+
+        #
+        # user_type check
+        #
+
+        # Invalid user_type
+        body = json.dumps({
+            "nonce": nonce(),
+            "username": "a",
+            "password": "1234",
+            "user_type": "invalid"}
+        )
+        request, channel = self.make_request("POST", self.url, body.encode('utf8'))
+        self.render(request)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual('Invalid user type', channel.json_body["error"])
diff --git a/tests/rest/client/v1/test_events.py b/tests/rest/client/v1/test_events.py
index 956f7fc4c4..483bebc832 100644
--- a/tests/rest/client/v1/test_events.py
+++ b/tests/rest/client/v1/test_events.py
@@ -16,64 +16,49 @@
 """ Tests REST events for /events paths."""
 
 from mock import Mock, NonCallableMock
-from six import PY3
 
-from twisted.internet import defer
+from synapse.rest.client.v1 import admin, events, login, room
 
-from ....utils import MockHttpResource, setup_test_homeserver
-from .utils import RestTestCase
+from tests import unittest
 
-PATH_PREFIX = "/_matrix/client/api/v1"
 
-
-class EventStreamPermissionsTestCase(RestTestCase):
+class EventStreamPermissionsTestCase(unittest.HomeserverTestCase):
     """ Tests event streaming (GET /events). """
 
-    if PY3:
-        skip = "Skip on Py3 until ported to use not V1 only register."
+    servlets = [
+        events.register_servlets,
+        room.register_servlets,
+        admin.register_servlets,
+        login.register_servlets,
+    ]
 
-    @defer.inlineCallbacks
-    def setUp(self):
-        import synapse.rest.client.v1.events
-        import synapse.rest.client.v1_only.register
-        import synapse.rest.client.v1.room
+    def make_homeserver(self, reactor, clock):
 
-        self.mock_resource = MockHttpResource(prefix=PATH_PREFIX)
+        config = self.default_config()
+        config.enable_registration_captcha = False
+        config.enable_registration = True
+        config.auto_join_rooms = []
 
-        hs = yield setup_test_homeserver(
-            self.addCleanup,
-            http_client=None,
-            federation_client=Mock(),
-            ratelimiter=NonCallableMock(spec_set=["send_message"]),
+        hs = self.setup_test_homeserver(
+            config=config, ratelimiter=NonCallableMock(spec_set=["send_message"])
         )
         self.ratelimiter = hs.get_ratelimiter()
         self.ratelimiter.send_message.return_value = (True, 0)
-        hs.config.enable_registration_captcha = False
-        hs.config.enable_registration = True
-        hs.config.auto_join_rooms = []
 
         hs.get_handlers().federation_handler = Mock()
 
-        synapse.rest.client.v1_only.register.register_servlets(hs, self.mock_resource)
-        synapse.rest.client.v1.events.register_servlets(hs, self.mock_resource)
-        synapse.rest.client.v1.room.register_servlets(hs, self.mock_resource)
+        return hs
+
+    def prepare(self, hs, reactor, clock):
 
         # register an account
-        self.user_id = "sid1"
-        response = yield self.register(self.user_id)
-        self.token = response["access_token"]
-        self.user_id = response["user_id"]
+        self.user_id = self.register_user("sid1", "pass")
+        self.token = self.login(self.user_id, "pass")
 
         # register a 2nd account
-        self.other_user = "other1"
-        response = yield self.register(self.other_user)
-        self.other_token = response["access_token"]
-        self.other_user = response["user_id"]
+        self.other_user = self.register_user("other2", "pass")
+        self.other_token = self.login(self.other_user, "pass")
 
-    def tearDown(self):
-        pass
-
-    @defer.inlineCallbacks
     def test_stream_basic_permissions(self):
         # invalid token, expect 401
         # note: this is in violation of the original v1 spec, which expected
@@ -81,34 +66,37 @@ class EventStreamPermissionsTestCase(RestTestCase):
         # implementation is now part of the r0 implementation, the newer
         # behaviour is used instead to be consistent with the r0 spec.
         # see issue #2602
-        (code, response) = yield self.mock_resource.trigger_get(
-            "/events?access_token=%s" % ("invalid" + self.token,)
+        request, channel = self.make_request(
+            "GET", "/events?access_token=%s" % ("invalid" + self.token,)
         )
-        self.assertEquals(401, code, msg=str(response))
+        self.render(request)
+        self.assertEquals(channel.code, 401, msg=channel.result)
 
         # valid token, expect content
-        (code, response) = yield self.mock_resource.trigger_get(
-            "/events?access_token=%s&timeout=0" % (self.token,)
+        request, channel = self.make_request(
+            "GET", "/events?access_token=%s&timeout=0" % (self.token,)
         )
-        self.assertEquals(200, code, msg=str(response))
-        self.assertTrue("chunk" in response)
-        self.assertTrue("start" in response)
-        self.assertTrue("end" in response)
+        self.render(request)
+        self.assertEquals(channel.code, 200, msg=channel.result)
+        self.assertTrue("chunk" in channel.json_body)
+        self.assertTrue("start" in channel.json_body)
+        self.assertTrue("end" in channel.json_body)
 
-    @defer.inlineCallbacks
     def test_stream_room_permissions(self):
-        room_id = yield self.create_room_as(self.other_user, tok=self.other_token)
-        yield self.send(room_id, tok=self.other_token)
+        room_id = self.helper.create_room_as(self.other_user, tok=self.other_token)
+        self.helper.send(room_id, tok=self.other_token)
 
         # invited to room (expect no content for room)
-        yield self.invite(
+        self.helper.invite(
             room_id, src=self.other_user, targ=self.user_id, tok=self.other_token
         )
 
-        (code, response) = yield self.mock_resource.trigger_get(
-            "/events?access_token=%s&timeout=0" % (self.token,)
+        # valid token, expect content
+        request, channel = self.make_request(
+            "GET", "/events?access_token=%s&timeout=0" % (self.token,)
         )
-        self.assertEquals(200, code, msg=str(response))
+        self.render(request)
+        self.assertEquals(channel.code, 200, msg=channel.result)
 
         # We may get a presence event for ourselves down the stream
         self.assertEquals(
@@ -116,7 +104,7 @@ class EventStreamPermissionsTestCase(RestTestCase):
             len(
                 [
                     c
-                    for c in response["chunk"]
+                    for c in channel.json_body["chunk"]
                     if not (
                         c.get("type") == "m.presence"
                         and c["content"].get("user_id") == self.user_id
@@ -126,7 +114,7 @@ class EventStreamPermissionsTestCase(RestTestCase):
         )
 
         # joined room (expect all content for room)
-        yield self.join(room=room_id, user=self.user_id, tok=self.token)
+        self.helper.join(room=room_id, user=self.user_id, tok=self.token)
 
         # left room (expect no content for room)
 
diff --git a/tests/rest/client/v1/test_register.py b/tests/rest/client/v1/test_register.py
deleted file mode 100644
index 6b7ff813d5..0000000000
--- a/tests/rest/client/v1/test_register.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2015, 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import json
-
-from mock import Mock
-from six import PY3
-
-from twisted.test.proto_helpers import MemoryReactorClock
-
-from synapse.http.server import JsonResource
-from synapse.rest.client.v1_only.register import register_servlets
-from synapse.util import Clock
-
-from tests import unittest
-from tests.server import make_request, render, setup_test_homeserver
-
-
-class CreateUserServletTestCase(unittest.TestCase):
-    """
-    Tests for CreateUserRestServlet.
-    """
-
-    if PY3:
-        skip = "Not ported to Python 3."
-
-    def setUp(self):
-        self.registration_handler = Mock()
-
-        self.appservice = Mock(sender="@as:test")
-        self.datastore = Mock(
-            get_app_service_by_token=Mock(return_value=self.appservice)
-        )
-
-        handlers = Mock(registration_handler=self.registration_handler)
-        self.clock = MemoryReactorClock()
-        self.hs_clock = Clock(self.clock)
-
-        self.hs = self.hs = setup_test_homeserver(
-            self.addCleanup, http_client=None, clock=self.hs_clock, reactor=self.clock
-        )
-        self.hs.get_datastore = Mock(return_value=self.datastore)
-        self.hs.get_handlers = Mock(return_value=handlers)
-
-    def test_POST_createuser_with_valid_user(self):
-
-        res = JsonResource(self.hs)
-        register_servlets(self.hs, res)
-
-        request_data = json.dumps(
-            {
-                "localpart": "someone",
-                "displayname": "someone interesting",
-                "duration_seconds": 200,
-            }
-        )
-
-        url = b'/_matrix/client/api/v1/createUser?access_token=i_am_an_app_service'
-
-        user_id = "@someone:interesting"
-        token = "my token"
-
-        self.registration_handler.get_or_create_user = Mock(
-            return_value=(user_id, token)
-        )
-
-        request, channel = make_request(b"POST", url, request_data)
-        render(request, res, self.clock)
-
-        self.assertEquals(channel.result["code"], b"200")
-
-        det_data = {
-            "user_id": user_id,
-            "access_token": token,
-            "home_server": self.hs.hostname,
-        }
-        self.assertDictContainsSubset(det_data, json.loads(channel.result["body"]))
diff --git a/tests/rest/client/v1/utils.py b/tests/rest/client/v1/utils.py
index 530dc8ba6d..9c401bf300 100644
--- a/tests/rest/client/v1/utils.py
+++ b/tests/rest/client/v1/utils.py
@@ -169,7 +169,7 @@ class RestHelper(object):
             path = path + "?access_token=%s" % tok
 
         request, channel = make_request(
-            "POST", path, json.dumps(content).encode('utf8')
+            self.hs.get_reactor(), "POST", path, json.dumps(content).encode('utf8')
         )
         render(request, self.resource, self.hs.get_reactor())
 
@@ -217,7 +217,9 @@ class RestHelper(object):
 
         data = {"membership": membership}
 
-        request, channel = make_request("PUT", path, json.dumps(data).encode('utf8'))
+        request, channel = make_request(
+            self.hs.get_reactor(), "PUT", path, json.dumps(data).encode('utf8')
+        )
 
         render(request, self.resource, self.hs.get_reactor())
 
@@ -228,18 +230,6 @@ class RestHelper(object):
 
         self.auth_user_id = temp_id
 
-    @defer.inlineCallbacks
-    def register(self, user_id):
-        (code, response) = yield self.mock_resource.trigger(
-            "POST",
-            "/_matrix/client/r0/register",
-            json.dumps(
-                {"user": user_id, "password": "test", "type": "m.login.password"}
-            ),
-        )
-        self.assertEquals(200, code)
-        defer.returnValue(response)
-
     def send(self, room_id, body=None, txn_id=None, tok=None, expect_code=200):
         if txn_id is None:
             txn_id = "m%s" % (str(time.time()))
@@ -251,7 +241,9 @@ class RestHelper(object):
         if tok:
             path = path + "?access_token=%s" % tok
 
-        request, channel = make_request("PUT", path, json.dumps(content).encode('utf8'))
+        request, channel = make_request(
+            self.hs.get_reactor(), "PUT", path, json.dumps(content).encode('utf8')
+        )
         render(request, self.resource, self.hs.get_reactor())
 
         assert int(channel.result["code"]) == expect_code, (
diff --git a/tests/rest/client/v2_alpha/test_auth.py b/tests/rest/client/v2_alpha/test_auth.py
new file mode 100644
index 0000000000..7fa120a10f
--- /dev/null
+++ b/tests/rest/client/v2_alpha/test_auth.py
@@ -0,0 +1,104 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from twisted.internet.defer import succeed
+
+from synapse.api.constants import LoginType
+from synapse.rest.client.v1 import admin
+from synapse.rest.client.v2_alpha import auth, register
+
+from tests import unittest
+
+
+class FallbackAuthTests(unittest.HomeserverTestCase):
+
+    servlets = [
+        auth.register_servlets,
+        admin.register_servlets,
+        register.register_servlets,
+    ]
+    hijack_auth = False
+
+    def make_homeserver(self, reactor, clock):
+
+        config = self.default_config()
+
+        config.enable_registration_captcha = True
+        config.recaptcha_public_key = "brokencake"
+        config.registrations_require_3pid = []
+
+        hs = self.setup_test_homeserver(config=config)
+        return hs
+
+    def prepare(self, reactor, clock, hs):
+        auth_handler = hs.get_auth_handler()
+
+        self.recaptcha_attempts = []
+
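+        # Stub out the recaptcha checker so it always succeeds, recording
+        # each attempt so the test can assert on what was submitted.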
+        def _recaptcha(authdict, clientip):
+            self.recaptcha_attempts.append((authdict, clientip))
+            return succeed(True)
+
+        auth_handler.checkers[LoginType.RECAPTCHA] = _recaptcha
+
+    @unittest.INFO
+    def test_fallback_captcha(self):
+
+        request, channel = self.make_request(
+            "POST",
+            "register",
+            {"username": "user", "type": "m.login.password", "password": "bar"},
+        )
+        self.render(request)
+
+        # Returns a 401 as per the spec
+        self.assertEqual(request.code, 401)
+        # Grab the session
+        session = channel.json_body["session"]
+        # Assert our configured public key is being given
+        self.assertEqual(
+            channel.json_body["params"]["m.login.recaptcha"]["public_key"], "brokencake"
+        )
+
+        request, channel = self.make_request(
+            "GET", "auth/m.login.recaptcha/fallback/web?session=" + session
+        )
+        self.render(request)
+        self.assertEqual(request.code, 200)
+
+        request, channel = self.make_request(
+            "POST",
+            "auth/m.login.recaptcha/fallback/web?session="
+            + session
+            + "&g-recaptcha-response=a",
+        )
+        self.render(request)
+        self.assertEqual(request.code, 200)
+
+        # The recaptcha handler is called with the response given
+        self.assertEqual(len(self.recaptcha_attempts), 1)
+        self.assertEqual(self.recaptcha_attempts[0][0]["response"], "a")
+
+        # Now that we have fulfilled the recaptcha fallback step, we can send
+        # a request to the register API with the session in the authdict.
+        request, channel = self.make_request(
+            "POST", "register", {"auth": {"session": session}}
+        )
+        self.render(request)
+        self.assertEqual(channel.code, 200)
+
+        # We're given a registered user.
+        self.assertEqual(channel.json_body["user_id"], "@user:test")
diff --git a/tests/rest/client/v2_alpha/test_capabilities.py b/tests/rest/client/v2_alpha/test_capabilities.py
new file mode 100644
index 0000000000..d3d43970fb
--- /dev/null
+++ b/tests/rest/client/v2_alpha/test_capabilities.py
@@ -0,0 +1,78 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.api.constants import DEFAULT_ROOM_VERSION, KNOWN_ROOM_VERSIONS
+from synapse.rest.client.v1 import admin, login
+from synapse.rest.client.v2_alpha import capabilities
+
+from tests import unittest
+
+
+class CapabilitiesTestCase(unittest.HomeserverTestCase):
+
+    servlets = [
+        admin.register_servlets,
+        capabilities.register_servlets,
+        login.register_servlets,
+    ]
+
+    def make_homeserver(self, reactor, clock):
+        self.url = b"/_matrix/client/r0/capabilities"
+        hs = self.setup_test_homeserver()
+        self.store = hs.get_datastore()
+        return hs
+
+    def test_check_auth_required(self):
+        request, channel = self.make_request("GET", self.url)
+        self.render(request)
+
+        self.assertEqual(channel.code, 401)
+
+    def test_get_room_version_capabilities(self):
+        self.register_user("user", "pass")
+        access_token = self.login("user", "pass")
+
+        request, channel = self.make_request("GET", self.url, access_token=access_token)
+        self.render(request)
+        capabilities = channel.json_body['capabilities']
+
+        self.assertEqual(channel.code, 200)
+        for room_version in capabilities['m.room_versions']['available'].keys():
+            self.assertTrue(room_version in KNOWN_ROOM_VERSIONS, "" + room_version)
+        self.assertEqual(
+            DEFAULT_ROOM_VERSION, capabilities['m.room_versions']['default']
+        )
+
+    def test_get_change_password_capabilities(self):
+        localpart = "user"
+        password = "pass"
+        user = self.register_user(localpart, password)
+        access_token = self.login(user, password)
+
+        request, channel = self.make_request("GET", self.url, access_token=access_token)
+        self.render(request)
+        capabilities = channel.json_body['capabilities']
+
+        self.assertEqual(channel.code, 200)
+
+        # Test case where password is handled outside of Synapse
+        self.assertTrue(capabilities['m.change_password']['enabled'])
+        self.get_success(self.store.user_set_password_hash(user, None))
+        request, channel = self.make_request("GET", self.url, access_token=access_token)
+        self.render(request)
+        capabilities = channel.json_body['capabilities']
+
+        self.assertEqual(channel.code, 200)
+        self.assertFalse(capabilities['m.change_password']['enabled'])
diff --git a/tests/rest/client/v2_alpha/test_filter.py b/tests/rest/client/v2_alpha/test_filter.py
index 6a886ee3b8..f42a8efbf4 100644
--- a/tests/rest/client/v2_alpha/test_filter.py
+++ b/tests/rest/client/v2_alpha/test_filter.py
@@ -13,84 +13,47 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import synapse.types
 from synapse.api.errors import Codes
-from synapse.http.server import JsonResource
 from synapse.rest.client.v2_alpha import filter
-from synapse.types import UserID
-from synapse.util import Clock
 
 from tests import unittest
-from tests.server import (
-    ThreadedMemoryReactorClock as MemoryReactorClock,
-    make_request,
-    render,
-    setup_test_homeserver,
-)
 
 PATH_PREFIX = "/_matrix/client/v2_alpha"
 
 
-class FilterTestCase(unittest.TestCase):
+class FilterTestCase(unittest.HomeserverTestCase):
 
-    USER_ID = "@apple:test"
+    user_id = "@apple:test"
+    hijack_auth = True
     EXAMPLE_FILTER = {"room": {"timeline": {"types": ["m.room.message"]}}}
     EXAMPLE_FILTER_JSON = b'{"room": {"timeline": {"types": ["m.room.message"]}}}'
-    TO_REGISTER = [filter]
+    servlets = [filter.register_servlets]
 
-    def setUp(self):
-        self.clock = MemoryReactorClock()
-        self.hs_clock = Clock(self.clock)
-
-        self.hs = setup_test_homeserver(
-            self.addCleanup, http_client=None, clock=self.hs_clock, reactor=self.clock
-        )
-
-        self.auth = self.hs.get_auth()
-
-        def get_user_by_access_token(token=None, allow_guest=False):
-            return {
-                "user": UserID.from_string(self.USER_ID),
-                "token_id": 1,
-                "is_guest": False,
-            }
-
-        def get_user_by_req(request, allow_guest=False, rights="access"):
-            return synapse.types.create_requester(
-                UserID.from_string(self.USER_ID), 1, False, None
-            )
-
-        self.auth.get_user_by_access_token = get_user_by_access_token
-        self.auth.get_user_by_req = get_user_by_req
-
-        self.store = self.hs.get_datastore()
-        self.filtering = self.hs.get_filtering()
-        self.resource = JsonResource(self.hs)
-
-        for r in self.TO_REGISTER:
-            r.register_servlets(self.hs, self.resource)
+    def prepare(self, reactor, clock, hs):
+        self.filtering = hs.get_filtering()
+        self.store = hs.get_datastore()
 
     def test_add_filter(self):
-        request, channel = make_request(
+        request, channel = self.make_request(
             "POST",
-            "/_matrix/client/r0/user/%s/filter" % (self.USER_ID),
+            "/_matrix/client/r0/user/%s/filter" % (self.user_id),
             self.EXAMPLE_FILTER_JSON,
         )
-        render(request, self.resource, self.clock)
+        self.render(request)
 
         self.assertEqual(channel.result["code"], b"200")
         self.assertEqual(channel.json_body, {"filter_id": "0"})
         filter = self.store.get_user_filter(user_localpart="apple", filter_id=0)
-        self.clock.advance(0)
+        self.pump()
         self.assertEquals(filter.result, self.EXAMPLE_FILTER)
 
     def test_add_filter_for_other_user(self):
-        request, channel = make_request(
+        request, channel = self.make_request(
             "POST",
             "/_matrix/client/r0/user/%s/filter" % ("@watermelon:test"),
             self.EXAMPLE_FILTER_JSON,
         )
-        render(request, self.resource, self.clock)
+        self.render(request)
 
         self.assertEqual(channel.result["code"], b"403")
         self.assertEquals(channel.json_body["errcode"], Codes.FORBIDDEN)
@@ -98,12 +61,12 @@ class FilterTestCase(unittest.TestCase):
     def test_add_filter_non_local_user(self):
         _is_mine = self.hs.is_mine
         self.hs.is_mine = lambda target_user: False
-        request, channel = make_request(
+        request, channel = self.make_request(
             "POST",
-            "/_matrix/client/r0/user/%s/filter" % (self.USER_ID),
+            "/_matrix/client/r0/user/%s/filter" % (self.user_id),
             self.EXAMPLE_FILTER_JSON,
         )
-        render(request, self.resource, self.clock)
+        self.render(request)
 
         self.hs.is_mine = _is_mine
         self.assertEqual(channel.result["code"], b"403")
@@ -113,21 +76,21 @@ class FilterTestCase(unittest.TestCase):
         filter_id = self.filtering.add_user_filter(
             user_localpart="apple", user_filter=self.EXAMPLE_FILTER
         )
-        self.clock.advance(1)
+        self.reactor.advance(1)
         filter_id = filter_id.result
-        request, channel = make_request(
-            "GET", "/_matrix/client/r0/user/%s/filter/%s" % (self.USER_ID, filter_id)
+        request, channel = self.make_request(
+            "GET", "/_matrix/client/r0/user/%s/filter/%s" % (self.user_id, filter_id)
         )
-        render(request, self.resource, self.clock)
+        self.render(request)
 
         self.assertEqual(channel.result["code"], b"200")
         self.assertEquals(channel.json_body, self.EXAMPLE_FILTER)
 
     def test_get_filter_non_existant(self):
-        request, channel = make_request(
-            "GET", "/_matrix/client/r0/user/%s/filter/12382148321" % (self.USER_ID)
+        request, channel = self.make_request(
+            "GET", "/_matrix/client/r0/user/%s/filter/12382148321" % (self.user_id)
         )
-        render(request, self.resource, self.clock)
+        self.render(request)
 
         self.assertEqual(channel.result["code"], b"400")
         self.assertEquals(channel.json_body["errcode"], Codes.NOT_FOUND)
@@ -135,18 +98,18 @@ class FilterTestCase(unittest.TestCase):
     # Currently invalid params do not have an appropriate errcode
     # in errors.py
     def test_get_filter_invalid_id(self):
-        request, channel = make_request(
-            "GET", "/_matrix/client/r0/user/%s/filter/foobar" % (self.USER_ID)
+        request, channel = self.make_request(
+            "GET", "/_matrix/client/r0/user/%s/filter/foobar" % (self.user_id)
         )
-        render(request, self.resource, self.clock)
+        self.render(request)
 
         self.assertEqual(channel.result["code"], b"400")
 
     # No ID also returns an invalid_id error
     def test_get_filter_no_id(self):
-        request, channel = make_request(
-            "GET", "/_matrix/client/r0/user/%s/filter/" % (self.USER_ID)
+        request, channel = self.make_request(
+            "GET", "/_matrix/client/r0/user/%s/filter/" % (self.user_id)
         )
-        render(request, self.resource, self.clock)
+        self.render(request)
 
         self.assertEqual(channel.result["code"], b"400")
diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py
index 1c128e81f5..906b348d3e 100644
--- a/tests/rest/client/v2_alpha/test_register.py
+++ b/tests/rest/client/v2_alpha/test_register.py
@@ -1,83 +1,51 @@
 import json
 
-from mock import Mock
-
-from twisted.python import failure
-from twisted.test.proto_helpers import MemoryReactorClock
-
-from synapse.api.errors import InteractiveAuthIncompleteError
-from synapse.http.server import JsonResource
+from synapse.api.constants import LoginType
+from synapse.appservice import ApplicationService
 from synapse.rest.client.v2_alpha.register import register_servlets
-from synapse.util import Clock
 
 from tests import unittest
-from tests.server import make_request, render, setup_test_homeserver
 
 
-class RegisterRestServletTestCase(unittest.TestCase):
-    def setUp(self):
+class RegisterRestServletTestCase(unittest.HomeserverTestCase):
 
-        self.clock = MemoryReactorClock()
-        self.hs_clock = Clock(self.clock)
-        self.url = b"/_matrix/client/r0/register"
+    servlets = [register_servlets]
 
-        self.appservice = None
-        self.auth = Mock(
-            get_appservice_by_req=Mock(side_effect=lambda x: self.appservice)
-        )
+    def make_homeserver(self, reactor, clock):
 
-        self.auth_result = failure.Failure(InteractiveAuthIncompleteError(None))
-        self.auth_handler = Mock(
-            check_auth=Mock(side_effect=lambda x, y, z: self.auth_result),
-            get_session_data=Mock(return_value=None),
-        )
-        self.registration_handler = Mock()
-        self.identity_handler = Mock()
-        self.login_handler = Mock()
-        self.device_handler = Mock()
-        self.device_handler.check_device_registered = Mock(return_value="FAKE")
-
-        self.datastore = Mock(return_value=Mock())
-        self.datastore.get_current_state_deltas = Mock(return_value=[])
-
-        # do the dance to hook it up to the hs global
-        self.handlers = Mock(
-            registration_handler=self.registration_handler,
-            identity_handler=self.identity_handler,
-            login_handler=self.login_handler,
-        )
-        self.hs = setup_test_homeserver(
-            self.addCleanup, http_client=None, clock=self.hs_clock, reactor=self.clock
-        )
-        self.hs.get_auth = Mock(return_value=self.auth)
-        self.hs.get_handlers = Mock(return_value=self.handlers)
-        self.hs.get_auth_handler = Mock(return_value=self.auth_handler)
-        self.hs.get_device_handler = Mock(return_value=self.device_handler)
-        self.hs.get_datastore = Mock(return_value=self.datastore)
+        self.url = b"/_matrix/client/r0/register"
+
+        self.hs = self.setup_test_homeserver()
         self.hs.config.enable_registration = True
         self.hs.config.registrations_require_3pid = []
         self.hs.config.auto_join_rooms = []
+        self.hs.config.enable_registration_captcha = False
 
-        self.resource = JsonResource(self.hs)
-        register_servlets(self.hs, self.resource)
+        return self.hs
 
     def test_POST_appservice_registration_valid(self):
-        user_id = "@kermit:muppet"
-        token = "kermits_access_token"
-        self.appservice = {"id": "1234"}
-        self.registration_handler.appservice_register = Mock(return_value=user_id)
-        self.auth_handler.get_access_token_for_user_id = Mock(return_value=token)
-        request_data = json.dumps({"username": "kermit"})
+        user_id = "@as_user_kermit:test"
+        as_token = "i_am_an_app_service"
+
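+        # Create an application service that owns the @as_user.* namespace
+        # and seed it directly into the datastore's services cache.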
+        appservice = ApplicationService(
+            as_token, self.hs.config.hostname,
+            id="1234",
+            namespaces={
+                "users": [{"regex": r"@as_user.*", "exclusive": True}],
+            },
+        )
+
+        self.hs.get_datastore().services_cache.append(appservice)
+        request_data = json.dumps({"username": "as_user_kermit"})
 
-        request, channel = make_request(
+        request, channel = self.make_request(
             b"POST", self.url + b"?access_token=i_am_an_app_service", request_data
         )
-        render(request, self.resource, self.clock)
+        self.render(request)
 
         self.assertEquals(channel.result["code"], b"200", channel.result)
         det_data = {
             "user_id": user_id,
-            "access_token": token,
             "home_server": self.hs.hostname,
         }
         self.assertDictContainsSubset(det_data, channel.json_body)
@@ -85,81 +53,69 @@ class RegisterRestServletTestCase(unittest.TestCase):
     def test_POST_appservice_registration_invalid(self):
         self.appservice = None  # no application service exists
         request_data = json.dumps({"username": "kermit"})
-        request, channel = make_request(
+        request, channel = self.make_request(
             b"POST", self.url + b"?access_token=i_am_an_app_service", request_data
         )
-        render(request, self.resource, self.clock)
+        self.render(request)
 
         self.assertEquals(channel.result["code"], b"401", channel.result)
 
     def test_POST_bad_password(self):
         request_data = json.dumps({"username": "kermit", "password": 666})
-        request, channel = make_request(b"POST", self.url, request_data)
-        render(request, self.resource, self.clock)
+        request, channel = self.make_request(b"POST", self.url, request_data)
+        self.render(request)
 
         self.assertEquals(channel.result["code"], b"400", channel.result)
         self.assertEquals(channel.json_body["error"], "Invalid password")
 
     def test_POST_bad_username(self):
         request_data = json.dumps({"username": 777, "password": "monkey"})
-        request, channel = make_request(b"POST", self.url, request_data)
-        render(request, self.resource, self.clock)
+        request, channel = self.make_request(b"POST", self.url, request_data)
+        self.render(request)
 
         self.assertEquals(channel.result["code"], b"400", channel.result)
         self.assertEquals(channel.json_body["error"], "Invalid username")
 
     def test_POST_user_valid(self):
-        user_id = "@kermit:muppet"
-        token = "kermits_access_token"
+        user_id = "@kermit:test"
         device_id = "frogfone"
-        request_data = json.dumps(
-            {"username": "kermit", "password": "monkey", "device_id": device_id}
-        )
-        self.registration_handler.check_username = Mock(return_value=True)
-        self.auth_result = (None, {"username": "kermit", "password": "monkey"}, None)
-        self.registration_handler.register = Mock(return_value=(user_id, None))
-        self.auth_handler.get_access_token_for_user_id = Mock(return_value=token)
-        self.device_handler.check_device_registered = Mock(return_value=device_id)
-
-        request, channel = make_request(b"POST", self.url, request_data)
-        render(request, self.resource, self.clock)
+        params = {
+            "username": "kermit",
+            "password": "monkey",
+            "device_id": device_id,
+            "auth": {"type": LoginType.DUMMY},
+        }
+        request_data = json.dumps(params)
+        request, channel = self.make_request(b"POST", self.url, request_data)
+        self.render(request)
 
         det_data = {
             "user_id": user_id,
-            "access_token": token,
             "home_server": self.hs.hostname,
             "device_id": device_id,
         }
         self.assertEquals(channel.result["code"], b"200", channel.result)
         self.assertDictContainsSubset(det_data, channel.json_body)
-        self.auth_handler.get_login_tuple_for_user_id(
-            user_id, device_id=device_id, initial_device_display_name=None
-        )
 
     def test_POST_disabled_registration(self):
         self.hs.config.enable_registration = False
         request_data = json.dumps({"username": "kermit", "password": "monkey"})
-        self.registration_handler.check_username = Mock(return_value=True)
         self.auth_result = (None, {"username": "kermit", "password": "monkey"}, None)
-        self.registration_handler.register = Mock(return_value=("@user:id", "t"))
 
-        request, channel = make_request(b"POST", self.url, request_data)
-        render(request, self.resource, self.clock)
+        request, channel = self.make_request(b"POST", self.url, request_data)
+        self.render(request)
 
         self.assertEquals(channel.result["code"], b"403", channel.result)
         self.assertEquals(channel.json_body["error"], "Registration has been disabled")
 
     def test_POST_guest_registration(self):
-        user_id = "a@b"
         self.hs.config.macaroon_secret_key = "test"
         self.hs.config.allow_guest_access = True
-        self.registration_handler.register = Mock(return_value=(user_id, None))
 
-        request, channel = make_request(b"POST", self.url + b"?kind=guest", b"{}")
-        render(request, self.resource, self.clock)
+        request, channel = self.make_request(b"POST", self.url + b"?kind=guest", b"{}")
+        self.render(request)
 
         det_data = {
-            "user_id": user_id,
             "home_server": self.hs.hostname,
             "device_id": "guest_device",
         }
@@ -169,8 +125,8 @@ class RegisterRestServletTestCase(unittest.TestCase):
     def test_POST_disabled_guest_registration(self):
         self.hs.config.allow_guest_access = False
 
-        request, channel = make_request(b"POST", self.url + b"?kind=guest", b"{}")
-        render(request, self.resource, self.clock)
+        request, channel = self.make_request(b"POST", self.url + b"?kind=guest", b"{}")
+        self.render(request)
 
         self.assertEquals(channel.result["code"], b"403", channel.result)
         self.assertEquals(channel.json_body["error"], "Guest access is disabled")
diff --git a/tests/rest/client/v2_alpha/test_sync.py b/tests/rest/client/v2_alpha/test_sync.py
index 4c30c5f258..99b716f00a 100644
--- a/tests/rest/client/v2_alpha/test_sync.py
+++ b/tests/rest/client/v2_alpha/test_sync.py
@@ -15,9 +15,11 @@
 
 from mock import Mock
 
+from synapse.rest.client.v1 import admin, login, room
 from synapse.rest.client.v2_alpha import sync
 
 from tests import unittest
+from tests.server import TimedOutException
 
 
 class FilterTestCase(unittest.HomeserverTestCase):
@@ -65,3 +67,124 @@ class FilterTestCase(unittest.HomeserverTestCase):
                 ["next_batch", "rooms", "account_data", "to_device", "device_lists"]
             ).issubset(set(channel.json_body.keys()))
         )
+
+
+class SyncTypingTests(unittest.HomeserverTestCase):
+
+    servlets = [
+        admin.register_servlets,
+        room.register_servlets,
+        login.register_servlets,
+        sync.register_servlets,
+    ]
+    user_id = True
+    hijack_auth = False
+
+    def test_sync_backwards_typing(self):
+        """
+        If the typing serial goes backwards and the typing handler is then reset
+        (such as when the master restarts and sets the typing serial to 0), we
+        do not incorrectly return typing information that had a serial greater
+        than the now-reset serial.
+        """
+        typing_url = "/rooms/%s/typing/%s?access_token=%s"
+        sync_url = "/sync?timeout=3000000&access_token=%s&since=%s"
+
+        # Register the user who gets notified
+        user_id = self.register_user("user", "pass")
+        access_token = self.login("user", "pass")
+
+        # Register the user who sends the message
+        other_user_id = self.register_user("otheruser", "pass")
+        other_access_token = self.login("otheruser", "pass")
+
+        # Create a room
+        room = self.helper.create_room_as(user_id, tok=access_token)
+
+        # Invite the other person
+        self.helper.invite(room=room, src=user_id, tok=access_token, targ=other_user_id)
+
+        # The other user joins
+        self.helper.join(room=room, user=other_user_id, tok=other_access_token)
+
+        # The other user sends some messages
+        self.helper.send(room, body="Hi!", tok=other_access_token)
+        self.helper.send(room, body="There!", tok=other_access_token)
+
+        # Start typing.
+        request, channel = self.make_request(
+            "PUT",
+            typing_url % (room, other_user_id, other_access_token),
+            b'{"typing": true, "timeout": 30000}',
+        )
+        self.render(request)
+        self.assertEquals(200, channel.code)
+
+        request, channel = self.make_request(
+            "GET", "/sync?access_token=%s" % (access_token,)
+        )
+        self.render(request)
+        self.assertEquals(200, channel.code)
+        next_batch = channel.json_body["next_batch"]
+
+        # Stop typing.
+        request, channel = self.make_request(
+            "PUT",
+            typing_url % (room, other_user_id, other_access_token),
+            b'{"typing": false}',
+        )
+        self.render(request)
+        self.assertEquals(200, channel.code)
+
+        # Start typing.
+        request, channel = self.make_request(
+            "PUT",
+            typing_url % (room, other_user_id, other_access_token),
+            b'{"typing": true, "timeout": 30000}',
+        )
+        self.render(request)
+        self.assertEquals(200, channel.code)
+
+        # Should return immediately
+        request, channel = self.make_request(
+            "GET", sync_url % (access_token, next_batch)
+        )
+        self.render(request)
+        self.assertEquals(200, channel.code)
+        next_batch = channel.json_body["next_batch"]
+
+        # Reset the typing serial back to 0, as if the master had restarted.
+        typing = self.hs.get_typing_handler()
+        typing._latest_room_serial = 0
+
+        # Since it checks the state token, we need some state to update to
+        # invalidate the stream token.
+        self.helper.send(room, body="There!", tok=other_access_token)
+
+        request, channel = self.make_request(
+            "GET", sync_url % (access_token, next_batch)
+        )
+        self.render(request)
+        self.assertEquals(200, channel.code)
+        next_batch = channel.json_body["next_batch"]
+
+        # This should time out, but it does not: our stream token is ahead of
+        # the now-reset typing serial, so the typing we have already seen
+        # carries a token above it and is reported as new, and the request
+        # returns immediately.
+        request, channel = self.make_request(
+            "GET", sync_url % (access_token, next_batch)
+        )
+        self.render(request)
+        self.assertEquals(200, channel.code)
+        next_batch = channel.json_body["next_batch"]
+
+        # Clear the typing information, so that it doesn't think everything is
+        # in the future.
+        typing._reset()
+
+        # Now it SHOULD fail as it never completes!
+        request, channel = self.make_request(
+            "GET", sync_url % (access_token, next_batch)
+        )
+        self.assertRaises(TimedOutException, self.render, request)
diff --git a/tests/rest/media/v1/test_media_storage.py b/tests/rest/media/v1/test_media_storage.py
index a86901c2d8..ad5e9a612f 100644
--- a/tests/rest/media/v1/test_media_storage.py
+++ b/tests/rest/media/v1/test_media_storage.py
@@ -17,15 +17,21 @@
 import os
 import shutil
 import tempfile
+from binascii import unhexlify
 
 from mock import Mock
+from six.moves.urllib import parse
 
 from twisted.internet import defer, reactor
+from twisted.internet.defer import Deferred
 
+from synapse.config.repository import MediaStorageProviderConfig
 from synapse.rest.media.v1._base import FileInfo
 from synapse.rest.media.v1.filepath import MediaFilePaths
 from synapse.rest.media.v1.media_storage import MediaStorage
 from synapse.rest.media.v1.storage_provider import FileStorageProviderBackend
+from synapse.util.logcontext import make_deferred_yieldable
+from synapse.util.module_loader import load_module
 
 from tests import unittest
 
@@ -83,3 +89,143 @@ class MediaStorageTests(unittest.TestCase):
             body = f.read()
 
         self.assertEqual(test_body, body)
+
+
+class MediaRepoTests(unittest.HomeserverTestCase):
+
+    hijack_auth = True
+    user_id = "@test:user"
+
+    def make_homeserver(self, reactor, clock):
+
+        self.fetches = []
+
+        def get_file(destination, path, output_stream, args=None, max_size=None):
+            """
+            Returns tuple[int,dict,str,int] of file length, response headers,
+            absolute URI, and response code.
+            """
+
+            def write_to(r):
+                data, response = r
+                output_stream.write(data)
+                return response
+
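+            # Don't answer the request here; queue it on self.fetches so the
+            # test can fire the Deferred with (data, response) itself.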
+            d = Deferred()
+            d.addCallback(write_to)
+            self.fetches.append((d, destination, path, args))
+            return make_deferred_yieldable(d)
+
+        client = Mock()
+        client.get_file = get_file
+
+        self.storage_path = self.mktemp()
+        os.mkdir(self.storage_path)
+
+        config = self.default_config()
+        config.media_store_path = self.storage_path
+        config.thumbnail_requirements = {}
+        config.max_image_pixels = 2000000
+
+        provider_config = {
+            "module": "synapse.rest.media.v1.storage_provider.FileStorageProviderBackend",
+            "store_local": True,
+            "store_synchronous": False,
+            "store_remote": True,
+            "config": {"directory": self.storage_path},
+        }
+
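+        # load_module returns the provider class and its parsed module config;
+        # append a MediaStorageProviderConfig with every store_* option off.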
+        loaded = list(load_module(provider_config)) + [
+            MediaStorageProviderConfig(False, False, False)
+        ]
+
+        config.media_storage_providers = [loaded]
+
+        hs = self.setup_test_homeserver(config=config, http_client=client)
+
+        return hs
+
+    def prepare(self, reactor, clock, hs):
+
+        self.media_repo = hs.get_media_repository_resource()
+        self.download_resource = self.media_repo.children[b'download']
+
+        # smol png
+        self.end_content = unhexlify(
+            b"89504e470d0a1a0a0000000d4948445200000001000000010806"
+            b"0000001f15c4890000000a49444154789c63000100000500010d"
+            b"0a2db40000000049454e44ae426082"
+        )
+
+    def _req(self, content_disposition):
+
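+        # Request a remote download, answer it with a canned PNG response
+        # (optionally carrying a Content-Disposition header), and return the
+        # channel so callers can assert on the headers Synapse emits.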
+        request, channel = self.make_request(
+            "GET", "example.com/12345", shorthand=False
+        )
+        request.render(self.download_resource)
+        self.pump()
+
+        # We've made one fetch, to example.com, using the media URL, and asking
+        # the other server not to do a remote fetch
+        self.assertEqual(len(self.fetches), 1)
+        self.assertEqual(self.fetches[0][1], "example.com")
+        self.assertEqual(
+            self.fetches[0][2], "/_matrix/media/v1/download/example.com/12345"
+        )
+        self.assertEqual(self.fetches[0][3], {"allow_remote": "false"})
+
+        headers = {
+            b"Content-Length": [b"%d" % (len(self.end_content))],
+            b"Content-Type": [b'image/png'],
+        }
+        if content_disposition:
+            headers[b"Content-Disposition"] = [content_disposition]
+
+        self.fetches[0][0].callback(
+            (self.end_content, (len(self.end_content), headers))
+        )
+
+        self.pump()
+        self.assertEqual(channel.code, 200)
+
+        return channel
+
+    def test_disposition_filename_ascii(self):
+        """
+        If the filename is filename=<ascii> then Synapse will decode it as an
+        ASCII string, and use filename= in the response.
+        """
+        channel = self._req(b"inline; filename=out.png")
+
+        headers = channel.headers
+        self.assertEqual(headers.getRawHeaders(b"Content-Type"), [b"image/png"])
+        self.assertEqual(
+            headers.getRawHeaders(b"Content-Disposition"), [b"inline; filename=out.png"]
+        )
+
+    def test_disposition_filenamestar_utf8escaped(self):
+        """
+        If the filename is filename*=utf-8''<utf8 escaped> then Synapse will
+        correctly decode it as the UTF-8 string, and use filename* in the
+        response.
+        """
+        filename = parse.quote(u"\u2603".encode('utf8')).encode('ascii')
+        channel = self._req(b"inline; filename*=utf-8''" + filename + b".png")
+
+        headers = channel.headers
+        self.assertEqual(headers.getRawHeaders(b"Content-Type"), [b"image/png"])
+        self.assertEqual(
+            headers.getRawHeaders(b"Content-Disposition"),
+            [b"inline; filename*=utf-8''" + filename + b".png"],
+        )
+
+    def test_disposition_none(self):
+        """
+        If there is no filename, one isn't passed on in the Content-Disposition
+        of the response.
+        """
+        channel = self._req(None)
+
+        headers = channel.headers
+        self.assertEqual(headers.getRawHeaders(b"Content-Type"), [b"image/png"])
+        self.assertEqual(headers.getRawHeaders(b"Content-Disposition"), None)
diff --git a/tests/rest/media/v1/test_url_preview.py b/tests/rest/media/v1/test_url_preview.py
new file mode 100644
index 0000000000..650ce95a6f
--- /dev/null
+++ b/tests/rest/media/v1/test_url_preview.py
@@ -0,0 +1,470 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+import attr
+from netaddr import IPSet
+
+from twisted.internet._resolver import HostResolution
+from twisted.internet.address import IPv4Address, IPv6Address
+from twisted.internet.error import DNSLookupError
+from twisted.python.failure import Failure
+from twisted.test.proto_helpers import AccumulatingProtocol
+from twisted.web._newclient import ResponseDone
+
+from synapse.config.repository import MediaStorageProviderConfig
+from synapse.util.module_loader import load_module
+
+from tests import unittest
+from tests.server import FakeTransport
+
+
+@attr.s
+class FakeResponse(object):
+    version = attr.ib()
+    code = attr.ib()
+    phrase = attr.ib()
+    headers = attr.ib()
+    body = attr.ib()
+    absoluteURI = attr.ib()
+
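+    # Minimal stand-in for the originating request: it exposes just the
+    # absoluteURI attribute.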
+    @property
+    def request(self):
+        @attr.s
+        class FakeTransport(object):
+            absoluteURI = self.absoluteURI
+
+        return FakeTransport()
+
+    def deliverBody(self, protocol):
+        protocol.dataReceived(self.body)
+        protocol.connectionLost(Failure(ResponseDone()))
+
+
+class URLPreviewTests(unittest.HomeserverTestCase):
+
+    hijack_auth = True
+    user_id = "@test:user"
+    end_content = (
+        b'<html><head>'
+        b'<meta property="og:title" content="~matrix~" />'
+        b'<meta property="og:description" content="hi" />'
+        b'</head></html>'
+    )
+
+    def make_homeserver(self, reactor, clock):
+
+        self.storage_path = self.mktemp()
+        os.mkdir(self.storage_path)
+
+        config = self.default_config()
+        config.url_preview_enabled = True
+        config.max_spider_size = 9999999
+        config.url_preview_ip_range_blacklist = IPSet(
+            (
+                "192.168.1.1",
+                "1.0.0.0/8",
+                "3fff:ffff:ffff:ffff:ffff:ffff:ffff:ffff",
+                "2001:800::/21",
+            )
+        )
+        config.url_preview_ip_range_whitelist = IPSet(("1.1.1.1",))
+        config.url_preview_url_blacklist = []
+        config.media_store_path = self.storage_path
+
+        provider_config = {
+            "module": "synapse.rest.media.v1.storage_provider.FileStorageProviderBackend",
+            "store_local": True,
+            "store_synchronous": False,
+            "store_remote": True,
+            "config": {"directory": self.storage_path},
+        }
+
+        loaded = list(load_module(provider_config)) + [
+            MediaStorageProviderConfig(False, False, False)
+        ]
+
+        config.media_storage_providers = [loaded]
+
+        hs = self.setup_test_homeserver(config=config)
+
+        return hs
+
+    def prepare(self, reactor, clock, hs):
+
+        self.media_repo = hs.get_media_repository_resource()
+        self.preview_url = self.media_repo.children[b'preview_url']
+
+        self.lookups = {}
+
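+        # Install a fake name resolver backed by self.lookups, so each test
+        # decides exactly what DNS returns (or raises) for a hostname.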
+        class Resolver(object):
+            def resolveHostName(
+                _self,
+                resolutionReceiver,
+                hostName,
+                portNumber=0,
+                addressTypes=None,
+                transportSemantics='TCP',
+            ):
+
+                resolution = HostResolution(hostName)
+                resolutionReceiver.resolutionBegan(resolution)
+                if hostName not in self.lookups:
+                    raise DNSLookupError("OH NO")
+
+                for i in self.lookups[hostName]:
+                    resolutionReceiver.addressResolved(i[0]('TCP', i[1], portNumber))
+                resolutionReceiver.resolutionComplete()
+                return resolutionReceiver
+
+        self.reactor.nameResolver = Resolver()
+
+    def test_cache_returns_correct_type(self):
+        self.lookups["matrix.org"] = [(IPv4Address, "8.8.8.8")]
+
+        request, channel = self.make_request(
+            "GET", "url_preview?url=http://matrix.org", shorthand=False
+        )
+        request.render(self.preview_url)
+        self.pump()
+
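+        # Hook the outgoing client connection up to a fake server over a pair
+        # of FakeTransports, then feed it a canned HTTP response.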
+        client = self.reactor.tcpClients[0][2].buildProtocol(None)
+        server = AccumulatingProtocol()
+        server.makeConnection(FakeTransport(client, self.reactor))
+        client.makeConnection(FakeTransport(server, self.reactor))
+        client.dataReceived(
+            b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\nContent-Type: text/html\r\n\r\n"
+            % (len(self.end_content),)
+            + self.end_content
+        )
+
+        self.pump()
+        self.assertEqual(channel.code, 200)
+        self.assertEqual(
+            channel.json_body, {"og:title": "~matrix~", "og:description": "hi"}
+        )
+
+        # Check the cache returns the correct response
+        request, channel = self.make_request(
+            "GET", "url_preview?url=http://matrix.org", shorthand=False
+        )
+        request.render(self.preview_url)
+        self.pump()
+
+        # Check the cache response has the same content
+        self.assertEqual(channel.code, 200)
+        self.assertEqual(
+            channel.json_body, {"og:title": "~matrix~", "og:description": "hi"}
+        )
+
+        # Clear the in-memory cache
+        self.assertIn("http://matrix.org", self.preview_url._cache)
+        self.preview_url._cache.pop("http://matrix.org")
+        self.assertNotIn("http://matrix.org", self.preview_url._cache)
+
+        # Check the database cache returns the correct response
+        request, channel = self.make_request(
+            "GET", "url_preview?url=http://matrix.org", shorthand=False
+        )
+        request.render(self.preview_url)
+        self.pump()
+
+        # Check the cache response has the same content
+        self.assertEqual(channel.code, 200)
+        self.assertEqual(
+            channel.json_body, {"og:title": "~matrix~", "og:description": "hi"}
+        )
+
+    def test_non_ascii_preview_httpequiv(self):
+        self.lookups["matrix.org"] = [(IPv4Address, "8.8.8.8")]
+
+        end_content = (
+            b'<html><head>'
+            b'<meta http-equiv="Content-Type" content="text/html; charset=windows-1251"/>'
+            b'<meta property="og:title" content="\xe4\xea\xe0" />'
+            b'<meta property="og:description" content="hi" />'
+            b'</head></html>'
+        )
+
+        request, channel = self.make_request(
+            "GET", "url_preview?url=http://matrix.org", shorthand=False
+        )
+        request.render(self.preview_url)
+        self.pump()
+
+        client = self.reactor.tcpClients[0][2].buildProtocol(None)
+        server = AccumulatingProtocol()
+        server.makeConnection(FakeTransport(client, self.reactor))
+        client.makeConnection(FakeTransport(server, self.reactor))
+        client.dataReceived(
+            (
+                b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
+                b"Content-Type: text/html; charset=\"utf8\"\r\n\r\n"
+            )
+            % (len(end_content),)
+            + end_content
+        )
+
+        self.pump()
+        self.assertEqual(channel.code, 200)
+        self.assertEqual(channel.json_body["og:title"], u"\u0434\u043a\u0430")
+
+    def test_non_ascii_preview_content_type(self):
+        self.lookups["matrix.org"] = [(IPv4Address, "8.8.8.8")]
+
+        end_content = (
+            b'<html><head>'
+            b'<meta property="og:title" content="\xe4\xea\xe0" />'
+            b'<meta property="og:description" content="hi" />'
+            b'</head></html>'
+        )
+
+        request, channel = self.make_request(
+            "GET", "url_preview?url=http://matrix.org", shorthand=False
+        )
+        request.render(self.preview_url)
+        self.pump()
+
+        client = self.reactor.tcpClients[0][2].buildProtocol(None)
+        server = AccumulatingProtocol()
+        server.makeConnection(FakeTransport(client, self.reactor))
+        client.makeConnection(FakeTransport(server, self.reactor))
+        client.dataReceived(
+            (
+                b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
+                b"Content-Type: text/html; charset=\"windows-1251\"\r\n\r\n"
+            )
+            % (len(end_content),)
+            + end_content
+        )
+
+        self.pump()
+        self.assertEqual(channel.code, 200)
+        self.assertEqual(channel.json_body["og:title"], u"\u0434\u043a\u0430")
+
+    def test_ipaddr(self):
+        """
+        IP addresses can be previewed directly.
+        """
+        self.lookups["example.com"] = [(IPv4Address, "8.8.8.8")]
+
+        request, channel = self.make_request(
+            "GET", "url_preview?url=http://example.com", shorthand=False
+        )
+        request.render(self.preview_url)
+        self.pump()
+
+        client = self.reactor.tcpClients[0][2].buildProtocol(None)
+        server = AccumulatingProtocol()
+        server.makeConnection(FakeTransport(client, self.reactor))
+        client.makeConnection(FakeTransport(server, self.reactor))
+        client.dataReceived(
+            b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\nContent-Type: text/html\r\n\r\n"
+            % (len(self.end_content),)
+            + self.end_content
+        )
+
+        self.pump()
+        self.assertEqual(channel.code, 200)
+        self.assertEqual(
+            channel.json_body, {"og:title": "~matrix~", "og:description": "hi"}
+        )
+
+    def test_blacklisted_ip_specific(self):
+        """
+        Blacklisted IP addresses, found via DNS, are not spidered.
+        """
+        self.lookups["example.com"] = [(IPv4Address, "192.168.1.1")]
+
+        request, channel = self.make_request(
+            "GET", "url_preview?url=http://example.com", shorthand=False
+        )
+        request.render(self.preview_url)
+        self.pump()
+
+        # No requests made.
+        self.assertEqual(len(self.reactor.tcpClients), 0)
+        self.assertEqual(channel.code, 403)
+        self.assertEqual(
+            channel.json_body,
+            {
+                'errcode': 'M_UNKNOWN',
+                'error': 'IP address blocked by IP blacklist entry',
+            },
+        )
+
+    def test_blacklisted_ip_range(self):
+        """
+        IPs in blacklisted ranges, found via DNS, are not spidered.
+        """
+        self.lookups["example.com"] = [(IPv4Address, "1.1.1.2")]
+
+        request, channel = self.make_request(
+            "GET", "url_preview?url=http://example.com", shorthand=False
+        )
+        request.render(self.preview_url)
+        self.pump()
+
+        self.assertEqual(channel.code, 403)
+        self.assertEqual(
+            channel.json_body,
+            {
+                'errcode': 'M_UNKNOWN',
+                'error': 'IP address blocked by IP blacklist entry',
+            },
+        )
+
+    def test_blacklisted_ip_specific_direct(self):
+        """
+        Blacklisted IP addresses, accessed directly, are not spidered.
+        """
+        request, channel = self.make_request(
+            "GET", "url_preview?url=http://192.168.1.1", shorthand=False
+        )
+        request.render(self.preview_url)
+        self.pump()
+
+        # No requests made.
+        self.assertEqual(len(self.reactor.tcpClients), 0)
+        self.assertEqual(channel.code, 403)
+        self.assertEqual(
+            channel.json_body,
+            {
+                'errcode': 'M_UNKNOWN',
+                'error': 'IP address blocked by IP blacklist entry',
+            },
+        )
+
+    def test_blacklisted_ip_range_direct(self):
+        """
+        Blacklisted IP ranges, accessed directly, are not spidered.
+        """
+        request, channel = self.make_request(
+            "GET", "url_preview?url=http://1.1.1.2", shorthand=False
+        )
+        request.render(self.preview_url)
+        self.pump()
+
+        self.assertEqual(channel.code, 403)
+        self.assertEqual(
+            channel.json_body,
+            {
+                'errcode': 'M_UNKNOWN',
+                'error': 'IP address blocked by IP blacklist entry',
+            },
+        )
+
+    def test_blacklisted_ip_range_whitelisted_ip(self):
+        """
+        IP addresses that are blacklisted but subsequently whitelisted can be
+        spidered.
+        """
+        self.lookups["example.com"] = [(IPv4Address, "1.1.1.1")]
+
+        request, channel = self.make_request(
+            "GET", "url_preview?url=http://example.com", shorthand=False
+        )
+        request.render(self.preview_url)
+        self.pump()
+
+        client = self.reactor.tcpClients[0][2].buildProtocol(None)
+
+        server = AccumulatingProtocol()
+        server.makeConnection(FakeTransport(client, self.reactor))
+        client.makeConnection(FakeTransport(server, self.reactor))
+
+        client.dataReceived(
+            b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\nContent-Type: text/html\r\n\r\n"
+            % (len(self.end_content),)
+            + self.end_content
+        )
+
+        self.pump()
+        self.assertEqual(channel.code, 200)
+        self.assertEqual(
+            channel.json_body, {"og:title": "~matrix~", "og:description": "hi"}
+        )
+
+    def test_blacklisted_ip_with_external_ip(self):
+        """
+        If a hostname resolves to a blacklisted IP, the request is rejected
+        even if a non-blacklisted IP is also returned.
+        """
+        # Hardcode the URL resolving to the IP we want.
+        self.lookups[u"example.com"] = [
+            (IPv4Address, "1.1.1.2"),
+            (IPv4Address, "8.8.8.8"),
+        ]
+
+        request, channel = self.make_request(
+            "GET", "url_preview?url=http://example.com", shorthand=False
+        )
+        request.render(self.preview_url)
+        self.pump()
+        self.assertEqual(channel.code, 403)
+        self.assertEqual(
+            channel.json_body,
+            {
+                'errcode': 'M_UNKNOWN',
+                'error': 'IP address blocked by IP blacklist entry',
+            },
+        )
+
+    def test_blacklisted_ipv6_specific(self):
+        """
+        Blacklisted IPv6 addresses, found via DNS, are not spidered.
+        """
+        self.lookups["example.com"] = [
+            (IPv6Address, "3fff:ffff:ffff:ffff:ffff:ffff:ffff:ffff")
+        ]
+
+        request, channel = self.make_request(
+            "GET", "url_preview?url=http://example.com", shorthand=False
+        )
+        request.render(self.preview_url)
+        self.pump()
+
+        # No requests made.
+        self.assertEqual(len(self.reactor.tcpClients), 0)
+        self.assertEqual(channel.code, 403)
+        self.assertEqual(
+            channel.json_body,
+            {
+                'errcode': 'M_UNKNOWN',
+                'error': 'IP address blocked by IP blacklist entry',
+            },
+        )
+
+    def test_blacklisted_ipv6_range(self):
+        """
+        IPv6 addresses in blacklisted ranges, found via DNS, are not spidered.
+        """
+        self.lookups["example.com"] = [(IPv6Address, "2001:800::1")]
+
+        request, channel = self.make_request(
+            "GET", "url_preview?url=http://example.com", shorthand=False
+        )
+        request.render(self.preview_url)
+        self.pump()
+
+        self.assertEqual(channel.code, 403)
+        self.assertEqual(
+            channel.json_body,
+            {
+                'errcode': 'M_UNKNOWN',
+                'error': 'IP address blocked by IP blacklist entry',
+            },
+        )
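
The tests above pin down the spider's address-filtering rules: the whitelist overrides the blacklist, and a single blacklisted address in a DNS response rejects the whole lookup. A minimal sketch of that precedence, assuming netaddr IPSets and illustrative range values (not Synapse's actual API):

from netaddr import IPAddress, IPSet

blacklist = IPSet(["1.1.1.0/24", "2001:800::/21"])  # illustrative ranges
whitelist = IPSet(["1.1.1.1"])

def address_allowed(ip):
    # A whitelist entry overrides any matching blacklist entry.
    address = IPAddress(ip)
    if address in whitelist:
        return True
    return address not in blacklist

def lookup_allowed(resolved_ips):
    # One blacklisted address poisons the whole DNS response.
    return all(address_allowed(ip) for ip in resolved_ips)

assert lookup_allowed(["1.1.1.1"])                 # whitelisted despite the range
assert not lookup_allowed(["1.1.1.2", "8.8.8.8"])  # rejected: one bad address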
diff --git a/tests/rest/test_well_known.py b/tests/rest/test_well_known.py
new file mode 100644
index 0000000000..8d8f03e005
--- /dev/null
+++ b/tests/rest/test_well_known.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from synapse.rest.well_known import WellKnownResource
+
+from tests import unittest
+
+
+class WellKnownTests(unittest.HomeserverTestCase):
+    def setUp(self):
+        super(WellKnownTests, self).setUp()
+
+        # replace the JsonResource with a WellKnownResource
+        self.resource = WellKnownResource(self.hs)
+
+    def test_well_known(self):
+        self.hs.config.public_baseurl = "https://tesths"
+        self.hs.config.default_identity_server = "https://testis"
+
+        request, channel = self.make_request(
+            "GET",
+            "/.well-known/matrix/client",
+            shorthand=False,
+        )
+        self.render(request)
+
+        self.assertEqual(request.code, 200)
+        self.assertEqual(
+            channel.json_body, {
+                "m.homeserver": {"base_url": "https://tesths"},
+                "m.identity_server": {"base_url": "https://testis"},
+            }
+        )
+
+    def test_well_known_no_public_baseurl(self):
+        self.hs.config.public_baseurl = None
+
+        request, channel = self.make_request(
+            "GET",
+            "/.well-known/matrix/client",
+            shorthand=False,
+        )
+        self.render(request)
+
+        self.assertEqual(request.code, 404)
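
For reference, the JSON document WellKnownResource is expected to serve at /.well-known/matrix/client, reconstructed from the assertions above (the base URLs are the test's placeholder values; a 404 is returned when public_baseurl is unset):

well_known = {
    "m.homeserver": {"base_url": "https://tesths"},
    "m.identity_server": {"base_url": "https://testis"},
}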
diff --git a/tests/server.py b/tests/server.py
index 7bee58dff1..fc1e76d146 100644
--- a/tests/server.py
+++ b/tests/server.py
@@ -1,4 +1,5 @@
 import json
+import logging
 from io import BytesIO
 
 from six import text_type
@@ -7,19 +8,28 @@ import attr
 from zope.interface import implementer
 
 from twisted.internet import address, threads, udp
-from twisted.internet._resolver import HostResolution
-from twisted.internet.address import IPv4Address
-from twisted.internet.defer import Deferred
+from twisted.internet._resolver import SimpleResolverComplexifier
+from twisted.internet.defer import Deferred, fail, succeed
 from twisted.internet.error import DNSLookupError
-from twisted.internet.interfaces import IReactorPluggableNameResolver
+from twisted.internet.interfaces import IReactorPluggableNameResolver, IResolverSimple
 from twisted.python.failure import Failure
 from twisted.test.proto_helpers import MemoryReactorClock
+from twisted.web.http import unquote
+from twisted.web.http_headers import Headers
 
 from synapse.http.site import SynapseRequest
 from synapse.util import Clock
 
 from tests.utils import setup_test_homeserver as _sth
 
+logger = logging.getLogger(__name__)
+
+
+class TimedOutException(Exception):
+    """
+    A web query timed out.
+    """
+
 
 @attr.s
 class FakeChannel(object):
@@ -28,6 +38,7 @@ class FakeChannel(object):
     wire).
     """
 
+    _reactor = attr.ib()
     result = attr.ib(default=attr.Factory(dict))
     _producer = None
 
@@ -43,6 +54,15 @@ class FakeChannel(object):
             raise Exception("No result yet.")
         return int(self.result["code"])
 
+    @property
+    def headers(self):
+        if not self.result:
+            raise Exception("No result yet.")
+        h = Headers()
+        for i in self.result["headers"]:
+            h.addRawHeader(*i)
+        return h
+
     def writeHeaders(self, version, code, reason, headers):
         self.result["version"] = version
         self.result["code"] = code
@@ -50,6 +70,8 @@ class FakeChannel(object):
         self.result["headers"] = headers
 
     def write(self, content):
+        assert isinstance(content, bytes), "Should be bytes! " + repr(content)
+
         if "body" not in self.result:
             self.result["body"] = b""
 
@@ -57,6 +79,15 @@ class FakeChannel(object):
 
     def registerProducer(self, producer, streaming):
         self._producer = producer
+        self.producerStreaming = streaming
+
+        def _produce():
+            if self._producer:
+                self._producer.resumeProducing()
+                self._reactor.callLater(0.1, _produce)
+
+        if not streaming:
+            self._reactor.callLater(0.0, _produce)
 
     def unregisterProducer(self):
         if self._producer is None:
@@ -98,10 +129,30 @@ class FakeSite:
         return FakeLogger()
 
 
-def make_request(method, path, content=b"", access_token=None, request=SynapseRequest):
+def make_request(
+    reactor,
+    method,
+    path,
+    content=b"",
+    access_token=None,
+    request=SynapseRequest,
+    shorthand=True,
+):
     """
     Make a web request using the given method and path, feed it the
     content, and return the Request and the Channel underneath.
+
+    Args:
+        reactor: The Twisted reactor to schedule the request on.
+        method (bytes/unicode): The HTTP request method ("verb").
+        path (bytes/unicode): The HTTP path, suitably URL encoded (e.g. with
+            UTF-8 characters and spaces escaped).
+        content (bytes or dict): The body of the request. JSON-encoded if
+            a dict.
+        shorthand: Whether to prefix the given path with the usual REST API
+            path (/_matrix/client/r0/) if it doesn't already contain it.
+
+    Returns:
+        A tuple of the synapse.http.site.SynapseRequest and the FakeChannel
+        beneath it.
     """
     if not isinstance(method, bytes):
         method = method.encode('ascii')
@@ -109,23 +160,29 @@ def make_request(method, path, content=b"", access_token=None, request=SynapseRe
     if not isinstance(path, bytes):
         path = path.encode('ascii')
 
-    # Decorate it to be the full path
-    if not path.startswith(b"/_matrix"):
+    # Decorate it to be the full path, if we're using shorthand
+    if shorthand and not path.startswith(b"/_matrix"):
         path = b"/_matrix/client/r0/" + path
         path = path.replace(b"//", b"/")
 
+    if not path.startswith(b"/"):
+        path = b"/" + path
+
     if isinstance(content, text_type):
         content = content.encode('utf8')
 
     site = FakeSite()
-    channel = FakeChannel()
+    channel = FakeChannel(reactor)
 
     req = request(site, channel)
     req.process = lambda: b""
     req.content = BytesIO(content)
+    req.postpath = list(map(unquote, path[1:].split(b'/')))
 
     if access_token:
-        req.requestHeaders.addRawHeader(b"Authorization", b"Bearer " + access_token)
+        req.requestHeaders.addRawHeader(
+            b"Authorization", b"Bearer " + access_token.encode('ascii')
+        )
 
     if content:
         req.requestHeaders.addRawHeader(b"Content-Type", b"application/json")
@@ -151,7 +208,7 @@ def wait_until_result(clock, request, timeout=100):
         x += 1
 
         if x > timeout:
-            raise Exception("Timed out waiting for request to finish.")
+            raise TimedOutException("Timed out waiting for request to finish.")
 
         clock.advance(0.1)
 
@@ -169,30 +226,16 @@ class ThreadedMemoryReactorClock(MemoryReactorClock):
 
     def __init__(self):
         self._udp = []
-        self.lookups = {}
-
-        class Resolver(object):
-            def resolveHostName(
-                _self,
-                resolutionReceiver,
-                hostName,
-                portNumber=0,
-                addressTypes=None,
-                transportSemantics='TCP',
-            ):
-
-                resolution = HostResolution(hostName)
-                resolutionReceiver.resolutionBegan(resolution)
-                if hostName not in self.lookups:
-                    raise DNSLookupError("OH NO")
-
-                resolutionReceiver.addressResolved(
-                    IPv4Address('TCP', self.lookups[hostName], portNumber)
-                )
-                resolutionReceiver.resolutionComplete()
-                return resolution
-
-        self.nameResolver = Resolver()
+        lookups = self.lookups = {}
+
+        @implementer(IResolverSimple)
+        class FakeResolver(object):
+            def getHostByName(self, name, timeout=None):
+                if name not in lookups:
+                    return fail(DNSLookupError("OH NO: unknown %s" % (name, )))
+                return succeed(lookups[name])
+
+        self.nameResolver = SimpleResolverComplexifier(FakeResolver())
         super(ThreadedMemoryReactorClock, self).__init__()
 
     def listenUDP(self, port, protocol, interface='', maxPacketSize=8196):
@@ -284,7 +327,7 @@ def get_clock():
     return (clock, hs_clock)
 
 
-@attr.s
+@attr.s(cmp=False)
 class FakeTransport(object):
     """
     A twisted.internet.interfaces.ITransport implementation which sends all its data
@@ -311,7 +354,13 @@ class FakeTransport(object):
     :type: twisted.internet.interfaces.IReactorTime
     """
 
+    _protocol = attr.ib(default=None)
+    """The Protocol which is producing data for this transport. Optional, but if set
+    will get called back for connectionLost() notifications etc.
+    """
+
     disconnecting = False
+    disconnected = False
     buffer = attr.ib(default=b'')
     producer = attr.ib(default=None)
 
@@ -321,15 +370,29 @@ class FakeTransport(object):
     def getHost(self):
         return None
 
-    def loseConnection(self):
-        self.disconnecting = True
+    def loseConnection(self, reason=None):
+        if not self.disconnecting:
+            logger.info("FakeTransport: loseConnection(%s)", reason)
+            self.disconnecting = True
+            if self._protocol:
+                self._protocol.connectionLost(reason)
+            self.disconnected = True
 
     def abortConnection(self):
-        self.disconnecting = True
+        logger.info("FakeTransport: abortConnection()")
+        self.loseConnection()
 
     def pauseProducing(self):
+        if not self.producer:
+            return
+
         self.producer.pauseProducing()
 
+    def resumeProducing(self):
+        if not self.producer:
+            return
+        self.producer.resumeProducing()
+
     def unregisterProducer(self):
         if not self.producer:
             return
@@ -351,14 +414,29 @@ class FakeTransport(object):
         self.buffer = self.buffer + byt
 
         def _write():
+            if not self.buffer:
+                # nothing to do. Don't write empty buffers: it upsets the
+                # TLSMemoryBIOProtocol
+                return
+
+            if self.disconnected:
+                return
+            logger.info("%s->%s: %s", self._protocol, self.other, self.buffer)
+
             if getattr(self.other, "transport") is not None:
-                self.other.dataReceived(self.buffer)
-                self.buffer = b""
+                try:
+                    self.other.dataReceived(self.buffer)
+                    self.buffer = b""
+                except Exception as e:
+                    logger.warning("Exception writing to protocol: %s", e)
                 return
 
             self._reactor.callLater(0.0, _write)
 
-        _write()
+        # always actually do the write asynchronously. Some protocols (notably the
+        # TLSMemoryBIOProtocol) get very confused if a read comes back while they are
+        # still doing a write. Doing a callLater here breaks the cycle.
+        self._reactor.callLater(0.0, _write)
 
     def writeSequence(self, seq):
         for x in seq:
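
A usage sketch of the reworked make_request helper: the reactor is now an explicit first argument, and shorthand controls whether bare paths get the usual REST prefix (the paths below are illustrative):

reactor = ThreadedMemoryReactorClock()

# shorthand=True (the default) expands this to /_matrix/client/r0/account/whoami
request, channel = make_request(reactor, "GET", "account/whoami")

# shorthand=False leaves the path alone, as the url_preview tests require
request, channel = make_request(
    reactor, "GET", "url_preview?url=http://example.com", shorthand=False
)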
diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py
index 4701eedd45..b1551df7ca 100644
--- a/tests/server_notices/test_resource_limits_server_notices.py
+++ b/tests/server_notices/test_resource_limits_server_notices.py
@@ -4,7 +4,6 @@ from twisted.internet import defer
 
 from synapse.api.constants import EventTypes, ServerNoticeMsgType
 from synapse.api.errors import ResourceLimitError
-from synapse.handlers.auth import AuthHandler
 from synapse.server_notices.resource_limits_server_notices import (
     ResourceLimitsServerNotices,
 )
@@ -13,17 +12,10 @@ from tests import unittest
 from tests.utils import setup_test_homeserver
 
 
-class AuthHandlers(object):
-    def __init__(self, hs):
-        self.auth_handler = AuthHandler(hs)
-
-
 class TestResourceLimitsServerNotices(unittest.TestCase):
     @defer.inlineCallbacks
     def setUp(self):
-        self.hs = yield setup_test_homeserver(self.addCleanup, handlers=None)
-        self.hs.handlers = AuthHandlers(self.hs)
-        self.auth_handler = self.hs.handlers.auth_handler
+        self.hs = yield setup_test_homeserver(self.addCleanup)
         self.server_notices_sender = self.hs.get_server_notices_sender()
 
         # relying on [1] is far from ideal, but the only case where
diff --git a/tests/state/test_v2.py b/tests/state/test_v2.py
index efd85ebe6c..9a5c816927 100644
--- a/tests/state/test_v2.py
+++ b/tests/state/test_v2.py
@@ -19,7 +19,7 @@ from six.moves import zip
 
 import attr
 
-from synapse.api.constants import EventTypes, JoinRules, Membership
+from synapse.api.constants import EventTypes, JoinRules, Membership, RoomVersions
 from synapse.event_auth import auth_types_for_event
 from synapse.events import FrozenEvent
 from synapse.state.v2 import lexicographical_topological_sort, resolve_events_with_store
@@ -539,13 +539,13 @@ class StateTestCase(unittest.TestCase):
                 state_before = dict(state_at_event[prev_events[0]])
             else:
                 state_d = resolve_events_with_store(
+                    RoomVersions.V2,
                     [state_at_event[n] for n in prev_events],
                     event_map=event_map,
                     state_res_store=TestStateResolutionStore(event_map),
                 )
 
-                self.assertTrue(state_d.called)
-                state_before = state_d.result
+                state_before = self.successResultOf(state_d)
 
             state_after = dict(state_before)
             if fake_event.state_key is not None:
@@ -599,6 +599,104 @@ class LexicographicalTestCase(unittest.TestCase):
         self.assertEqual(["o", "l", "n", "m", "p"], res)
 
 
+class SimpleParamStateTestCase(unittest.TestCase):
+    def setUp(self):
+        # We build up a simple DAG.
+
+        event_map = {}
+
+        create_event = FakeEvent(
+            id="CREATE",
+            sender=ALICE,
+            type=EventTypes.Create,
+            state_key="",
+            content={"creator": ALICE},
+        ).to_event([], [])
+        event_map[create_event.event_id] = create_event
+
+        alice_member = FakeEvent(
+            id="IMA",
+            sender=ALICE,
+            type=EventTypes.Member,
+            state_key=ALICE,
+            content=MEMBERSHIP_CONTENT_JOIN,
+        ).to_event([create_event.event_id], [create_event.event_id])
+        event_map[alice_member.event_id] = alice_member
+
+        join_rules = FakeEvent(
+            id="IJR",
+            sender=ALICE,
+            type=EventTypes.JoinRules,
+            state_key="",
+            content={"join_rule": JoinRules.PUBLIC},
+        ).to_event(
+            auth_events=[create_event.event_id, alice_member.event_id],
+            prev_events=[alice_member.event_id],
+        )
+        event_map[join_rules.event_id] = join_rules
+
+        # Bob and Charlie join at the same time, so there is a fork
+        bob_member = FakeEvent(
+            id="IMB",
+            sender=BOB,
+            type=EventTypes.Member,
+            state_key=BOB,
+            content=MEMBERSHIP_CONTENT_JOIN,
+        ).to_event(
+            auth_events=[create_event.event_id, join_rules.event_id],
+            prev_events=[join_rules.event_id],
+        )
+        event_map[bob_member.event_id] = bob_member
+
+        charlie_member = FakeEvent(
+            id="IMC",
+            sender=CHARLIE,
+            type=EventTypes.Member,
+            state_key=CHARLIE,
+            content=MEMBERSHIP_CONTENT_JOIN,
+        ).to_event(
+            auth_events=[create_event.event_id, join_rules.event_id],
+            prev_events=[join_rules.event_id],
+        )
+        event_map[charlie_member.event_id] = charlie_member
+
+        self.event_map = event_map
+        self.create_event = create_event
+        self.alice_member = alice_member
+        self.join_rules = join_rules
+        self.bob_member = bob_member
+        self.charlie_member = charlie_member
+
+        self.state_at_bob = {
+            (e.type, e.state_key): e.event_id
+            for e in [create_event, alice_member, join_rules, bob_member]
+        }
+
+        self.state_at_charlie = {
+            (e.type, e.state_key): e.event_id
+            for e in [create_event, alice_member, join_rules, charlie_member]
+        }
+
+        self.expected_combined_state = {
+            (e.type, e.state_key): e.event_id
+            for e in [create_event, alice_member, join_rules, bob_member, charlie_member]
+        }
+
+    def test_event_map_none(self):
+        # Test that we correctly handle passing `None` as the event_map
+
+        state_d = resolve_events_with_store(
+            RoomVersions.V2,
+            [self.state_at_bob, self.state_at_charlie],
+            event_map=None,
+            state_res_store=TestStateResolutionStore(self.event_map),
+        )
+
+        state = self.successResultOf(state_d)
+
+        self.assert_dict(self.expected_combined_state, state)
+
+
 def pairwise(iterable):
     "s -> (s0,s1), (s1,s2), (s2, s3), ..."
     a, b = itertools.tee(iterable)
@@ -657,7 +755,7 @@ class TestStateResolutionStore(object):
             result.add(event_id)
 
             event = self.event_map[event_id]
-            for aid, _ in event.auth_events:
+            for aid in event.auth_event_ids():
                 stack.append(aid)
 
         return list(result)
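
TestStateResolutionStore walks auth chains via the new event.auth_event_ids() accessor (auth_events previously held (event_id, hash) pairs). The traversal is an ordinary depth-first walk; condensed:

def auth_chain(event_map, start_event_ids):
    # Collect every event reachable through auth_events references.
    seen = set()
    stack = list(start_event_ids)
    while stack:
        event_id = stack.pop()
        if event_id in seen:
            continue
        seen.add(event_id)
        stack.extend(event_map[event_id].auth_event_ids())
    return seen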
diff --git a/tests/storage/test__base.py b/tests/storage/test__base.py
index 52eb05bfbf..dd49a14524 100644
--- a/tests/storage/test__base.py
+++ b/tests/storage/test__base.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2019 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -314,3 +315,90 @@ class CacheDecoratorTestCase(unittest.TestCase):
 
         self.assertEquals(callcount[0], 2)
         self.assertEquals(callcount2[0], 3)
+
+
+class UpsertManyTests(unittest.HomeserverTestCase):
+    def prepare(self, reactor, clock, hs):
+        self.storage = hs.get_datastore()
+
+        self.table_name = "table_" + hs.get_secrets().token_hex(6)
+        self.get_success(
+            self.storage.runInteraction(
+                "create",
+                lambda x, *a: x.execute(*a),
+                "CREATE TABLE %s (id INTEGER, username TEXT, value TEXT)"
+                % (self.table_name,),
+            )
+        )
+        self.get_success(
+            self.storage.runInteraction(
+                "index",
+                lambda x, *a: x.execute(*a),
+                "CREATE UNIQUE INDEX %sindex ON %s(id, username)"
+                % (self.table_name, self.table_name),
+            )
+        )
+
+    def _dump_to_tuple(self, res):
+        for i in res:
+            yield (i["id"], i["username"], i["value"])
+
+    def test_upsert_many(self):
+        """
+        _simple_upsert_many_txn performs the upsert operation across a
+        batch of rows.
+        """
+        # Add some data to an empty table
+        key_names = ["id", "username"]
+        value_names = ["value"]
+        key_values = [[1, "user1"], [2, "user2"]]
+        value_values = [["hello"], ["there"]]
+
+        self.get_success(
+            self.storage.runInteraction(
+                "test",
+                self.storage._simple_upsert_many_txn,
+                self.table_name,
+                key_names,
+                key_values,
+                value_names,
+                value_values,
+            )
+        )
+
+        # Check results are what we expect
+        res = self.get_success(
+            self.storage._simple_select_list(
+                self.table_name, None, ["id, username, value"]
+            )
+        )
+        self.assertEqual(
+            set(self._dump_to_tuple(res)),
+            set([(1, "user1", "hello"), (2, "user2", "there")]),
+        )
+
+        # Update only user2
+        key_values = [[2, "user2"]]
+        value_values = [["bleb"]]
+
+        self.get_success(
+            self.storage.runInteraction(
+                "test",
+                self.storage._simple_upsert_many_txn,
+                self.table_name,
+                key_names,
+                key_values,
+                value_names,
+                value_values,
+            )
+        )
+
+        # Check results are what we expect
+        res = self.get_success(
+            self.storage._simple_select_list(
+                self.table_name, None, ["id, username, value"]
+            )
+        )
+        self.assertEqual(
+            set(self._dump_to_tuple(res)),
+            set([(1, "user1", "hello"), (2, "user2", "bleb")]),
+        )
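
On a database engine with native upsert support, each batch handled by _simple_upsert_many_txn can reduce to a single INSERT ... ON CONFLICT statement. A hedged sketch of the shape (SQLite-style placeholders; table and column names mirror the test above):

sql = (
    "INSERT INTO %s (id, username, value) VALUES (?, ?, ?) "
    "ON CONFLICT (id, username) DO UPDATE SET value = EXCLUDED.value"
) % (table_name,)
txn.executemany(sql, [(1, "user1", "hello"), (2, "user2", "there")])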
diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py
index 81403727c5..5568a607c7 100644
--- a/tests/storage/test_background_update.py
+++ b/tests/storage/test_background_update.py
@@ -11,7 +11,7 @@ class BackgroundUpdateTestCase(unittest.TestCase):
     def setUp(self):
         hs = yield setup_test_homeserver(
             self.addCleanup
-        )  # type: synapse.server.HomeServer
+        )
         self.store = hs.get_datastore()
         self.clock = hs.get_clock()
 
diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py
index 829f47d2e8..f18db8c384 100644
--- a/tests/storage/test_base.py
+++ b/tests/storage/test_base.py
@@ -49,13 +49,17 @@ class SQLBaseStoreTestCase(unittest.TestCase):
         self.db_pool.runWithConnection = runWithConnection
 
         config = Mock()
+        config._disable_native_upserts = True
         config.event_cache_size = 1
         config.database_config = {"name": "sqlite3"}
+        engine = create_engine(config.database_config)
+        fake_engine = Mock(wraps=engine)
+        fake_engine.can_native_upsert = False
         hs = TestHomeServer(
             "test",
             db_pool=self.db_pool,
             config=config,
-            database_engine=create_engine(config.database_config),
+            database_engine=fake_engine,
         )
 
         self.datastore = SQLBaseStore(None, hs)
diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py
index 4577e9422b..858efe4992 100644
--- a/tests/storage/test_client_ips.py
+++ b/tests/storage/test_client_ips.py
@@ -62,6 +62,77 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
             r,
         )
 
+    def test_insert_new_client_ip_none_device_id(self):
+        """
+        An insert with a device ID of NULL will not create a new entry;
+        instead, it updates the existing entry in the user_ips table.
+        """
+        self.reactor.advance(12345678)
+
+        user_id = "@user:id"
+
+        # Add & trigger the storage loop
+        self.get_success(
+            self.store.insert_client_ip(
+                user_id, "access_token", "ip", "user_agent", None
+            )
+        )
+        self.reactor.advance(200)
+        self.pump(0)
+
+        result = self.get_success(
+            self.store._simple_select_list(
+                table="user_ips",
+                keyvalues={"user_id": user_id},
+                retcols=["access_token", "ip", "user_agent", "device_id", "last_seen"],
+                desc="get_user_ip_and_agents",
+            )
+        )
+
+        self.assertEqual(
+            result,
+            [
+                {
+                    'access_token': 'access_token',
+                    'ip': 'ip',
+                    'user_agent': 'user_agent',
+                    'device_id': None,
+                    'last_seen': 12345678000,
+                }
+            ],
+        )
+
+        # Add another & trigger the storage loop
+        self.get_success(
+            self.store.insert_client_ip(
+                user_id, "access_token", "ip", "user_agent", None
+            )
+        )
+        self.reactor.advance(10)
+        self.pump(0)
+
+        result = self.get_success(
+            self.store._simple_select_list(
+                table="user_ips",
+                keyvalues={"user_id": user_id},
+                retcols=["access_token", "ip", "user_agent", "device_id", "last_seen"],
+                desc="get_user_ip_and_agents",
+            )
+        )
+        # Only one result, has been upserted.
+        self.assertEqual(
+            result,
+            [
+                {
+                    'access_token': 'access_token',
+                    'ip': 'ip',
+                    'user_agent': 'user_agent',
+                    'device_id': None,
+                    'last_seen': 12345878000,
+                }
+            ],
+        )
+
     def test_disabled_monthly_active_user(self):
         self.hs.config.limit_usage_by_mau = False
         self.hs.config.max_mau_value = 50
diff --git a/tests/storage/test_end_to_end_keys.py b/tests/storage/test_end_to_end_keys.py
index 8f0aaece40..11fb8c0c19 100644
--- a/tests/storage/test_end_to_end_keys.py
+++ b/tests/storage/test_end_to_end_keys.py
@@ -20,9 +20,6 @@ import tests.utils
 
 
 class EndToEndKeyStoreTestCase(tests.unittest.TestCase):
-    def __init__(self, *args, **kwargs):
-        super(EndToEndKeyStoreTestCase, self).__init__(*args, **kwargs)
-        self.store = None  # type: synapse.storage.DataStore
 
     @defer.inlineCallbacks
     def setUp(self):
@@ -45,6 +42,21 @@ class EndToEndKeyStoreTestCase(tests.unittest.TestCase):
         self.assertDictContainsSubset({"keys": json, "device_display_name": None}, dev)
 
     @defer.inlineCallbacks
+    def test_reupload_key(self):
+        now = 1470174257070
+        json = {"key": "value"}
+
+        yield self.store.store_device("user", "device", None)
+
+        changed = yield self.store.set_e2e_device_keys("user", "device", now, json)
+        self.assertTrue(changed)
+
+        # If we try to upload the same key then we should be told nothing
+        # changed
+        changed = yield self.store.set_e2e_device_keys("user", "device", now, json)
+        self.assertFalse(changed)
+
+    @defer.inlineCallbacks
     def test_get_key_with_device_name(self):
         now = 1470174257070
         json = {"key": "value"}
diff --git a/tests/storage/test_keys.py b/tests/storage/test_keys.py
index 47f4a8ceac..0d2dc9f325 100644
--- a/tests/storage/test_keys.py
+++ b/tests/storage/test_keys.py
@@ -22,9 +22,6 @@ import tests.utils
 
 
 class KeyStoreTestCase(tests.unittest.TestCase):
-    def __init__(self, *args, **kwargs):
-        super(KeyStoreTestCase, self).__init__(*args, **kwargs)
-        self.store = None  # type: synapse.storage.keys.KeyStore
 
     @defer.inlineCallbacks
     def setUp(self):
diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py
index 832e379a83..d6569a82bb 100644
--- a/tests/storage/test_monthly_active_users.py
+++ b/tests/storage/test_monthly_active_users.py
@@ -16,18 +16,21 @@ from mock import Mock
 
 from twisted.internet import defer
 
-from tests.unittest import HomeserverTestCase
+from synapse.api.constants import UserTypes
+
+from tests import unittest
 
 FORTY_DAYS = 40 * 24 * 60 * 60
 
 
-class MonthlyActiveUsersTestCase(HomeserverTestCase):
+class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
     def make_homeserver(self, reactor, clock):
 
         hs = self.setup_test_homeserver()
         self.store = hs.get_datastore()
         hs.config.limit_usage_by_mau = True
         hs.config.max_mau_value = 50
+
         # Advance the clock a bit
         reactor.advance(FORTY_DAYS)
 
@@ -39,14 +42,23 @@ class MonthlyActiveUsersTestCase(HomeserverTestCase):
         user1_email = "user1@matrix.org"
         user2 = "@user2:server"
         user2_email = "user2@matrix.org"
+        user3 = "@user3:server"
+        user3_email = "user3@matrix.org"
+
         threepids = [
             {'medium': 'email', 'address': user1_email},
             {'medium': 'email', 'address': user2_email},
+            {'medium': 'email', 'address': user3_email},
         ]
-        user_num = len(threepids)
+        # -1 because user3 is a support user and does not count
+        user_num = len(threepids) - 1
 
         self.store.register(user_id=user1, token="123", password_hash=None)
         self.store.register(user_id=user2, token="456", password_hash=None)
+        self.store.register(
+            user_id=user3, token="789",
+            password_hash=None, user_type=UserTypes.SUPPORT
+        )
         self.pump()
 
         now = int(self.hs.get_clock().time_msec())
@@ -60,7 +72,7 @@ class MonthlyActiveUsersTestCase(HomeserverTestCase):
 
         active_count = self.store.get_monthly_active_count()
 
-        # Test total counts
+        # Test total counts, ensure user3 (support user) is not counted
         self.assertEquals(self.get_success(active_count), user_num)
 
         # Test user is marked as active
@@ -149,7 +161,7 @@ class MonthlyActiveUsersTestCase(HomeserverTestCase):
 
     def test_populate_monthly_users_is_guest(self):
         # Test that guest users are not added to mau list
-        user_id = "user_id"
+        user_id = "@user_id:host"
         self.store.register(
             user_id=user_id, token="123", password_hash=None, make_guest=True
         )
@@ -220,3 +232,46 @@ class MonthlyActiveUsersTestCase(HomeserverTestCase):
         self.store.user_add_threepid(user2, "email", user2_email, now, now)
         count = self.store.get_registered_reserved_users_count()
         self.assertEquals(self.get_success(count), len(threepids))
+
+    def test_support_user_not_add_to_mau_limits(self):
+        support_user_id = "@support:test"
+        count = self.store.get_monthly_active_count()
+        self.pump()
+        self.assertEqual(self.get_success(count), 0)
+
+        self.store.register(
+            user_id=support_user_id,
+            token="123",
+            password_hash=None,
+            user_type=UserTypes.SUPPORT
+        )
+
+        self.store.upsert_monthly_active_user(support_user_id)
+        count = self.store.get_monthly_active_count()
+        self.pump()
+        self.assertEqual(self.get_success(count), 0)
+
+    def test_track_monthly_users_without_cap(self):
+        self.hs.config.limit_usage_by_mau = False
+        self.hs.config.mau_stats_only = True
+        self.hs.config.max_mau_value = 1  # should not matter
+
+        count = self.store.get_monthly_active_count()
+        self.assertEqual(0, self.get_success(count))
+
+        self.store.upsert_monthly_active_user("@user1:server")
+        self.store.upsert_monthly_active_user("@user2:server")
+        self.pump()
+
+        count = self.store.get_monthly_active_count()
+        self.assertEqual(2, self.get_success(count))
+
+    def test_no_users_when_not_tracking(self):
+        self.hs.config.limit_usage_by_mau = False
+        self.hs.config.mau_stats_only = False
+        self.store.upsert_monthly_active_user = Mock()
+
+        self.store.populate_monthly_active_users("@user:sever")
+        self.pump()
+
+        self.store.upsert_monthly_active_user.assert_not_called()
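
Together these tests pin down three MAU configurations: limiting with tracking, tracking without limiting (mau_stats_only), and neither. The decision logic they imply, sketched with illustrative helper names:

def should_track_mau(config):
    # Track users when enforcing a limit, or when stats-only tracking is on.
    return config.limit_usage_by_mau or config.mau_stats_only

def should_block_for_mau(config, current_mau):
    # Only an enforced limit blocks registration; mau_stats_only never does.
    return config.limit_usage_by_mau and current_mau >= config.max_mau_value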
diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py
index 02bf975fbf..3957561b1e 100644
--- a/tests/storage/test_redaction.py
+++ b/tests/storage/test_redaction.py
@@ -18,7 +18,7 @@ from mock import Mock
 
 from twisted.internet import defer
 
-from synapse.api.constants import EventTypes, Membership
+from synapse.api.constants import EventTypes, Membership, RoomVersions
 from synapse.types import RoomID, UserID
 
 from tests import unittest
@@ -52,6 +52,7 @@ class RedactionTestCase(unittest.TestCase):
         content = {"membership": membership}
         content.update(extra_content)
         builder = self.event_builder_factory.new(
+            RoomVersions.V1,
             {
                 "type": EventTypes.Member,
                 "sender": user.to_string(),
@@ -74,6 +75,7 @@ class RedactionTestCase(unittest.TestCase):
         self.depth += 1
 
         builder = self.event_builder_factory.new(
+            RoomVersions.V1,
             {
                 "type": EventTypes.Message,
                 "sender": user.to_string(),
@@ -94,6 +96,7 @@ class RedactionTestCase(unittest.TestCase):
     @defer.inlineCallbacks
     def inject_redaction(self, room, event_id, user, reason):
         builder = self.event_builder_factory.new(
+            RoomVersions.V1,
             {
                 "type": EventTypes.Redaction,
                 "sender": user.to_string(),
diff --git a/tests/storage/test_registration.py b/tests/storage/test_registration.py
index 3dfb7b903a..cb3cc4d2e5 100644
--- a/tests/storage/test_registration.py
+++ b/tests/storage/test_registration.py
@@ -16,6 +16,8 @@
 
 from twisted.internet import defer
 
+from synapse.api.constants import UserTypes
+
 from tests import unittest
 from tests.utils import setup_test_homeserver
 
@@ -99,6 +101,26 @@ class RegistrationStoreTestCase(unittest.TestCase):
         user = yield self.store.get_user_by_access_token(self.tokens[0])
         self.assertIsNone(user, "access token was not deleted without device_id")
 
+    @defer.inlineCallbacks
+    def test_is_support_user(self):
+        TEST_USER = "@test:test"
+        SUPPORT_USER = "@support:test"
+
+        res = yield self.store.is_support_user(None)
+        self.assertFalse(res)
+        yield self.store.register(user_id=TEST_USER, token="123", password_hash=None)
+        res = yield self.store.is_support_user(TEST_USER)
+        self.assertFalse(res)
+
+        yield self.store.register(
+            user_id=SUPPORT_USER,
+            token="456",
+            password_hash=None,
+            user_type=UserTypes.SUPPORT
+        )
+        res = yield self.store.is_support_user(SUPPORT_USER)
+        self.assertTrue(res)
+
 
 class TokenGenerator:
     def __init__(self):
diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py
index 978c66133d..7fa2f4fd70 100644
--- a/tests/storage/test_roommember.py
+++ b/tests/storage/test_roommember.py
@@ -18,7 +18,7 @@ from mock import Mock
 
 from twisted.internet import defer
 
-from synapse.api.constants import EventTypes, Membership
+from synapse.api.constants import EventTypes, Membership, RoomVersions
 from synapse.types import RoomID, UserID
 
 from tests import unittest
@@ -50,6 +50,7 @@ class RoomMemberStoreTestCase(unittest.TestCase):
     @defer.inlineCallbacks
     def inject_room_member(self, room, user, membership, replaces_state=None):
         builder = self.event_builder_factory.new(
+            RoomVersions.V1,
             {
                 "type": EventTypes.Member,
                 "sender": user.to_string(),
diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py
index 086a39d834..99cd3e09eb 100644
--- a/tests/storage/test_state.py
+++ b/tests/storage/test_state.py
@@ -17,7 +17,7 @@ import logging
 
 from twisted.internet import defer
 
-from synapse.api.constants import EventTypes, Membership
+from synapse.api.constants import EventTypes, Membership, RoomVersions
 from synapse.storage.state import StateFilter
 from synapse.types import RoomID, UserID
 
@@ -28,9 +28,6 @@ logger = logging.getLogger(__name__)
 
 
 class StateStoreTestCase(tests.unittest.TestCase):
-    def __init__(self, *args, **kwargs):
-        super(StateStoreTestCase, self).__init__(*args, **kwargs)
-        self.store = None  # type: synapse.storage.DataStore
 
     @defer.inlineCallbacks
     def setUp(self):
@@ -52,6 +49,7 @@ class StateStoreTestCase(tests.unittest.TestCase):
     @defer.inlineCallbacks
     def inject_state_event(self, room, sender, typ, state_key, content):
         builder = self.event_builder_factory.new(
+            RoomVersions.V1,
             {
                 "type": typ,
                 "sender": sender.to_string(),
diff --git a/tests/test_dns.py b/tests/test_dns.py
deleted file mode 100644
index 90bd34be34..0000000000
--- a/tests/test_dns.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2014-2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from mock import Mock
-
-from twisted.internet import defer
-from twisted.names import dns, error
-
-from synapse.http.endpoint import resolve_service
-
-from tests.utils import MockClock
-
-from . import unittest
-
-
-@unittest.DEBUG
-class DnsTestCase(unittest.TestCase):
-    @defer.inlineCallbacks
-    def test_resolve(self):
-        dns_client_mock = Mock()
-
-        service_name = "test_service.example.com"
-        host_name = "example.com"
-
-        answer_srv = dns.RRHeader(
-            type=dns.SRV, payload=dns.Record_SRV(target=host_name)
-        )
-
-        dns_client_mock.lookupService.return_value = defer.succeed(
-            ([answer_srv], None, None)
-        )
-
-        cache = {}
-
-        servers = yield resolve_service(
-            service_name, dns_client=dns_client_mock, cache=cache
-        )
-
-        dns_client_mock.lookupService.assert_called_once_with(service_name)
-
-        self.assertEquals(len(servers), 1)
-        self.assertEquals(servers, cache[service_name])
-        self.assertEquals(servers[0].host, host_name)
-
-    @defer.inlineCallbacks
-    def test_from_cache_expired_and_dns_fail(self):
-        dns_client_mock = Mock()
-        dns_client_mock.lookupService.return_value = defer.fail(error.DNSServerError())
-
-        service_name = "test_service.example.com"
-
-        entry = Mock(spec_set=["expires"])
-        entry.expires = 0
-
-        cache = {service_name: [entry]}
-
-        servers = yield resolve_service(
-            service_name, dns_client=dns_client_mock, cache=cache
-        )
-
-        dns_client_mock.lookupService.assert_called_once_with(service_name)
-
-        self.assertEquals(len(servers), 1)
-        self.assertEquals(servers, cache[service_name])
-
-    @defer.inlineCallbacks
-    def test_from_cache(self):
-        clock = MockClock()
-
-        dns_client_mock = Mock(spec_set=['lookupService'])
-        dns_client_mock.lookupService = Mock(spec_set=[])
-
-        service_name = "test_service.example.com"
-
-        entry = Mock(spec_set=["expires"])
-        entry.expires = 999999999
-
-        cache = {service_name: [entry]}
-
-        servers = yield resolve_service(
-            service_name, dns_client=dns_client_mock, cache=cache, clock=clock
-        )
-
-        self.assertFalse(dns_client_mock.lookupService.called)
-
-        self.assertEquals(len(servers), 1)
-        self.assertEquals(servers, cache[service_name])
-
-    @defer.inlineCallbacks
-    def test_empty_cache(self):
-        dns_client_mock = Mock()
-
-        dns_client_mock.lookupService.return_value = defer.fail(error.DNSServerError())
-
-        service_name = "test_service.example.com"
-
-        cache = {}
-
-        with self.assertRaises(error.DNSServerError):
-            yield resolve_service(service_name, dns_client=dns_client_mock, cache=cache)
-
-    @defer.inlineCallbacks
-    def test_name_error(self):
-        dns_client_mock = Mock()
-
-        dns_client_mock.lookupService.return_value = defer.fail(error.DNSNameError())
-
-        service_name = "test_service.example.com"
-
-        cache = {}
-
-        servers = yield resolve_service(
-            service_name, dns_client=dns_client_mock, cache=cache
-        )
-
-        self.assertEquals(len(servers), 0)
-        self.assertEquals(len(cache), 0)
diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py
index 411b4a9f86..7ee318e4e8 100644
--- a/tests/test_event_auth.py
+++ b/tests/test_event_auth.py
@@ -16,6 +16,7 @@
 import unittest
 
 from synapse import event_auth
+from synapse.api.constants import RoomVersions
 from synapse.api.errors import AuthError
 from synapse.events import FrozenEvent
 
@@ -35,12 +36,16 @@ class EventAuthTestCase(unittest.TestCase):
         }
 
         # creator should be able to send state
-        event_auth.check(_random_state_event(creator), auth_events, do_sig_check=False)
+        event_auth.check(
+            RoomVersions.V1, _random_state_event(creator), auth_events,
+            do_sig_check=False,
+        )
 
         # joiner should not be able to send state
         self.assertRaises(
             AuthError,
             event_auth.check,
+            RoomVersions.V1,
             _random_state_event(joiner),
             auth_events,
             do_sig_check=False,
@@ -69,13 +74,17 @@ class EventAuthTestCase(unittest.TestCase):
         self.assertRaises(
             AuthError,
             event_auth.check,
+            RoomVersions.V1,
             _random_state_event(pleb),
             auth_events,
             do_sig_check=False,
         ),
 
         # king should be able to send state
-        event_auth.check(_random_state_event(king), auth_events, do_sig_check=False)
+        event_auth.check(
+            RoomVersions.V1, _random_state_event(king), auth_events,
+            do_sig_check=False,
+        )
 
 
 # helpers for making events
diff --git a/tests/test_federation.py b/tests/test_federation.py
index 952a0a7b51..1a5dc32c88 100644
--- a/tests/test_federation.py
+++ b/tests/test_federation.py
@@ -112,7 +112,7 @@ class MessageAcceptTests(unittest.TestCase):
                 "origin_server_ts": 1,
                 "type": "m.room.message",
                 "origin": "test.serv",
-                "content": "hewwo?",
+                "content": {"body": "hewwo?"},
                 "auth_events": [],
                 "prev_events": [("two:test.serv", {}), (most_recent, {})],
             }
@@ -123,8 +123,8 @@ class MessageAcceptTests(unittest.TestCase):
                 "test.serv", lying_event, sent_to_us_directly=True
             )
 
-        # Step the reactor, so the database fetches come back
-        self.reactor.advance(1)
+            # Step the reactor, so the database fetches come back
+            self.reactor.advance(1)
 
         # on_receive_pdu should throw an error
         failure = self.failureResultOf(d)
diff --git a/tests/test_mau.py b/tests/test_mau.py
index bdbacb8448..04f95c942f 100644
--- a/tests/test_mau.py
+++ b/tests/test_mau.py
@@ -21,30 +21,20 @@ from mock import Mock, NonCallableMock
 
 from synapse.api.constants import LoginType
 from synapse.api.errors import Codes, HttpResponseException, SynapseError
-from synapse.http.server import JsonResource
 from synapse.rest.client.v2_alpha import register, sync
-from synapse.util import Clock
 
 from tests import unittest
-from tests.server import (
-    ThreadedMemoryReactorClock,
-    make_request,
-    render,
-    setup_test_homeserver,
-)
 
 
-class TestMauLimit(unittest.TestCase):
-    def setUp(self):
-        self.reactor = ThreadedMemoryReactorClock()
-        self.clock = Clock(self.reactor)
+class TestMauLimit(unittest.HomeserverTestCase):
 
-        self.hs = setup_test_homeserver(
-            self.addCleanup,
+    servlets = [register.register_servlets, sync.register_servlets]
+
+    def make_homeserver(self, reactor, clock):
+
+        self.hs = self.setup_test_homeserver(
             "red",
             http_client=None,
-            clock=self.clock,
-            reactor=self.reactor,
             federation_client=Mock(),
             ratelimiter=NonCallableMock(spec_set=["send_message"]),
         )
@@ -63,10 +53,7 @@ class TestMauLimit(unittest.TestCase):
         self.hs.config.server_notices_mxid_display_name = None
         self.hs.config.server_notices_mxid_avatar_url = None
         self.hs.config.server_notices_room_name = "Test Server Notice Room"
-
-        self.resource = JsonResource(self.hs)
-        register.register_servlets(self.hs, self.resource)
-        sync.register_servlets(self.hs, self.resource)
+        return self.hs
 
     def test_simple_deny_mau(self):
         # Create and sync so that the MAU counts get updated
@@ -184,6 +171,24 @@ class TestMauLimit(unittest.TestCase):
         self.assertEqual(e.code, 403)
         self.assertEqual(e.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)
 
+    def test_tracked_but_not_limited(self):
+        self.hs.config.max_mau_value = 1  # should not matter
+        self.hs.config.limit_usage_by_mau = False
+        self.hs.config.mau_stats_only = True
+
+        # Simply being able to create 2 users indicates that the
+        # limit was not reached.
+        token1 = self.create_user("kermit1")
+        self.do_sync_for_user(token1)
+        token2 = self.create_user("kermit2")
+        self.do_sync_for_user(token2)
+
+        # We do, however, want to verify that the number of tracked
+        # users matches what we expect.
+        count = self.store.get_monthly_active_count()
+        self.reactor.advance(100)
+        self.assertEqual(2, self.successResultOf(count))
+
     def create_user(self, localpart):
         request_data = json.dumps(
             {
@@ -193,8 +198,8 @@ class TestMauLimit(unittest.TestCase):
             }
         )
 
-        request, channel = make_request("POST", "/register", request_data)
-        render(request, self.resource, self.reactor)
+        request, channel = self.make_request("POST", "/register", request_data)
+        self.render(request)
 
         if channel.code != 200:
             raise HttpResponseException(
@@ -206,10 +211,10 @@ class TestMauLimit(unittest.TestCase):
         return access_token
 
     def do_sync_for_user(self, token):
-        request, channel = make_request(
-            "GET", "/sync", access_token=token.encode('ascii')
+        request, channel = self.make_request(
+            "GET", "/sync", access_token=token
         )
-        render(request, self.resource, self.reactor)
+        self.render(request)
 
         if channel.code != 200:
             raise HttpResponseException(
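
The change above follows the general migration pattern to unittest.HomeserverTestCase: declare the servlets to mount, build the homeserver in make_homeserver, and use the built-in self.make_request/self.render helpers. A minimal skeleton under the same assumptions:

class ExampleTestCase(unittest.HomeserverTestCase):
    servlets = [register.register_servlets, sync.register_servlets]

    def make_homeserver(self, reactor, clock):
        return self.setup_test_homeserver("red", http_client=None)

    def test_register_needs_auth(self):
        # An empty body triggers the UI-auth flow, hence the 401.
        request, channel = self.make_request("POST", "/register", b"{}")
        self.render(request)
        self.assertEqual(channel.code, 401)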
diff --git a/tests/test_metrics.py b/tests/test_metrics.py
index 17897711a1..0ff6d0e283 100644
--- a/tests/test_metrics.py
+++ b/tests/test_metrics.py
@@ -19,6 +19,28 @@ from synapse.metrics import InFlightGauge
 from tests import unittest
 
 
+def get_sample_labels_value(sample):
+    """ Extract the labels and values of a sample.
+
+    prometheus_client 0.5 changed the sample type to a named tuple with more
+    members than the plain tuple had in 0.4 and earlier. This function can
+    extract the labels and value from the sample for both sample types.
+
+    Args:
+        sample: The sample to get the labels and value from.
+    Returns:
+        A tuple of (labels, value) from the sample.
+    """
+
+    # If the sample has a labels and value attribute, use those.
+    if hasattr(sample, "labels") and hasattr(sample, "value"):
+        return sample.labels, sample.value
+    # Otherwise fall back to treating it as a plain 3-tuple.
+    else:
+        _, labels, value = sample
+        return labels, value
+
+
 class TestMauLimit(unittest.TestCase):
     def test_basic(self):
         gauge = InFlightGauge(
@@ -75,7 +97,7 @@ class TestMauLimit(unittest.TestCase):
         for r in gauge.collect():
             results[r.name] = {
                 tuple(labels[x] for x in gauge.labels): value
-                for _, labels, value in r.samples
+                for labels, value in map(get_sample_labels_value, r.samples)
             }
 
         return results
diff --git a/tests/test_server.py b/tests/test_server.py
index 4045fdadc3..08fb3fe02f 100644
--- a/tests/test_server.py
+++ b/tests/test_server.py
@@ -19,7 +19,7 @@ from six import StringIO
 
 from twisted.internet.defer import Deferred
 from twisted.python.failure import Failure
-from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactorClock
+from twisted.test.proto_helpers import AccumulatingProtocol
 from twisted.web.resource import Resource
 from twisted.web.server import NOT_DONE_YET
 
@@ -27,14 +27,21 @@ from synapse.api.errors import Codes, SynapseError
 from synapse.http.server import JsonResource
 from synapse.http.site import SynapseSite, logger
 from synapse.util import Clock
+from synapse.util.logcontext import make_deferred_yieldable
 
 from tests import unittest
-from tests.server import FakeTransport, make_request, render, setup_test_homeserver
+from tests.server import (
+    FakeTransport,
+    ThreadedMemoryReactorClock,
+    make_request,
+    render,
+    setup_test_homeserver,
+)
 
 
 class JsonResourceTests(unittest.TestCase):
     def setUp(self):
-        self.reactor = MemoryReactorClock()
+        self.reactor = ThreadedMemoryReactorClock()
         self.hs_clock = Clock(self.reactor)
         self.homeserver = setup_test_homeserver(
             self.addCleanup, http_client=None, clock=self.hs_clock, reactor=self.reactor
@@ -57,7 +64,9 @@ class JsonResourceTests(unittest.TestCase):
             "GET", [re.compile("^/_matrix/foo/(?P<room_id>[^/]*)$")], _callback
         )
 
-        request, channel = make_request(b"GET", b"/_matrix/foo/%E2%98%83?a=%E2%98%83")
+        request, channel = make_request(
+            self.reactor, b"GET", b"/_matrix/foo/%E2%98%83?a=%E2%98%83"
+        )
         render(request, res, self.reactor)
 
         self.assertEqual(request.args, {b'a': [u"\N{SNOWMAN}".encode('utf8')]})
@@ -75,7 +84,7 @@ class JsonResourceTests(unittest.TestCase):
         res = JsonResource(self.homeserver)
         res.register_paths("GET", [re.compile("^/_matrix/foo$")], _callback)
 
-        request, channel = make_request(b"GET", b"/_matrix/foo")
+        request, channel = make_request(self.reactor, b"GET", b"/_matrix/foo")
         render(request, res, self.reactor)
 
         self.assertEqual(channel.result["code"], b'500')
@@ -93,12 +102,12 @@ class JsonResourceTests(unittest.TestCase):
             d = Deferred()
             d.addCallback(_throw)
             self.reactor.callLater(1, d.callback, True)
-            return d
+            return make_deferred_yieldable(d)
 
         res = JsonResource(self.homeserver)
         res.register_paths("GET", [re.compile("^/_matrix/foo$")], _callback)
 
-        request, channel = make_request(b"GET", b"/_matrix/foo")
+        request, channel = make_request(self.reactor, b"GET", b"/_matrix/foo")
         render(request, res, self.reactor)
 
         self.assertEqual(channel.result["code"], b'500')
@@ -115,7 +124,7 @@ class JsonResourceTests(unittest.TestCase):
         res = JsonResource(self.homeserver)
         res.register_paths("GET", [re.compile("^/_matrix/foo$")], _callback)
 
-        request, channel = make_request(b"GET", b"/_matrix/foo")
+        request, channel = make_request(self.reactor, b"GET", b"/_matrix/foo")
         render(request, res, self.reactor)
 
         self.assertEqual(channel.result["code"], b'403')
@@ -136,7 +145,7 @@ class JsonResourceTests(unittest.TestCase):
         res = JsonResource(self.homeserver)
         res.register_paths("GET", [re.compile("^/_matrix/foo$")], _callback)
 
-        request, channel = make_request(b"GET", b"/_matrix/foobar")
+        request, channel = make_request(self.reactor, b"GET", b"/_matrix/foobar")
         render(request, res, self.reactor)
 
         self.assertEqual(channel.result["code"], b'400')
diff --git a/tests/test_terms_auth.py b/tests/test_terms_auth.py
new file mode 100644
index 0000000000..0968e86a7b
--- /dev/null
+++ b/tests/test_terms_auth.py
@@ -0,0 +1,123 @@
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+
+import six
+from mock import Mock
+
+from twisted.test.proto_helpers import MemoryReactorClock
+
+from synapse.rest.client.v2_alpha.register import register_servlets
+from synapse.util import Clock
+
+from tests import unittest
+
+
+class TermsTestCase(unittest.HomeserverTestCase):
+    servlets = [register_servlets]
+
+    def prepare(self, reactor, clock, hs):
+        self.clock = MemoryReactorClock()
+        self.hs_clock = Clock(self.clock)
+        self.url = "/_matrix/client/r0/register"
+        self.registration_handler = Mock()
+        self.auth_handler = Mock()
+        self.device_handler = Mock()
+        hs.config.enable_registration = True
+        hs.config.registrations_require_3pid = []
+        hs.config.auto_join_rooms = []
+        hs.config.enable_registration_captcha = False
+
+    def test_ui_auth(self):
+        self.hs.config.user_consent_at_registration = True
+        self.hs.config.user_consent_policy_name = "My Cool Privacy Policy"
+        self.hs.config.public_baseurl = "https://example.org/"
+        self.hs.config.user_consent_version = "1.0"
+
+        # Do a UI auth request
+        request, channel = self.make_request(b"POST", self.url, b"{}")
+        self.render(request)
+
+        self.assertEquals(channel.result["code"], b"401", channel.result)
+
+        self.assertTrue(channel.json_body is not None)
+        self.assertIsInstance(channel.json_body["session"], six.text_type)
+
+        self.assertIsInstance(channel.json_body["flows"], list)
+        for flow in channel.json_body["flows"]:
+            self.assertIsInstance(flow["stages"], list)
+            self.assertTrue(len(flow["stages"]) > 0)
+            self.assertEquals(flow["stages"][-1], "m.login.terms")
+
+        expected_params = {
+            "m.login.terms": {
+                "policies": {
+                    "privacy_policy": {
+                        "en": {
+                            "name": "My Cool Privacy Policy",
+                            "url": "https://example.org/_matrix/consent?v=1.0",
+                        },
+                        "version": "1.0"
+                    },
+                },
+            },
+        }
+        self.assertIsInstance(channel.json_body["params"], dict)
+        self.assertDictContainsSubset(channel.json_body["params"], expected_params)
+
+        # We have to complete the dummy auth stage before completing the terms stage
+        request_data = json.dumps(
+            {
+                "username": "kermit",
+                "password": "monkey",
+                "auth": {
+                    "session": channel.json_body["session"],
+                    "type": "m.login.dummy",
+                },
+            }
+        )
+
+        self.registration_handler.check_username = Mock(return_value=True)
+
+        request, channel = self.make_request(b"POST", self.url, request_data)
+        self.render(request)
+
+        # We don't bother checking that the response is correct - we'll leave that to
+        # other tests. We just want to make sure we're on the right path.
+        self.assertEquals(channel.result["code"], b"401", channel.result)
+
+        # Finish the UI auth for terms
+        request_data = json.dumps(
+            {
+                "username": "kermit",
+                "password": "monkey",
+                "auth": {
+                    "session": channel.json_body["session"],
+                    "type": "m.login.terms",
+                },
+            }
+        )
+        request, channel = self.make_request(b"POST", self.url, request_data)
+        self.render(request)
+
+        # We're interested in a response that looks like a successful
+        # registration; the exact details are covered by other tests.
+
+        self.assertEquals(channel.result["code"], b"200", channel.result)
+
+        self.assertIsNotNone(channel.json_body)
+        self.assertIsInstance(channel.json_body["user_id"], six.text_type)
+        self.assertIsInstance(channel.json_body["access_token"], six.text_type)
+        self.assertIsInstance(channel.json_body["device_id"], six.text_type)
diff --git a/tests/test_types.py b/tests/test_types.py
index 0f5c8bfaf9..d314a7ff58 100644
--- a/tests/test_types.py
+++ b/tests/test_types.py
@@ -14,7 +14,7 @@
 # limitations under the License.
 
 from synapse.api.errors import SynapseError
-from synapse.types import GroupID, RoomAlias, UserID
+from synapse.types import GroupID, RoomAlias, UserID, map_username_to_mxid_localpart
 
 from tests import unittest
 from tests.utils import TestHomeServer
@@ -79,3 +79,32 @@ class GroupIDTestCase(unittest.TestCase):
             except SynapseError as exc:
                 self.assertEqual(400, exc.code)
                 self.assertEqual("M_UNKNOWN", exc.errcode)
+
+
+class MapUsernameTestCase(unittest.TestCase):
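+    # map_username_to_mxid_localpart lowercases its input and encodes each
+    # character that is illegal in an mxid localpart as "=" plus its hex
+    # value; with case_sensitive=True, capitals are instead escaped as "_"
+    # plus the lowercase letter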
+    def testPassThrough(self):
+        self.assertEqual(map_username_to_mxid_localpart("test1234"), "test1234")
+
+    def testUpperCase(self):
+        self.assertEqual(map_username_to_mxid_localpart("tEST_1234"), "test_1234")
+        self.assertEqual(
+            map_username_to_mxid_localpart("tEST_1234", case_sensitive=True),
+            "t_e_s_t__1234",
+        )
+
+    def testSymbols(self):
+        self.assertEqual(
+            map_username_to_mxid_localpart("test=$?_1234"),
+            "test=3d=24=3f_1234",
+        )
+
+    def testLeadingUnderscore(self):
+        self.assertEqual(map_username_to_mxid_localpart("_test_1234"), "=5ftest_1234")
+
+    def testNonAscii(self):
+        # this should work with either a unicode string or a bytestring
+        self.assertEqual(map_username_to_mxid_localpart(u'têst'), "t=c3=aast")
+        self.assertEqual(
+            map_username_to_mxid_localpart(u'têst'.encode('utf-8')),
+            "t=c3=aast",
+        )
diff --git a/tests/test_utils/__init__.py b/tests/test_utils/__init__.py
new file mode 100644
index 0000000000..a7310cf12a
--- /dev/null
+++ b/tests/test_utils/__init__.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Utilities for running the unit tests
+"""
diff --git a/tests/test_utils/logging_setup.py b/tests/test_utils/logging_setup.py
new file mode 100644
index 0000000000..d0bc8e2112
--- /dev/null
+++ b/tests/test_utils/logging_setup.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+import os
+
+import twisted.logger
+
+from synapse.util.logcontext import LoggingContextFilter
+
+
+class ToTwistedHandler(logging.Handler):
+    """logging handler which sends the logs to the twisted log"""
+    tx_log = twisted.logger.Logger()
+
+    def emit(self, record):
+        log_entry = self.format(record)
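+        # twisted's LogLevel calls the level "warn" where stdlib logging
+        # says "warning"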
+        log_level = record.levelname.lower().replace('warning', 'warn')
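+        # twisted.logger treats the message as a PEP-3101 format string, so
+        # swap the braces out to stop it trying to interpolate the entry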
+        self.tx_log.emit(
+            twisted.logger.LogLevel.levelWithName(log_level),
+            log_entry.replace("{", r"(").replace("}", r")"),
+        )
+
+
+def setup_logging():
+    """Configure the python logging appropriately for the tests.
+
+    (Logs will end up in _trial_temp.)
+    """
+    root_logger = logging.getLogger()
+
+    log_format = (
+        "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s"
+    )
+
+    handler = ToTwistedHandler()
+    formatter = logging.Formatter(log_format)
+    handler.setFormatter(formatter)
+    handler.addFilter(LoggingContextFilter(request=""))
+    root_logger.addHandler(handler)
+
+    log_level = os.environ.get("SYNAPSE_TEST_LOG_LEVEL", "ERROR")
+    root_logger.setLevel(log_level)
diff --git a/tests/test_visibility.py b/tests/test_visibility.py
index 2eea3b098b..455db9f276 100644
--- a/tests/test_visibility.py
+++ b/tests/test_visibility.py
@@ -17,6 +17,7 @@ import logging
 from twisted.internet import defer
 from twisted.internet.defer import succeed
 
+from synapse.api.constants import RoomVersions
 from synapse.events import FrozenEvent
 from synapse.visibility import filter_events_for_server
 
@@ -124,6 +125,7 @@ class FilterEventsForServerTestCase(tests.unittest.TestCase):
     def inject_visibility(self, user_id, visibility):
         content = {"history_visibility": visibility}
         builder = self.event_builder_factory.new(
+            RoomVersions.V1,
             {
                 "type": "m.room.history_visibility",
                 "sender": user_id,
@@ -144,6 +146,7 @@ class FilterEventsForServerTestCase(tests.unittest.TestCase):
         content = {"membership": membership}
         content.update(extra_content)
         builder = self.event_builder_factory.new(
+            RoomVersions.V1,
             {
                 "type": "m.room.member",
                 "sender": user_id,
@@ -163,8 +166,9 @@ class FilterEventsForServerTestCase(tests.unittest.TestCase):
     @defer.inlineCallbacks
     def inject_message(self, user_id, content=None):
         if content is None:
-            content = {"body": "testytest"}
+            content = {"body": "testytest", "msgtype": "m.text"}
         builder = self.event_builder_factory.new(
+            RoomVersions.V1,
             {
                 "type": "m.room.message",
                 "sender": user_id,
diff --git a/tests/unittest.py b/tests/unittest.py
index a59291cc60..fac254ff10 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -13,7 +13,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
+import gc
 import hashlib
 import hmac
 import logging
@@ -31,36 +31,14 @@ from synapse.http.server import JsonResource
 from synapse.http.site import SynapseRequest
 from synapse.server import HomeServer
 from synapse.types import UserID, create_requester
-from synapse.util.logcontext import LoggingContextFilter
+from synapse.util.logcontext import LoggingContext
 
 from tests.server import get_clock, make_request, render, setup_test_homeserver
-from tests.utils import default_config
-
-# Set up putting Synapse's logs into Trial's.
-rootLogger = logging.getLogger()
-
-log_format = (
-    "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s"
-)
-
-
-class ToTwistedHandler(logging.Handler):
-    tx_log = twisted.logger.Logger()
-
-    def emit(self, record):
-        log_entry = self.format(record)
-        log_level = record.levelname.lower().replace('warning', 'warn')
-        self.tx_log.emit(
-            twisted.logger.LogLevel.levelWithName(log_level),
-            log_entry.replace("{", r"(").replace("}", r")"),
-        )
+from tests.test_utils.logging_setup import setup_logging
+from tests.utils import default_config, setupdb
 
-
-handler = ToTwistedHandler()
-formatter = logging.Formatter(log_format)
-handler.setFormatter(formatter)
-handler.addFilter(LoggingContextFilter(request=""))
-rootLogger.addHandler(handler)
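+# prepare the test database and trial-friendly logging once, at import time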
+setupdb()
+setup_logging()
 
 
 def around(target):
@@ -94,7 +72,7 @@ class TestCase(unittest.TestCase):
 
         method = getattr(self, methodName)
 
-        level = getattr(method, "loglevel", getattr(self, "loglevel", logging.ERROR))
+        level = getattr(method, "loglevel", getattr(self, "loglevel", None))
 
         @around(self)
         def setUp(orig):
@@ -102,9 +80,17 @@ class TestCase(unittest.TestCase):
             # traceback when a unit test exits leaving things on the reactor.
             twisted.internet.base.DelayedCall.debug = True
 
-            old_level = logging.getLogger().level
+            # if we're not starting in the sentinel logcontext, something has
+            # leaked from an earlier test; all bets are off, so fail fast.
+            if LoggingContext.current_context() is not LoggingContext.sentinel:
+                self.fail(
+                    "Test starting with non-sentinel logging context %s" % (
+                        LoggingContext.current_context(),
+                    )
+                )
 
-            if old_level != level:
+            old_level = logging.getLogger().level
+            if level is not None and old_level != level:
 
                 @around(self)
                 def tearDown(orig):
@@ -112,9 +98,20 @@ class TestCase(unittest.TestCase):
                     logging.getLogger().setLevel(old_level)
                     return ret
 
-            logging.getLogger().setLevel(level)
+                logging.getLogger().setLevel(level)
+
             return orig()
 
+        @around(self)
+        def tearDown(orig):
+            ret = orig()
+            # force a GC to work around problems with deferreds leaking
+            # logcontexts when they are GCed (see the logcontext docs)
+            gc.collect()
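+            # ... and reset to the sentinel context, so that a leak in this
+            # test cannot trip the check at the start of the next one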
+            LoggingContext.set_current_context(LoggingContext.sentinel)
+
+            return ret
+
     def assertObjectHasAttributes(self, attrs, obj):
         """Asserts that the given object has each of the attributes given, and
         that the value of each matches according to assertEquals."""
@@ -146,6 +143,13 @@ def DEBUG(target):
     return target
 
 
+def INFO(target):
+    """A decorator to set the .loglevel attribute to logging.INFO.
+    Can apply to either a TestCase or an individual test method."""
+    target.loglevel = logging.INFO
+    return target
+
+
 class HomeserverTestCase(TestCase):
     """
     A base TestCase that reduces boilerplate for HomeServer-using test cases.
@@ -182,11 +186,11 @@ class HomeserverTestCase(TestCase):
         for servlet in self.servlets:
             servlet(self.hs, self.resource)
 
-        if hasattr(self, "user_id"):
-            from tests.rest.client.v1.utils import RestHelper
+        from tests.rest.client.v1.utils import RestHelper
 
-            self.helper = RestHelper(self.hs, self.resource, self.user_id)
+        self.helper = RestHelper(self.hs, self.resource, getattr(self, "user_id", None))
 
+        if hasattr(self, "user_id"):
             if self.hijack_auth:
 
                 def get_user_by_access_token(token=None, allow_guest=False):
@@ -251,7 +255,13 @@ class HomeserverTestCase(TestCase):
         """
 
     def make_request(
-        self, method, path, content=b"", access_token=None, request=SynapseRequest
+        self,
+        method,
+        path,
+        content=b"",
+        access_token=None,
+        request=SynapseRequest,
+        shorthand=True,
     ):
         """
         Create a SynapseRequest at the path using the method and containing the
@@ -263,6 +273,8 @@ class HomeserverTestCase(TestCase):
             escaped UTF-8 & spaces and such).
             content (bytes or dict): The body of the request. JSON-encoded, if
             a dict.
+            shorthand (bool): Whether to helpfully prefix the given URL with
+            the usual REST API path if it doesn't already contain it.
 
         Returns:
             A synapse.http.site.SynapseRequest.
@@ -270,7 +282,9 @@ class HomeserverTestCase(TestCase):
         if isinstance(content, dict):
             content = json.dumps(content).encode('utf8')
 
-        return make_request(method, path, content, access_token, request)
+        return make_request(
+            self.reactor, method, path, content, access_token, request, shorthand
+        )
 
     def render(self, request):
         """
@@ -296,7 +310,15 @@ class HomeserverTestCase(TestCase):
         """
         kwargs = dict(kwargs)
         kwargs.update(self._hs_args)
-        return setup_test_homeserver(self.addCleanup, *args, **kwargs)
+        hs = setup_test_homeserver(self.addCleanup, *args, **kwargs)
+        stor = hs.get_datastore()
+
+        # Run the database background updates to completion, so that tests
+        # see a fully-migrated database.
+        if hasattr(stor, "do_next_background_update"):
+            while not self.get_success(stor.has_completed_background_updates()):
+                self.get_success(stor.do_next_background_update(1))
+
+        return hs
 
     def pump(self, by=0.0):
         """
@@ -336,6 +358,7 @@ class HomeserverTestCase(TestCase):
             nonce_str += b"\x00admin"
         else:
             nonce_str += b"\x00notadmin"
+
         want_mac.update(nonce.encode('ascii') + b"\x00" + nonce_str)
         want_mac = want_mac.hexdigest()
 
@@ -373,5 +396,5 @@ class HomeserverTestCase(TestCase):
         self.render(request)
         self.assertEqual(channel.code, 200)
 
-        access_token = channel.json_body["access_token"].encode('ascii')
+        access_token = channel.json_body["access_token"]
         return access_token
diff --git a/tests/util/caches/test_ttlcache.py b/tests/util/caches/test_ttlcache.py
new file mode 100644
index 0000000000..03b3c15db6
--- /dev/null
+++ b/tests/util/caches/test_ttlcache.py
@@ -0,0 +1,83 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from mock import Mock
+
+from synapse.util.caches.ttlcache import TTLCache
+
+from tests import unittest
+
+
+class CacheTestCase(unittest.TestCase):
+    def setUp(self):
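+        # a fake clock, pinned at t=100s; tests advance time by replacing the
+        # side_effect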
+        self.mock_timer = Mock(side_effect=lambda: 100.0)
+        self.cache = TTLCache("test_cache", self.mock_timer)
+
+    def test_get(self):
+        """simple set/get tests"""
+        self.cache.set('one', '1', 10)
+        self.cache.set('two', '2', 20)
+        self.cache.set('three', '3', 30)
+
+        self.assertEqual(len(self.cache), 3)
+
+        self.assertTrue('one' in self.cache)
+        self.assertEqual(self.cache.get('one'), '1')
+        self.assertEqual(self.cache['one'], '1')
+        self.assertEqual(self.cache.get_with_expiry('one'), ('1', 110))
+        self.assertEqual(self.cache._metrics.hits, 3)
+        self.assertEqual(self.cache._metrics.misses, 0)
+
+        self.cache.set('two', '2.5', 20)
+        self.assertEqual(self.cache['two'], '2.5')
+        self.assertEqual(self.cache._metrics.hits, 4)
+
+        # non-existent-item tests
+        self.assertEqual(self.cache.get('four', '4'), '4')
+        self.assertIs(self.cache.get('four', None), None)
+
+        with self.assertRaises(KeyError):
+            self.cache['four']
+
+        with self.assertRaises(KeyError):
+            self.cache.get('four')
+
+        with self.assertRaises(KeyError):
+            self.cache.get_with_expiry('four')
+
+        self.assertEqual(self.cache._metrics.hits, 4)
+        self.assertEqual(self.cache._metrics.misses, 5)
+
+    def test_expiry(self):
+        self.cache.set('one', '1', 10)
+        self.cache.set('two', '2', 20)
+        self.cache.set('three', '3', 30)
+
+        self.assertEqual(len(self.cache), 3)
+        self.assertEqual(self.cache['one'], '1')
+        self.assertEqual(self.cache['two'], '2')
+
+        # enough for the first entry to expire, but not the rest
+        self.mock_timer.side_effect = lambda: 110.0
+
+        self.assertEqual(len(self.cache), 2)
+        self.assertFalse('one' in self.cache)
+        self.assertEqual(self.cache['two'], '2')
+        self.assertEqual(self.cache['three'], '3')
+
+        self.assertEqual(self.cache.get_with_expiry('two'), ('2', 120))
+
+        self.assertEqual(self.cache._metrics.hits, 5)
+        self.assertEqual(self.cache._metrics.misses, 0)
diff --git a/tests/util/test_async_utils.py b/tests/util/test_async_utils.py
new file mode 100644
index 0000000000..84dd71e47a
--- /dev/null
+++ b/tests/util/test_async_utils.py
@@ -0,0 +1,104 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from twisted.internet import defer
+from twisted.internet.defer import CancelledError, Deferred
+from twisted.internet.task import Clock
+
+from synapse.util import logcontext
+from synapse.util.async_helpers import timeout_deferred
+from synapse.util.logcontext import LoggingContext
+
+from tests.unittest import TestCase
+
+
+class TimeoutDeferredTest(TestCase):
+    def setUp(self):
+        self.clock = Clock()
+
+    def test_times_out(self):
+        """Basic test case that checks that the original deferred is cancelled and that
+        the timing-out deferred is errbacked
+        """
+        cancelled = [False]
+
+        def canceller(_d):
+            cancelled[0] = True
+
+        non_completing_d = Deferred(canceller)
+        timing_out_d = timeout_deferred(non_completing_d, 1.0, self.clock)
+
+        self.assertNoResult(timing_out_d)
+        self.assertFalse(cancelled[0], "deferred was cancelled prematurely")
+
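+        # advance the fake reactor past the 1-second timeout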
+        self.clock.pump((1.0,))
+
+        self.assertTrue(cancelled[0], "deferred was not cancelled by timeout")
+        self.failureResultOf(timing_out_d, defer.TimeoutError)
+
+    def test_times_out_when_canceller_throws(self):
+        """Test that we have successfully worked around
+        https://twistedmatrix.com/trac/ticket/9534"""
+
+        def canceller(_d):
+            raise Exception("can't cancel this deferred")
+
+        non_completing_d = Deferred(canceller)
+        timing_out_d = timeout_deferred(non_completing_d, 1.0, self.clock)
+
+        self.assertNoResult(timing_out_d)
+
+        self.clock.pump((1.0,))
+
+        self.failureResultOf(timing_out_d, defer.TimeoutError)
+
+    def test_logcontext_is_preserved_on_cancellation(self):
+        blocking_was_cancelled = [False]
+
+        @defer.inlineCallbacks
+        def blocking():
+            non_completing_d = Deferred()
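+            # wait in the sentinel context, as a well-behaved deferred should;
+            # cancellation should nonetheless run our errbacks in the caller's
+            # context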
+            with logcontext.PreserveLoggingContext():
+                try:
+                    yield non_completing_d
+                except CancelledError:
+                    blocking_was_cancelled[0] = True
+                    raise
+
+        with logcontext.LoggingContext("one") as context_one:
+            # the errbacks should be run in the test logcontext
+            def errback(res, deferred_name):
+                self.assertIs(
+                    LoggingContext.current_context(), context_one,
+                    "errback %s run in unexpected logcontext %s" % (
+                        deferred_name, LoggingContext.current_context(),
+                    )
+                )
+                return res
+
+            original_deferred = blocking()
+            original_deferred.addErrback(errback, "orig")
+            timing_out_d = timeout_deferred(original_deferred, 1.0, self.clock)
+            self.assertNoResult(timing_out_d)
+            self.assertIs(LoggingContext.current_context(), LoggingContext.sentinel)
+            timing_out_d.addErrback(errback, "timingout")
+
+            self.clock.pump((1.0,))
+
+            self.assertTrue(
+                blocking_was_cancelled[0],
+                "non-completing deferred was not cancelled",
+            )
+            self.failureResultOf(timing_out_d, defer.TimeoutError)
+            self.assertIs(LoggingContext.current_context(), context_one)
diff --git a/tests/utils.py b/tests/utils.py
index 5ddf633f56..81c5215e6f 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -26,7 +26,7 @@ from six.moves.urllib import parse as urlparse
 
 from twisted.internet import defer, reactor
 
-from synapse.api.constants import EventTypes
+from synapse.api.constants import EventTypes, RoomVersions
 from synapse.api.errors import CodeMessageException, cs_error
 from synapse.config.server import ServerConfig
 from synapse.federation.transport import server
@@ -124,6 +124,8 @@ def default_config(name):
     config.replicate_user_profiles_to = []
     config.user_consent_server_notice_content = None
     config.block_events_without_consent_error = None
+    config.user_consent_at_registration = False
+    config.user_consent_policy_name = "Privacy Policy"
     config.media_storage_providers = []
     config.autocreate_auto_join_rooms = True
     config.auto_join_rooms = []
@@ -133,12 +135,16 @@ def default_config(name):
     config.hs_disabled_limit_type = ""
     config.max_mau_value = 50
     config.mau_trial_days = 0
+    config.mau_stats_only = False
     config.mau_limits_reserved_threepids = []
     config.admin_contact = None
     config.rc_messages_per_second = 10000
     config.rc_message_burst_count = 10000
     config.register_mxid_from_3pid = None
     config.shadow_server = None
+    config.saml2_enabled = False
+    config.public_baseurl = None
+    config.default_identity_server = None
 
     config.use_frozen_dicts = False
 
@@ -151,7 +157,9 @@ def default_config(name):
     config.update_user_directory = False
 
     def is_threepid_reserved(threepid):
-        return ServerConfig.is_threepid_reserved(config, threepid)
+        return ServerConfig.is_threepid_reserved(
+            config.mau_limits_reserved_threepids, threepid
+        )
 
     config.is_threepid_reserved.side_effect = is_threepid_reserved
 
@@ -619,6 +627,7 @@ def create_room(hs, room_id, creator_id):
     event_creation_handler = hs.get_event_creation_handler()
 
     builder = event_builder_factory.new(
+        RoomVersions.V1,
         {
             "type": EventTypes.Create,
             "state_key": "",
diff --git a/tox.ini b/tox.ini
index 9de5a5704a..14437e7334 100644
--- a/tox.ini
+++ b/tox.ini
@@ -3,31 +3,49 @@ envlist = packaging, py27, py36, pep8, check_isort
 
 [base]
 deps =
-    Twisted>=17.1
     mock
     python-subunit
     junitxml
-
-    # needed by some of the tests
-    lxml
+    coverage
+    parameterized
+
+    # cryptography 2.2 requires setuptools >= 18.5.
+    #
+    # older versions of virtualenv (?) create the virtualenv with the same
+    # version of setuptools as is installed on the system python (and tox runs
+    # virtualenv under python3, so we get whatever setuptools is installed
+    # there).
+    #
+    # so, make sure that we have a recent enough setuptools.
+    setuptools>=18.5
+
+    # we also need a semi-recent version of pip, because old ones fail to
+    # install the "enum34" dependency of cryptography.
+    pip>=10
 
 setenv =
     PYTHONDONTWRITEBYTECODE = no_byte_code
+    COVERAGE_PROCESS_START = {toxinidir}/.coveragerc
 
 [testenv]
 deps =
     {[base]deps}
+extras = all
+
+whitelist_externals =
+    sh
 
 setenv =
     {[base]setenv}
+    postgres: SYNAPSE_POSTGRES = 1
 
 passenv = *
 
 commands =
     /usr/bin/find "{toxinidir}" -name '*.pyc' -delete
-    "{envbindir}/trial" {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:}
-
-[testenv:py27]
+    # Add a sitecustomize hook so that coverage will also measure subprocesses
+    # (enabled by COVERAGE_PROCESS_START above)
+    sh -c 'echo "import coverage; coverage.process_startup()" > {envsitepackagesdir}/../sitecustomize.py'
+    {envbindir}/coverage run "{envbindir}/trial" {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:}
 
 # As of twisted 16.4, trial tries to import the tests as a package (previously
 # it loaded the files explicitly), which means they need to be on the
@@ -52,14 +70,7 @@ commands =
 # )
 usedevelop=true
 
-[testenv:py27-postgres]
-usedevelop=true
-deps =
-    {[base]deps}
-     psycopg2
-setenv =
-    {[base]setenv}
-    SYNAPSE_POSTGRES = 1
+
 
 # A test suite for the oldest supported versions of Python libraries, to catch
 # any uses of APIs not available in them.
@@ -81,23 +92,8 @@ commands =
     pip install -e .
     {envbindir}/trial {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:}
 
-[testenv:py35]
-usedevelop=true
-
-[testenv:py36]
-usedevelop=true
-
-[testenv:py36-postgres]
-usedevelop=true
-deps =
-    {[base]deps}
-     psycopg2
-setenv =
-    {[base]setenv}
-    SYNAPSE_POSTGRES = 1
-
-
 [testenv:packaging]
+skip_install=True
 deps =
     check-manifest
 commands =
@@ -108,12 +104,12 @@ skip_install = True
 basepython = python3.6
 deps =
     flake8
-commands = /bin/sh -c "flake8 synapse tests scripts scripts-dev scripts/register_new_matrix_user scripts/synapse_port_db synctl {env:PEP8SUFFIX:}"
+commands = /bin/sh -c "flake8 synapse tests scripts scripts-dev scripts/hash_password scripts/register_new_matrix_user scripts/synapse_port_db synctl {env:PEP8SUFFIX:}"
 
 [testenv:check_isort]
 skip_install = True
 deps = isort
-commands = /bin/sh -c "isort -c -sp setup.cfg -rc synapse tests"
+commands = /bin/sh -c "isort -c -df -sp setup.cfg -rc synapse tests"
 
 [testenv:check-newsfragment]
 skip_install = True
@@ -121,3 +117,13 @@ deps = towncrier>=18.6.0rc1
 commands =
    python -m towncrier.check --compare-with=origin/develop
 basepython = python3.6
+
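+# combine the per-process coverage data written by the test envs and upload
+# the result to codecov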
+[testenv:codecov]
+skip_install = True
+deps =
+    coverage
+    codecov
+commands =
+    coverage combine
+    coverage xml
+    codecov -X gcov