author     Amber Brown <hawkowl@atleastfornow.net>  2018-09-03 21:08:35 +1000
committer  GitHub <noreply@github.com>  2018-09-03 21:08:35 +1000
commit     4fc4b881c58fd638db5f4dac0863721111b67af0 (patch)
tree       cc1604f5e3b4e0a263e0e11a55b62ef4006a64a1
parent     The project `matrix-synapse-auto-deploy` does not seem to be maintained anymore. (diff)
parent     Merge pull request #3777 from matrix-org/neilj/fix_register_user_registration (diff)
download   synapse-4fc4b881c58fd638db5f4dac0863721111b67af0.tar.xz
Merge branch 'develop' into develop
-rw-r--r--  .circleci/config.yml  48
-rw-r--r--  .dockerignore  3
-rw-r--r--  .github/ISSUE_TEMPLATE.md  5
-rw-r--r--  .travis.yml  23
-rw-r--r--  AUTHORS.rst  5
-rw-r--r--  CHANGES.md  2634
-rw-r--r--  CHANGES.rst  2761
-rw-r--r--  CONTRIBUTING.rst  45
-rw-r--r--  Dockerfile  19
-rw-r--r--  MANIFEST.in  6
-rw-r--r--  README.rst  39
-rw-r--r--  changelog.d/.gitignore  1
-rw-r--r--  changelog.d/3659.feature  1
-rw-r--r--  changelog.d/3673.misc  1
-rw-r--r--  changelog.d/3680.feature  1
-rw-r--r--  changelog.d/3722.bugfix  1
-rw-r--r--  changelog.d/3724.feature  1
-rw-r--r--  changelog.d/3725.misc  1
-rw-r--r--  changelog.d/3726.misc  1
-rw-r--r--  changelog.d/3727.misc  1
-rw-r--r--  changelog.d/3730.misc  1
-rw-r--r--  changelog.d/3734.misc  1
-rw-r--r--  changelog.d/3735.misc  1
-rw-r--r--  changelog.d/3746.misc  1
-rw-r--r--  changelog.d/3747.bugfix  1
-rw-r--r--  changelog.d/3749.feature  1
-rw-r--r--  changelog.d/3751.feature  1
-rw-r--r--  changelog.d/3753.bugfix  1
-rw-r--r--  changelog.d/3754.bugfix  1
-rw-r--r--  changelog.d/3755.bugfix  1
-rw-r--r--  changelog.d/3756.bugfix  1
-rw-r--r--  changelog.d/3758.bugfix  1
-rw-r--r--  changelog.d/3760.bugfix  1
-rw-r--r--  changelog.d/3764.misc  1
-rw-r--r--  changelog.d/3768.bugfix  1
-rw-r--r--  changelog.d/3777.bugfix  1
-rw-r--r--  contrib/docker/README.md  116
-rw-r--r--  contrib/docker/docker-compose.yml  1
-rw-r--r--  contrib/grafana/README.md  6
-rw-r--r--  contrib/grafana/synapse.json  4969
-rw-r--r--  docker/Dockerfile  35
-rw-r--r--  docker/README.md  124
-rw-r--r--  docker/conf/homeserver.yaml (renamed from contrib/docker/conf/homeserver.yaml)  0
-rw-r--r--  docker/conf/log.config (renamed from contrib/docker/conf/log.config)  0
-rwxr-xr-x  docker/start.py (renamed from contrib/docker/start.py)  0
-rw-r--r--  docs/admin_api/register_api.rst  63
-rw-r--r--  docs/admin_api/user_admin_api.rst  17
-rw-r--r--  docs/workers.rst  28
-rwxr-xr-x  jenkins/prepare_synapse.sh  2
-rw-r--r--  pyproject.toml  30
-rwxr-xr-x  scripts-dev/federation_client.py  65
-rwxr-xr-x  scripts/register_new_matrix_user  32
-rw-r--r--  setup.cfg  24
-rw-r--r--  synapse/__init__.py  3
-rw-r--r--  synapse/api/auth.py  241
-rw-r--r--  synapse/api/constants.py  20
-rw-r--r--  synapse/api/errors.py  154
-rw-r--r--  synapse/api/filtering.py  33
-rw-r--r--  synapse/api/ratelimiting.py  2
-rw-r--r--  synapse/api/urls.py  2
-rw-r--r--  synapse/app/__init__.py  4
-rw-r--r--  synapse/app/_base.py  17
-rw-r--r--  synapse/app/appservice.py  18
-rw-r--r--  synapse/app/client_reader.py  34
-rw-r--r--  synapse/app/event_creator.py  34
-rw-r--r--  synapse/app/federation_reader.py  30
-rw-r--r--  synapse/app/federation_sender.py  23
-rw-r--r--  synapse/app/frontend_proxy.py  57
-rwxr-xr-x  synapse/app/homeserver.py  106
-rw-r--r--  synapse/app/media_repository.py  22
-rw-r--r--  synapse/app/pusher.py  19
-rw-r--r--  synapse/app/synchrotron.py  42
-rwxr-xr-x  synapse/app/synctl.py  9
-rw-r--r--  synapse/app/user_dir.py  17
-rw-r--r--  synapse/appservice/__init__.py  16
-rw-r--r--  synapse/appservice/api.py  14
-rw-r--r--  synapse/appservice/scheduler.py  4
-rw-r--r--  synapse/config/_base.py  3
-rw-r--r--  synapse/config/api.py  4
-rw-r--r--  synapse/config/appservice.py  24
-rw-r--r--  synapse/config/homeserver.py  36
-rw-r--r--  synapse/config/jwt.py  1
-rw-r--r--  synapse/config/key.py  21
-rw-r--r--  synapse/config/logger.py  32
-rw-r--r--  synapse/config/password_auth_providers.py  4
-rw-r--r--  synapse/config/registration.py  4
-rw-r--r--  synapse/config/repository.py  2
-rw-r--r--  synapse/config/server.py  89
-rw-r--r--  synapse/config/server_notices_config.py  3
-rw-r--r--  synapse/config/tls.py  11
-rw-r--r--  synapse/config/voip.py  4
-rw-r--r--  synapse/crypto/context_factory.py  88
-rw-r--r--  synapse/crypto/event_signing.py  10
-rw-r--r--  synapse/crypto/keyclient.py  26
-rw-r--r--  synapse/crypto/keyring.py  48
-rw-r--r--  synapse/event_auth.py  130
-rw-r--r--  synapse/events/__init__.py  3
-rw-r--r--  synapse/events/builder.py  5
-rw-r--r--  synapse/events/snapshot.py  202
-rw-r--r--  synapse/events/utils.py  9
-rw-r--r--  synapse/events/validator.py  8
-rw-r--r--  synapse/federation/federation_base.py  11
-rw-r--r--  synapse/federation/federation_client.py  342
-rw-r--r--  synapse/federation/federation_server.py  278
-rw-r--r--  synapse/federation/persistence.py  5
-rw-r--r--  synapse/federation/send_queue.py  79
-rw-r--r--  synapse/federation/transaction_queue.py  113
-rw-r--r--  synapse/federation/transport/client.py  16
-rw-r--r--  synapse/federation/transport/server.py  180
-rw-r--r--  synapse/federation/units.py  4
-rw-r--r--  synapse/groups/attestations.py  15
-rw-r--r--  synapse/groups/groups_server.py  7
-rw-r--r--  synapse/handlers/__init__.py  10
-rw-r--r--  synapse/handlers/_base.py  6
-rw-r--r--  synapse/handlers/admin.py  4
-rw-r--r--  synapse/handlers/appservice.py  28
-rw-r--r--  synapse/handlers/auth.py  129
-rw-r--r--  synapse/handlers/deactivate_account.py  55
-rw-r--r--  synapse/handlers/device.py  20
-rw-r--r--  synapse/handlers/devicemessage.py  3
-rw-r--r--  synapse/handlers/directory.py  9
-rw-r--r--  synapse/handlers/e2e_keys.py  17
-rw-r--r--  synapse/handlers/events.py  40
-rw-r--r--  synapse/handlers/federation.py  665
-rw-r--r--  synapse/handlers/groups_local.py  7
-rw-r--r--  synapse/handlers/identity.py  102
-rw-r--r--  synapse/handlers/initial_sync.py  53
-rw-r--r--  synapse/handlers/message.py  431
-rw-r--r--  synapse/handlers/pagination.py  298
-rw-r--r--  synapse/handlers/presence.py  54
-rw-r--r--  synapse/handlers/profile.py  128
-rw-r--r--  synapse/handlers/read_marker.py  7
-rw-r--r--  synapse/handlers/receipts.py  26
-rw-r--r--  synapse/handlers/register.py  26
-rw-r--r--  synapse/handlers/room.py  93
-rw-r--r--  synapse/handlers/room_list.py  28
-rw-r--r--  synapse/handlers/room_member.py  30
-rw-r--r--  synapse/handlers/room_member_worker.py  40
-rw-r--r--  synapse/handlers/search.py  33
-rw-r--r--  synapse/handlers/set_password.py  1
-rw-r--r--  synapse/handlers/sync.py  367
-rw-r--r--  synapse/handlers/typing.py  11
-rw-r--r--  synapse/handlers/user_directory.py  20
-rw-r--r--  synapse/http/additional_resource.py  3
-rw-r--r--  synapse/http/client.py  105
-rw-r--r--  synapse/http/endpoint.py  109
-rw-r--r--  synapse/http/matrixfederationclient.py  90
-rw-r--r--  synapse/http/request_metrics.py  115
-rw-r--r--  synapse/http/server.py  157
-rw-r--r--  synapse/http/servlet.py  75
-rw-r--r--  synapse/http/site.py  250
-rw-r--r--  synapse/metrics/__init__.py  52
-rw-r--r--  synapse/metrics/background_process_metrics.py  201
-rw-r--r--  synapse/notifier.py  28
-rw-r--r--  synapse/push/action_generator.py  6
-rw-r--r--  synapse/push/baserules.py  3
-rw-r--r--  synapse/push/bulk_push_rule_evaluator.py  24
-rw-r--r--  synapse/push/clientformat.py  6
-rw-r--r--  synapse/push/emailpusher.py  11
-rw-r--r--  synapse/push/httppusher.py  12
-rw-r--r--  synapse/push/mailer.py  33
-rw-r--r--  synapse/push/presentable_names.py  6
-rw-r--r--  synapse/push/push_rule_evaluator.py  4
-rw-r--r--  synapse/push/push_tools.py  5
-rw-r--r--  synapse/push/pusher.py  3
-rw-r--r--  synapse/push/pusherpool.py  20
-rw-r--r--  synapse/python_dependencies.py  11
-rw-r--r--  synapse/replication/http/__init__.py  4
-rw-r--r--  synapse/replication/http/_base.py  215
-rw-r--r--  synapse/replication/http/federation.py  259
-rw-r--r--  synapse/replication/http/membership.py  294
-rw-r--r--  synapse/replication/http/send_event.py  115
-rw-r--r--  synapse/replication/slave/storage/_base.py  4
-rw-r--r--  synapse/replication/slave/storage/appservice.py  3
-rw-r--r--  synapse/replication/slave/storage/client_ips.py  3
-rw-r--r--  synapse/replication/slave/storage/deviceinbox.py  7
-rw-r--r--  synapse/replication/slave/storage/devices.py  5
-rw-r--r--  synapse/replication/slave/storage/directory.py  3
-rw-r--r--  synapse/replication/slave/storage/events.py  7
-rw-r--r--  synapse/replication/slave/storage/filtering.py  3
-rw-r--r--  synapse/replication/slave/storage/groups.py  5
-rw-r--r--  synapse/replication/slave/storage/keys.py  3
-rw-r--r--  synapse/replication/slave/storage/presence.py  8
-rw-r--r--  synapse/replication/slave/storage/push_rule.py  5
-rw-r--r--  synapse/replication/slave/storage/pushers.py  4
-rw-r--r--  synapse/replication/slave/storage/receipts.py  6
-rw-r--r--  synapse/replication/slave/storage/registration.py  3
-rw-r--r--  synapse/replication/slave/storage/room.py  3
-rw-r--r--  synapse/replication/slave/storage/transactions.py  14
-rw-r--r--  synapse/replication/tcp/client.py  21
-rw-r--r--  synapse/replication/tcp/commands.py  28
-rw-r--r--  synapse/replication/tcp/protocol.py  135
-rw-r--r--  synapse/replication/tcp/resource.py  35
-rw-r--r--  synapse/replication/tcp/streams.py  5
-rw-r--r--  synapse/rest/__init__.py  72
-rw-r--r--  synapse/rest/client/transactions.py  47
-rw-r--r--  synapse/rest/client/v1/admin.py  215
-rw-r--r--  synapse/rest/client/v1/base.py  12
-rw-r--r--  synapse/rest/client/v1/directory.py  18
-rw-r--r--  synapse/rest/client/v1/events.py  10
-rw-r--r--  synapse/rest/client/v1/initial_sync.py  4
-rw-r--r--  synapse/rest/client/v1/login.py  29
-rw-r--r--  synapse/rest/client/v1/logout.py  8
-rw-r--r--  synapse/rest/client/v1/presence.py  16
-rw-r--r--  synapse/rest/client/v1/profile.py  5
-rw-r--r--  synapse/rest/client/v1/push_rule.py  24
-rw-r--r--  synapse/rest/client/v1/pusher.py  32
-rw-r--r--  synapse/rest/client/v1/room.py  104
-rw-r--r--  synapse/rest/client/v1/voip.py  9
-rw-r--r--  synapse/rest/client/v1_only/__init__.py  3
-rw-r--r--  synapse/rest/client/v1_only/base.py  39
-rw-r--r--  synapse/rest/client/v1_only/register.py (renamed from synapse/rest/client/v1/register.py)  118
-rw-r--r--  synapse/rest/client/v2_alpha/account.py  105
-rw-r--r--  synapse/rest/client/v2_alpha/account_data.py  10
-rw-r--r--  synapse/rest/client/v2_alpha/auth.py  5
-rw-r--r--  synapse/rest/client/v2_alpha/devices.py  27
-rw-r--r--  synapse/rest/client/v2_alpha/filter.py  10
-rw-r--r--  synapse/rest/client/v2_alpha/groups.py  4
-rw-r--r--  synapse/rest/client/v2_alpha/keys.py  7
-rw-r--r--  synapse/rest/client/v2_alpha/notifications.py  11
-rw-r--r--  synapse/rest/client/v2_alpha/openid.py  10
-rw-r--r--  synapse/rest/client/v2_alpha/read_marker.py  6
-rw-r--r--  synapse/rest/client/v2_alpha/receipts.py  6
-rw-r--r--  synapse/rest/client/v2_alpha/register.py  75
-rw-r--r--  synapse/rest/client/v2_alpha/report_event.py  32
-rw-r--r--  synapse/rest/client/v2_alpha/sendtodevice.py  2
-rw-r--r--  synapse/rest/client/v2_alpha/sync.py  28
-rw-r--r--  synapse/rest/client/v2_alpha/tags.py  10
-rw-r--r--  synapse/rest/client/v2_alpha/thirdparty.py  1
-rw-r--r--  synapse/rest/client/v2_alpha/user_directory.py  1
-rw-r--r--  synapse/rest/client/versions.py  17
-rw-r--r--  synapse/rest/consent/consent_resource.py  27
-rw-r--r--  synapse/rest/key/v1/server_key_resource.py  10
-rw-r--r--  synapse/rest/key/v2/__init__.py  1
-rw-r--r--  synapse/rest/key/v2/local_key_resource.py  10
-rw-r--r--  synapse/rest/key/v2/remote_key_resource.py  16
-rw-r--r--  synapse/rest/media/v0/content_repository.py  18
-rw-r--r--  synapse/rest/media/v1/_base.py  19
-rw-r--r--  synapse/rest/media/v1/config_resource.py  48
-rw-r--r--  synapse/rest/media/v1/download_resource.py  6
-rw-r--r--  synapse/rest/media/v1/filepath.py  2
-rw-r--r--  synapse/rest/media/v1/identicon_resource.py  7
-rw-r--r--  synapse/rest/media/v1/media_repository.py  69
-rw-r--r--  synapse/rest/media/v1/media_storage.py  24
-rw-r--r--  synapse/rest/media/v1/preview_url_resource.py  39
-rw-r--r--  synapse/rest/media/v1/storage_provider.py  11
-rw-r--r--  synapse/rest/media/v1/thumbnail_resource.py  11
-rw-r--r--  synapse/rest/media/v1/thumbnailer.py  4
-rw-r--r--  synapse/rest/media/v1/upload_resource.py  16
-rw-r--r--  synapse/secrets.py  41
-rw-r--r--  synapse/server.py  98
-rw-r--r--  synapse/server_notices/consent_server_notices.py  3
-rw-r--r--  synapse/server_notices/resource_limits_server_notices.py  203
-rw-r--r--  synapse/server_notices/server_notices_manager.py  41
-rw-r--r--  synapse/server_notices/server_notices_sender.py  33
-rw-r--r--  synapse/state/__init__.py (renamed from synapse/state.py)  564
-rw-r--r--  synapse/state/v1.py  321
-rw-r--r--  synapse/static/client/register/index.html  2
-rw-r--r--  synapse/static/client/register/js/recaptcha_ajax.js  195
-rw-r--r--  synapse/storage/__init__.py  65
-rw-r--r--  synapse/storage/_base.py  90
-rw-r--r--  synapse/storage/account_data.py  31
-rw-r--r--  synapse/storage/appservice.py  8
-rw-r--r--  synapse/storage/background_updates.py  22
-rw-r--r--  synapse/storage/client_ips.py  37
-rw-r--r--  synapse/storage/deviceinbox.py  18
-rw-r--r--  synapse/storage/devices.py  77
-rw-r--r--  synapse/storage/directory.py  9
-rw-r--r--  synapse/storage/end_to_end_keys.py  40
-rw-r--r--  synapse/storage/engines/__init__.py  7
-rw-r--r--  synapse/storage/engines/postgres.py  9
-rw-r--r--  synapse/storage/engines/sqlite3.py  4
-rw-r--r--  synapse/storage/event_federation.py  41
-rw-r--r--  synapse/storage/event_push_actions.py  34
-rw-r--r--  synapse/storage/events.py  422
-rw-r--r--  synapse/storage/events_worker.py  204
-rw-r--r--  synapse/storage/filtering.py  10
-rw-r--r--  synapse/storage/group_server.py  5
-rw-r--r--  synapse/storage/keys.py  14
-rw-r--r--  synapse/storage/monthly_active_users.py  221
-rw-r--r--  synapse/storage/prepare_database.py  3
-rw-r--r--  synapse/storage/presence.py  10
-rw-r--r--  synapse/storage/profile.py  6
-rw-r--r--  synapse/storage/push_rule.py  35
-rw-r--r--  synapse/storage/pusher.py  26
-rw-r--r--  synapse/storage/receipts.py  40
-rw-r--r--  synapse/storage/registration.py  47
-rw-r--r--  synapse/storage/rejections.py  4
-rw-r--r--  synapse/storage/room.py  101
-rw-r--r--  synapse/storage/roommember.py  93
-rw-r--r--  synapse/storage/schema/delta/25/fts.py  6
-rw-r--r--  synapse/storage/schema/delta/27/ts.py  4
-rw-r--r--  synapse/storage/schema/delta/30/as_users.py  2
-rw-r--r--  synapse/storage/schema/delta/31/search_update.py  7
-rw-r--r--  synapse/storage/schema/delta/33/event_fields.py  5
-rw-r--r--  synapse/storage/schema/delta/33/remote_media_ts.py  1
-rw-r--r--  synapse/storage/schema/delta/34/cache_stream.py  6
-rw-r--r--  synapse/storage/schema/delta/34/received_txn_purge.py  4
-rw-r--r--  synapse/storage/schema/delta/34/sent_txn_purge.py  4
-rw-r--r--  synapse/storage/schema/delta/37/remove_auth_idx.py  6
-rw-r--r--  synapse/storage/schema/delta/42/user_dir.py  2
-rw-r--r--  synapse/storage/schema/delta/50/erasure_store.sql  21
-rw-r--r--  synapse/storage/schema/delta/50/make_event_content_nullable.py  92
-rw-r--r--  synapse/storage/schema/delta/51/monthly_active_users.sql  27
-rw-r--r--  synapse/storage/schema/full_schemas/16/event_edges.sql  3
-rw-r--r--  synapse/storage/schema/full_schemas/16/im.sql  7
-rw-r--r--  synapse/storage/search.py  8
-rw-r--r--  synapse/storage/signatures.py  10
-rw-r--r--  synapse/storage/state.py  454
-rw-r--r--  synapse/storage/stream.py  36
-rw-r--r--  synapse/storage/tags.py  13
-rw-r--r--  synapse/storage/transactions.py  23
-rw-r--r--  synapse/storage/user_directory.py  18
-rw-r--r--  synapse/storage/user_erasure_store.py  103
-rw-r--r--  synapse/storage/util/id_generators.py  2
-rw-r--r--  synapse/streams/config.py  35
-rw-r--r--  synapse/streams/events.py  7
-rw-r--r--  synapse/types.py  5
-rw-r--r--  synapse/util/__init__.py  32
-rw-r--r--  synapse/util/async_helpers.py (renamed from synapse/util/async.py)  229
-rw-r--r--  synapse/util/caches/__init__.py  6
-rw-r--r--  synapse/util/caches/descriptors.py  155
-rw-r--r--  synapse/util/caches/dictionary_cache.py  34
-rw-r--r--  synapse/util/caches/expiringcache.py  11
-rw-r--r--  synapse/util/caches/lrucache.py  2
-rw-r--r--  synapse/util/caches/response_cache.py  2
-rw-r--r--  synapse/util/caches/snapshot_cache.py  2
-rw-r--r--  synapse/util/caches/stream_change_cache.py  20
-rw-r--r--  synapse/util/distributor.py  48
-rw-r--r--  synapse/util/file_consumer.py  20
-rw-r--r--  synapse/util/frozenutils.py  10
-rw-r--r--  synapse/util/httpresourcetree.py  4
-rw-r--r--  synapse/util/logcontext.py  201
-rw-r--r--  synapse/util/logformatter.py  3
-rw-r--r--  synapse/util/logutils.py  16
-rw-r--r--  synapse/util/manhole.py  6
-rw-r--r--  synapse/util/metrics.py  40
-rw-r--r--  synapse/util/msisdn.py  1
-rw-r--r--  synapse/util/ratelimitutils.py  47
-rw-r--r--  synapse/util/retryutils.py  9
-rw-r--r--  synapse/util/rlimit.py  3
-rw-r--r--  synapse/util/stringutils.py  16
-rw-r--r--  synapse/util/versionstring.py  16
-rw-r--r--  synapse/visibility.py  315
-rw-r--r--  tests/__init__.py  4
-rw-r--r--  tests/api/test_auth.py  212
-rw-r--r--  tests/api/test_filtering.py  373
-rw-r--r--  tests/api/test_ratelimiting.py  7
-rw-r--r--  tests/app/__init__.py  0
-rw-r--r--  tests/app/test_frontend_proxy.py  88
-rw-r--r--  tests/appservice/test_appservice.py  95
-rw-r--r--  tests/appservice/test_scheduler.py  43
-rw-r--r--  tests/config/test_generate.py  39
-rw-r--r--  tests/config/test_load.py  41
-rw-r--r--  tests/crypto/test_event_signing.py  29
-rw-r--r--  tests/crypto/test_keyring.py  62
-rw-r--r--  tests/events/test_utils.py  139
-rw-r--r--  tests/federation/__init__.py  0
-rw-r--r--  tests/federation/test_federation_server.py  54
-rw-r--r--  tests/handlers/test_appservice.py  47
-rw-r--r--  tests/handlers/test_auth.py  126
-rw-r--r--  tests/handlers/test_device.py  102
-rw-r--r--  tests/handlers/test_directory.py  29
-rw-r--r--  tests/handlers/test_e2e_keys.py  104
-rw-r--r--  tests/handlers/test_presence.py  228
-rw-r--r--  tests/handlers/test_profile.py  36
-rw-r--r--  tests/handlers/test_register.py  83
-rw-r--r--  tests/handlers/test_sync.py  71
-rw-r--r--  tests/handlers/test_typing.py  250
-rw-r--r--  tests/http/__init__.py  0
-rw-r--r--  tests/http/test_endpoint.py  51
-rw-r--r--  tests/replication/slave/storage/_base.py  58
-rw-r--r--  tests/replication/slave/storage/test_account_data.py  26
-rw-r--r--  tests/replication/slave/storage/test_events.py  115
-rw-r--r--  tests/replication/slave/storage/test_receipts.py  10
-rw-r--r--  tests/rest/client/test_transactions.py  50
-rw-r--r--  tests/rest/client/v1/test_admin.py  308
-rw-r--r--  tests/rest/client/v1/test_events.py  128
-rw-r--r--  tests/rest/client/v1/test_presence.py  72
-rw-r--r--  tests/rest/client/v1/test_profile.py  48
-rw-r--r--  tests/rest/client/v1/test_register.py  75
-rw-r--r--  tests/rest/client/v1/test_rooms.py  1168
-rw-r--r--  tests/rest/client/v1/test_typing.py  105
-rw-r--r--  tests/rest/client/v1/utils.py  191
-rw-r--r--  tests/rest/client/v2_alpha/__init__.py  62
-rw-r--r--  tests/rest/client/v2_alpha/test_filter.py  133
-rw-r--r--  tests/rest/client/v2_alpha/test_register.py  201
-rw-r--r--  tests/rest/client/v2_alpha/test_sync.py  73
-rw-r--r--  tests/rest/media/v1/test_media_storage.py  23
-rw-r--r--  tests/server.py  241
-rw-r--r--  tests/server_notices/__init__.py  0
-rw-r--r--  tests/server_notices/test_resource_limits_server_notices.py  213
-rw-r--r--  tests/storage/test__base.py  13
-rw-r--r--  tests/storage/test_appservice.py  195
-rw-r--r--  tests/storage/test_background_update.py  27
-rw-r--r--  tests/storage/test_base.py  58
-rw-r--r--  tests/storage/test_client_ips.py  71
-rw-r--r--  tests/storage/test_devices.py  72
-rw-r--r--  tests/storage/test_directory.py  28
-rw-r--r--  tests/storage/test_end_to_end_keys.py  64
-rw-r--r--  tests/storage/test_event_federation.py  41
-rw-r--r--  tests/storage/test_event_push_actions.py  83
-rw-r--r--  tests/storage/test_keys.py  14
-rw-r--r--  tests/storage/test_monthly_active_users.py  131
-rw-r--r--  tests/storage/test_presence.py  75
-rw-r--r--  tests/storage/test_profile.py  22
-rw-r--r--  tests/storage/test_purge.py  106
-rw-r--r--  tests/storage/test_redaction.py  82
-rw-r--r--  tests/storage/test_registration.py  43
-rw-r--r--  tests/storage/test_room.py  55
-rw-r--r--  tests/storage/test_roommember.py  43
-rw-r--r--  tests/storage/test_state.py  435
-rw-r--r--  tests/storage/test_user_directory.py  43
-rw-r--r--  tests/test_distributor.py  71
-rw-r--r--  tests/test_dns.py  29
-rw-r--r--  tests/test_event_auth.py  144
-rw-r--r--  tests/test_federation.py  245
-rw-r--r--  tests/test_mau.py  217
-rw-r--r--  tests/test_preview.py  62
-rw-r--r--  tests/test_server.py  123
-rw-r--r--  tests/test_state.py  293
-rw-r--r--  tests/test_test_utils.py  6
-rw-r--r--  tests/test_types.py  15
-rw-r--r--  tests/test_visibility.py  329
-rw-r--r--  tests/unittest.py  180
-rw-r--r--  tests/util/caches/test_descriptors.py  151
-rw-r--r--  tests/util/test_dict_cache.py  35
-rw-r--r--  tests/util/test_expiring_cache.py  5
-rw-r--r--  tests/util/test_file_consumer.py  20
-rw-r--r--  tests/util/test_limiter.py  70
-rw-r--r--  tests/util/test_linearizer.py  99
-rw-r--r--  tests/util/test_logcontext.py  31
-rw-r--r--  tests/util/test_logformatter.py  1
-rw-r--r--  tests/util/test_lrucache.py  6
-rw-r--r--  tests/util/test_rwlock.py  13
-rw-r--r--  tests/util/test_snapshot_cache.py  6
-rw-r--r--  tests/util/test_stream_change_cache.py  16
-rw-r--r--  tests/util/test_treecache.py  4
-rw-r--r--  tests/util/test_wheel_timer.py  4
-rw-r--r--  tests/utils.py  291
-rw-r--r--  tox.ini  34
441 files changed, 24237 insertions, 11570 deletions
diff --git a/.circleci/config.yml b/.circleci/config.yml
new file mode 100644
index 0000000000..e03f01b837
--- /dev/null
+++ b/.circleci/config.yml
@@ -0,0 +1,48 @@
+version: 2
+jobs:
+  sytestpy2:
+    machine: true
+    steps:
+      - checkout
+      - run: docker pull matrixdotorg/sytest-synapsepy2
+      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy2
+      - store_artifacts:
+          path: ~/project/logs
+          destination: logs
+  sytestpy2postgres:
+    machine: true
+    steps:
+      - checkout
+      - run: docker pull matrixdotorg/sytest-synapsepy2
+      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy2
+      - store_artifacts:
+          path: ~/project/logs
+          destination: logs
+  sytestpy3:
+    machine: true
+    steps:
+      - checkout
+      - run: docker pull matrixdotorg/sytest-synapsepy3
+      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy3
+      - store_artifacts:
+          path: ~/project/logs
+          destination: logs
+  sytestpy3postgres:
+    machine: true
+    steps:
+      - checkout
+      - run: docker pull matrixdotorg/sytest-synapsepy3
+      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy3
+      - store_artifacts:
+          path: ~/project/logs
+          destination: logs
+
+workflows:
+  version: 2
+  build:
+    jobs:
+      - sytestpy2
+      - sytestpy2postgres
+# Currently broken while the Python 3 port is incomplete
+#      - sytestpy3
+#      - sytestpy3postgres
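
Each of the jobs above is just a `docker pull` followed by a `docker run`, so they can be reproduced outside CI. A minimal local-reproduction sketch in Python (assuming Docker is installed; image name and mount points as in the config above):

```python
import os
import subprocess

# Mirror the sytestpy2 job above: mount the checkout at /src and collect
# logs in ./logs, which is where the sytest images write their output.
src = os.getcwd()
logs = os.path.join(src, "logs")
os.makedirs(logs, exist_ok=True)

subprocess.check_call(["docker", "pull", "matrixdotorg/sytest-synapsepy2"])
subprocess.check_call([
    "docker", "run", "--rm",
    "-v", "%s:/src" % src,
    "-v", "%s:/logs" % logs,
    # Add "-e", "POSTGRES=1" here to mirror the sytestpy2postgres job.
    "matrixdotorg/sytest-synapsepy2",
])
```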
diff --git a/.dockerignore b/.dockerignore
index f36f86fbb7..6cdb8532d3 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -3,3 +3,6 @@ Dockerfile
 .gitignore
 demo/etc
 tox.ini
+synctl
+.git/*
+.tox/*
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
index d2050a3e44..21acb3202a 100644
--- a/.github/ISSUE_TEMPLATE.md
+++ b/.github/ISSUE_TEMPLATE.md
@@ -27,8 +27,9 @@ Describe here the problem that you are experiencing, or the feature you are requ
 
 Describe how what happens differs from what you expected.
 
-If you can identify any relevant log snippets from _homeserver.log_, please include
-those here (please be careful to remove any personal or private data):
+<!-- If you can identify any relevant log snippets from _homeserver.log_, please include
+those (please be careful to remove any personal or private data). Please surround them with
+``` (three backticks, on a line on their own), so that they are formatted legibly. -->
 
 ### Version information
 
diff --git a/.travis.yml b/.travis.yml
index e6ba6f4752..318701c9f8 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -4,7 +4,15 @@ language: python
 # tell travis to cache ~/.cache/pip
 cache: pip
 
+before_script:
+  - git remote set-branches --add origin develop
+  - git fetch origin develop
+
+services:
+  - postgresql
+
 matrix:
+  fast_finish: true
   include:
   - python: 2.7
     env: TOX_ENV=packaging
@@ -14,10 +22,23 @@ matrix:
 
   - python: 2.7
     env: TOX_ENV=py27
-    
+
+  - python: 2.7
+    env: TOX_ENV=py27-postgres TRIAL_FLAGS="-j 4"
+
   - python: 3.6
     env: TOX_ENV=py36
 
+  - python: 3.6
+    env: TOX_ENV=check_isort
+
+  - python: 3.6
+    env: TOX_ENV=check-newsfragment
+
+  allow_failures:
+  - python: 2.7
+    env: TOX_ENV=py27-postgres TRIAL_FLAGS="-j 4"
+
 install:
   - pip install tox
 
diff --git a/AUTHORS.rst b/AUTHORS.rst
index e13ac5ad34..9a83d90153 100644
--- a/AUTHORS.rst
+++ b/AUTHORS.rst
@@ -62,4 +62,7 @@ Christoph Witzany <christoph at web.crofting.com>
  * Add LDAP support for authentication
 
 Pierre Jaury <pierre at jaury.eu>
-* Docker packaging
\ No newline at end of file
+* Docker packaging
+
+Serban Constantin <serban.constantin at gmail dot com>
+ * Small bug fix
\ No newline at end of file
diff --git a/CHANGES.md b/CHANGES.md
new file mode 100644
index 0000000000..a35f5aebc7
--- /dev/null
+++ b/CHANGES.md
@@ -0,0 +1,2634 @@
+Synapse 0.33.3 (2018-08-22)
+===========================
+
+Bugfixes
+--------
+
+- Fix bug introduced in v0.33.3rc1 which made the ToS give a 500 error ([\#3732](https://github.com/matrix-org/synapse/issues/3732))
+
+
+Synapse 0.33.3rc2 (2018-08-21)
+==============================
+
+Bugfixes
+--------
+
+- Fix bug in v0.33.3rc1 which caused infinite loops and OOMs ([\#3723](https://github.com/matrix-org/synapse/issues/3723))
+
+
+Synapse 0.33.3rc1 (2018-08-21)
+==============================
+
+Features
+--------
+
+- Add support for the SNI extension to federation TLS connections. Thanks to @vojeroen! ([\#3439](https://github.com/matrix-org/synapse/issues/3439))
+- Add /_media/r0/config ([\#3184](https://github.com/matrix-org/synapse/issues/3184))
+- Speed up /members API and add `at` and `membership` params as per MSC1227 ([\#3568](https://github.com/matrix-org/synapse/issues/3568))
+- Implement `summary` block in /sync response as per MSC688 ([\#3574](https://github.com/matrix-org/synapse/issues/3574))
+- Add lazy-loading support to /messages as per MSC1227 ([\#3589](https://github.com/matrix-org/synapse/issues/3589))
+- Add ability to limit number of monthly active users on the server ([\#3633](https://github.com/matrix-org/synapse/issues/3633))
+- Support more federation endpoints on workers ([\#3653](https://github.com/matrix-org/synapse/issues/3653))
+- Basic support for room versioning ([\#3654](https://github.com/matrix-org/synapse/issues/3654))
+- Ability to disable client/server Synapse via conf toggle ([\#3655](https://github.com/matrix-org/synapse/issues/3655))
+- Ability to whitelist specific threepids against monthly active user limiting ([\#3662](https://github.com/matrix-org/synapse/issues/3662))
+- Add some metrics for the appservice and federation event sending loops ([\#3664](https://github.com/matrix-org/synapse/issues/3664))
+- Where server is disabled, block ability for locked out users to read new messages ([\#3670](https://github.com/matrix-org/synapse/issues/3670))
+- Set admin URI via config, to be used in error messages where the user should contact the administrator ([\#3687](https://github.com/matrix-org/synapse/issues/3687))
+- Synapse's presence functionality can now be disabled with the "use_presence" configuration option. ([\#3694](https://github.com/matrix-org/synapse/issues/3694))
+- For resource limit blocked users, prevent writing into rooms ([\#3708](https://github.com/matrix-org/synapse/issues/3708))
+
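
The monthly-active-user limit added in [\#3633] is a cap checked before letting a user through authentication. A hypothetical sketch of that gating logic, with all names invented for illustration (the real implementation lives in Synapse's auth and storage layers):

```python
class ResourceLimitExceeded(Exception):
    """Raised when the homeserver is over its monthly-active-user cap."""

async def assert_within_mau_limit(store, config, user_id):
    # Users already counted as active this month always get through;
    # only *new* active users can push the server over the cap.
    if await store.user_last_seen_monthly_active(user_id):
        return
    current_mau = await store.get_monthly_active_count()
    if current_mau >= config.max_mau_value:
        raise ResourceLimitExceeded(
            "Monthly active user limit exceeded; contact the administrator"
        )
```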
+
+Bugfixes
+--------
+
+- Fix occasional glitches in the synapse_event_persisted_position metric ([\#3658](https://github.com/matrix-org/synapse/issues/3658))
+- Fix bug on deleting 3pid when using identity servers that don't support unbind API ([\#3661](https://github.com/matrix-org/synapse/issues/3661))
+- Make the tests pass on Twisted < 18.7.0 ([\#3676](https://github.com/matrix-org/synapse/issues/3676))
+- Don’t ship recaptcha_ajax.js, use it directly from Google ([\#3677](https://github.com/matrix-org/synapse/issues/3677))
+- Fix test_reap_monthly_active_users so it passes under PostgreSQL ([\#3681](https://github.com/matrix-org/synapse/issues/3681))
+- Fix MAU blocking calculation bug on login ([\#3689](https://github.com/matrix-org/synapse/issues/3689))
+- Fix missing yield in synapse.storage.monthly_active_users.initialise_reserved_users ([\#3692](https://github.com/matrix-org/synapse/issues/3692))
+- Improve HTTP request logging to include all requests ([\#3700](https://github.com/matrix-org/synapse/issues/3700))
+- Avoid timing out requests while we are streaming back the response ([\#3701](https://github.com/matrix-org/synapse/issues/3701))
+- Support more federation endpoints on workers ([\#3705](https://github.com/matrix-org/synapse/issues/3705), [\#3713](https://github.com/matrix-org/synapse/issues/3713))
+- Fix "Starting db txn 'get_all_updated_receipts' from sentinel context" warning ([\#3710](https://github.com/matrix-org/synapse/issues/3710))
+- Fix bug where `state_cache` cache factor ignored environment variables ([\#3719](https://github.com/matrix-org/synapse/issues/3719))
+
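
The `state_cache` fix ([\#3719]) concerns per-cache size factors taken from the environment. A sketch of the override lookup, assuming the `SYNAPSE_CACHE_FACTOR` / `SYNAPSE_CACHE_FACTOR_<NAME>` convention described in Synapse's documentation:

```python
import os

def cache_factor_for(cache_name, default=0.5):
    # A per-cache override such as SYNAPSE_CACHE_FACTOR_STATE_CACHE takes
    # precedence over the global SYNAPSE_CACHE_FACTOR.
    specific = os.environ.get("SYNAPSE_CACHE_FACTOR_" + cache_name.upper())
    if specific is not None:
        return float(specific)
    return float(os.environ.get("SYNAPSE_CACHE_FACTOR", default))

print(cache_factor_for("state_cache"))  # e.g. 2.0 if the env var is set
```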
+
+Deprecations and Removals
+-------------------------
+
+- The Shared-Secret registration method of the legacy v1/register REST endpoint has been removed. For a replacement, please see [the admin/register API documentation](https://github.com/matrix-org/synapse/blob/master/docs/admin_api/register_api.rst). ([\#3703](https://github.com/matrix-org/synapse/issues/3703))
+
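
The admin register API that replaces shared-secret v1 registration authenticates each request with an HMAC over a server-issued nonce. A client sketch following docs/admin_api/register_api.rst (endpoint path and field names as documented there at the time; the `requests` dependency is an assumption):

```python
import hashlib
import hmac

import requests  # assumed available

def register_user(base_url, shared_secret, username, password, admin=False):
    url = base_url + "/_matrix/client/r0/admin/register"
    nonce = requests.get(url).json()["nonce"]

    # The MAC covers nonce, username, password and the admin flag,
    # joined with NUL bytes, keyed with the registration shared secret.
    mac = hmac.new(shared_secret.encode("utf8"), digestmod=hashlib.sha1)
    mac.update(nonce.encode("utf8"))
    mac.update(b"\x00")
    mac.update(username.encode("utf8"))
    mac.update(b"\x00")
    mac.update(password.encode("utf8"))
    mac.update(b"\x00")
    mac.update(b"admin" if admin else b"notadmin")

    return requests.post(url, json={
        "nonce": nonce,
        "username": username,
        "password": password,
        "admin": admin,
        "mac": mac.hexdigest(),
    }).json()
```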
+
+Internal Changes
+----------------
+
+- The test suite now can run under PostgreSQL. ([\#3423](https://github.com/matrix-org/synapse/issues/3423))
+- Refactor HTTP replication endpoints to reduce code duplication ([\#3632](https://github.com/matrix-org/synapse/issues/3632))
+- Tests now correctly execute on Python 3. ([\#3647](https://github.com/matrix-org/synapse/issues/3647))
+- Sytests can now be run inside a Docker container. ([\#3660](https://github.com/matrix-org/synapse/issues/3660))
+- Port over enough to Python 3 to allow the sytests to start. ([\#3668](https://github.com/matrix-org/synapse/issues/3668))
+- Update docker base image from alpine 3.7 to 3.8. ([\#3669](https://github.com/matrix-org/synapse/issues/3669))
+- Rename synapse.util.async to synapse.util.async_helpers to mitigate async becoming a keyword on Python 3.7. ([\#3678](https://github.com/matrix-org/synapse/issues/3678))
+- Synapse's tests are now formatted with the black autoformatter. ([\#3679](https://github.com/matrix-org/synapse/issues/3679))
+- Implemented a new testing base class to reduce test boilerplate. ([\#3684](https://github.com/matrix-org/synapse/issues/3684))
+- Rename MAU prometheus metrics ([\#3690](https://github.com/matrix-org/synapse/issues/3690))
+- Add new error type ResourceLimit ([\#3707](https://github.com/matrix-org/synapse/issues/3707))
+- Logcontexts for replication command handlers ([\#3709](https://github.com/matrix-org/synapse/issues/3709))
+- Update admin register API documentation to reference a real user ID. ([\#3712](https://github.com/matrix-org/synapse/issues/3712))
+
+
+Synapse 0.33.2 (2018-08-09)
+===========================
+
+No significant changes.
+
+
+Synapse 0.33.2rc1 (2018-08-07)
+==============================
+
+Features
+--------
+
+- Add support for the lazy_loaded_members filter as per MSC1227 ([\#2970](https://github.com/matrix-org/synapse/issues/2970))
+- Add support for the include_redundant_members filter param as per MSC1227 ([\#3331](https://github.com/matrix-org/synapse/issues/3331))
+- Add metrics to track resource usage by background processes ([\#3553](https://github.com/matrix-org/synapse/issues/3553), [\#3556](https://github.com/matrix-org/synapse/issues/3556), [\#3604](https://github.com/matrix-org/synapse/issues/3604), [\#3610](https://github.com/matrix-org/synapse/issues/3610))
+- Add `code` label to `synapse_http_server_response_time_seconds` prometheus metric ([\#3554](https://github.com/matrix-org/synapse/issues/3554))
+- Add support for client_reader to handle more APIs ([\#3555](https://github.com/matrix-org/synapse/issues/3555), [\#3597](https://github.com/matrix-org/synapse/issues/3597))
+- Make the /context API filter & lazy-load aware as per MSC1227 ([\#3567](https://github.com/matrix-org/synapse/issues/3567))
+- Add ability to limit number of monthly active users on the server ([\#3630](https://github.com/matrix-org/synapse/issues/3630))
+- When we fail to join a room over federation, pass the error code back to the client. ([\#3639](https://github.com/matrix-org/synapse/issues/3639))
+- Add a new /admin/register API for non-interactively creating users. ([\#3415](https://github.com/matrix-org/synapse/issues/3415))
+
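
The MSC1227 lazy-loading features above are driven by the client's filter. A minimal sketch of a /sync filter that turns them on (`lazy_load_members` is the parameter name MSC1227 defines; the changelog entry's `lazy_loaded_members` refers to the same feature):

```python
import json

# Room state filter enabling lazy-loaded membership: the server then only
# sends m.room.member events for the senders of returned timeline events.
sync_filter = {
    "room": {
        "state": {
            "lazy_load_members": True,
            # Set to True for clients that don't cache state between syncs
            # and want already-sent membership events repeated.
            "include_redundant_members": False,
        }
    }
}

print(json.dumps(sync_filter))
```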
+
+Bugfixes
+--------
+
+- Make /directory/list API return 404 for room not found instead of 400. Thanks to @fuzzmz! ([\#3620](https://github.com/matrix-org/synapse/issues/3620))
+- Default inviter_display_name to mxid for email invites ([\#3391](https://github.com/matrix-org/synapse/issues/3391))
+- Don't generate TURN credentials if no TURN config options are set ([\#3514](https://github.com/matrix-org/synapse/issues/3514))
+- Correctly announce deleted devices over federation ([\#3520](https://github.com/matrix-org/synapse/issues/3520))
+- Catch failures saving metrics captured by Measure, and instead log the faulty metrics information for further analysis. ([\#3548](https://github.com/matrix-org/synapse/issues/3548))
+- Unicode passwords are now normalised before hashing, preventing the instance where two different devices or browsers might send a different UTF-8 sequence for the password. ([\#3569](https://github.com/matrix-org/synapse/issues/3569))
+- Fix potential stack overflow and deadlock under heavy load ([\#3570](https://github.com/matrix-org/synapse/issues/3570))
+- Respond with M_NOT_FOUND when profiles are not found locally or over federation. Fixes #3585 ([\#3585](https://github.com/matrix-org/synapse/issues/3585))
+- Fix failure to persist events over federation under load ([\#3601](https://github.com/matrix-org/synapse/issues/3601))
+- Fix updating of cached remote profiles ([\#3605](https://github.com/matrix-org/synapse/issues/3605))
+- Fix 'tuple index out of range' error ([\#3607](https://github.com/matrix-org/synapse/issues/3607))
+- Only import secrets when available (fix for py < 3.6) ([\#3626](https://github.com/matrix-org/synapse/issues/3626))
+
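
The password fix ([\#3569]) boils down to applying Unicode NFKC normalisation before hashing, so canonically-equivalent inputs from different devices hash identically. A sketch of the idea (the bcrypt usage is illustrative, not Synapse's exact code):

```python
import unicodedata

import bcrypt  # assumed available

def hash_password(password: str) -> bytes:
    # "ﬁ" (U+FB01) and "fi" normalise to the same NFKC sequence, so both
    # spellings of a password produce the same hash input.
    normalised = unicodedata.normalize("NFKC", password)
    return bcrypt.hashpw(normalised.encode("utf8"), bcrypt.gensalt())
```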
+
+Internal Changes
+----------------
+
+- Remove redundant checks on who_forgot_in_room ([\#3350](https://github.com/matrix-org/synapse/issues/3350))
+- Remove unnecessary event re-signing hacks ([\#3367](https://github.com/matrix-org/synapse/issues/3367))
+- Rewrite cache list decorator ([\#3384](https://github.com/matrix-org/synapse/issues/3384))
+- Move v1-only REST APIs into their own module. ([\#3460](https://github.com/matrix-org/synapse/issues/3460))
+- Replace more instances of Python 2-only iteritems and itervalues uses. ([\#3562](https://github.com/matrix-org/synapse/issues/3562))
+- Refactor EventContext to accept state during init ([\#3577](https://github.com/matrix-org/synapse/issues/3577))
+- Improve Dockerfile and docker-compose instructions ([\#3543](https://github.com/matrix-org/synapse/issues/3543))
+- Release notes are now in the Markdown format. ([\#3552](https://github.com/matrix-org/synapse/issues/3552))
+- Add config for pep8 ([\#3559](https://github.com/matrix-org/synapse/issues/3559))
+- Merge Linearizer and Limiter ([\#3571](https://github.com/matrix-org/synapse/issues/3571), [\#3572](https://github.com/matrix-org/synapse/issues/3572))
+- Lazily load state on master process when using workers to reduce DB consumption ([\#3579](https://github.com/matrix-org/synapse/issues/3579), [\#3581](https://github.com/matrix-org/synapse/issues/3581), [\#3582](https://github.com/matrix-org/synapse/issues/3582), [\#3584](https://github.com/matrix-org/synapse/issues/3584))
+- Fixes and optimisations for resolve_state_groups ([\#3586](https://github.com/matrix-org/synapse/issues/3586))
+- Improve logging for exceptions when handling PDUs ([\#3587](https://github.com/matrix-org/synapse/issues/3587))
+- Add some measure blocks to persist_events ([\#3590](https://github.com/matrix-org/synapse/issues/3590))
+- Fix some random logcontext leaks. ([\#3591](https://github.com/matrix-org/synapse/issues/3591), [\#3606](https://github.com/matrix-org/synapse/issues/3606))
+- Speed up calculating state deltas in persist_event loop ([\#3592](https://github.com/matrix-org/synapse/issues/3592))
+- Attempt to reduce amount of state pulled out of DB during persist_events ([\#3595](https://github.com/matrix-org/synapse/issues/3595))
+- Fix a documentation typo in on_make_leave_request ([\#3609](https://github.com/matrix-org/synapse/issues/3609))
+- Make EventStore inherit from EventFederationStore ([\#3612](https://github.com/matrix-org/synapse/issues/3612))
+- Remove some redundant joins on event_edges.room_id ([\#3613](https://github.com/matrix-org/synapse/issues/3613))
+- Stop populating events.content ([\#3614](https://github.com/matrix-org/synapse/issues/3614))
+- Update the /send_leave path registration to use event_id rather than a transaction ID. ([\#3616](https://github.com/matrix-org/synapse/issues/3616))
+- Refactor FederationHandler to move DB writes into separate functions ([\#3621](https://github.com/matrix-org/synapse/issues/3621))
+- Remove unused field "pdu_failures" from transactions. ([\#3628](https://github.com/matrix-org/synapse/issues/3628))
+- Rename replication_layer to federation_client ([\#3634](https://github.com/matrix-org/synapse/issues/3634))
+- Factor out exception handling in federation_client ([\#3638](https://github.com/matrix-org/synapse/issues/3638))
+- Refactor location of docker build script. ([\#3644](https://github.com/matrix-org/synapse/issues/3644))
+- Update CONTRIBUTING to mention newsfragments. ([\#3645](https://github.com/matrix-org/synapse/issues/3645))
+
+
+Synapse 0.33.1 (2018-08-02)
+===========================
+
+SECURITY FIXES
+--------------
+
+- Fix a potential issue where servers could request events for rooms they have not joined. ([\#3641](https://github.com/matrix-org/synapse/issues/3641))
+- Fix a potential issue where users could see events in private rooms before they joined. ([\#3642](https://github.com/matrix-org/synapse/issues/3642))
+
+Synapse 0.33.0 (2018-07-19)
+===========================
+
+Bugfixes
+--------
+
+-   Disable a noisy warning about logcontexts. ([\#3561](https://github.com/matrix-org/synapse/issues/3561))
+
+Synapse 0.33.0rc1 (2018-07-18)
+==============================
+
+Features
+--------
+
+-   Enforce the specified API for report\_event. ([\#3316](https://github.com/matrix-org/synapse/issues/3316))
+-   Include CPU time from database threads in request/block metrics. ([\#3496](https://github.com/matrix-org/synapse/issues/3496), [\#3501](https://github.com/matrix-org/synapse/issues/3501))
+-   Add CPU metrics for \_fetch\_event\_list. ([\#3497](https://github.com/matrix-org/synapse/issues/3497))
+-   Optimisation to make handling incoming federation requests more efficient. ([\#3541](https://github.com/matrix-org/synapse/issues/3541))
+
+Bugfixes
+--------
+
+-   Fix a significant performance regression in /sync. ([\#3505](https://github.com/matrix-org/synapse/issues/3505), [\#3521](https://github.com/matrix-org/synapse/issues/3521), [\#3530](https://github.com/matrix-org/synapse/issues/3530), [\#3544](https://github.com/matrix-org/synapse/issues/3544))
+-   Use more portable syntax in our use of the attrs package, widening the supported versions. ([\#3498](https://github.com/matrix-org/synapse/issues/3498))
+-   Fix queued federation requests being processed in the wrong order. ([\#3533](https://github.com/matrix-org/synapse/issues/3533))
+-   Ensure that erasure requests are correctly honoured for publicly accessible rooms when accessed over federation. ([\#3546](https://github.com/matrix-org/synapse/issues/3546))
+
+Misc
+----
+
+-   Refactoring to improve testability. ([\#3351](https://github.com/matrix-org/synapse/issues/3351), [\#3499](https://github.com/matrix-org/synapse/issues/3499))
+-   Use `isort` to sort imports. ([\#3463](https://github.com/matrix-org/synapse/issues/3463), [\#3464](https://github.com/matrix-org/synapse/issues/3464), [\#3540](https://github.com/matrix-org/synapse/issues/3540))
+-   Use parse and asserts from http.servlet. ([\#3534](https://github.com/matrix-org/synapse/issues/3534), [\#3535](https://github.com/matrix-org/synapse/issues/3535)).
+
+Synapse 0.32.2 (2018-07-07)
+===========================
+
+Bugfixes
+--------
+
+-   Amend the Python dependencies to depend on attrs from PyPI, not attr ([\#3492](https://github.com/matrix-org/synapse/issues/3492))
+
+Synapse 0.32.1 (2018-07-06)
+===========================
+
+Bugfixes
+--------
+
+-   Add explicit dependency on netaddr ([\#3488](https://github.com/matrix-org/synapse/issues/3488))
+
+Changes in synapse v0.32.0 (2018-07-06)
+=======================================
+
+No changes since 0.32.0rc1
+
+Synapse 0.32.0rc1 (2018-07-05)
+==============================
+
+Features
+--------
+
+-   Add blacklist & whitelist of servers allowed to send events to a room via `m.room.server_acl` event.
+-   Cache factor override system for specific caches ([\#3334](https://github.com/matrix-org/synapse/issues/3334))
+-   Add metrics to track appservice transactions ([\#3344](https://github.com/matrix-org/synapse/issues/3344))
+-   Try to log more helpful info when a sig verification fails ([\#3372](https://github.com/matrix-org/synapse/issues/3372))
+-   Synapse now uses the best performing JSON encoder/decoder according to your runtime (simplejson on CPython, stdlib json on PyPy). ([\#3462](https://github.com/matrix-org/synapse/issues/3462))
+-   Add optional ip\_range\_whitelist param to AS registration files to lock AS IP access ([\#3465](https://github.com/matrix-org/synapse/issues/3465))
+-   Reject invalid server names in federation requests ([\#3480](https://github.com/matrix-org/synapse/issues/3480))
+-   Reject invalid server names in homeserver.yaml ([\#3483](https://github.com/matrix-org/synapse/issues/3483))
+
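
The `m.room.server_acl` event at the top of this list is a state event (empty state_key) whose content lists server-name globs. An illustrative content body, shaped as the Matrix spec describes:

```python
# deny takes precedence over allow; allow_ip_literals refuses servers
# identified by raw IP addresses outright.
server_acl_content = {
    "allow_ip_literals": False,
    "allow": ["*"],
    "deny": ["evil.example.org", "*.evil.example.org"],
}
```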
+Bugfixes
+--------
+
+-   Strip access\_token from outgoing requests ([\#3327](https://github.com/matrix-org/synapse/issues/3327))
+-   Redact AS tokens in logs ([\#3349](https://github.com/matrix-org/synapse/issues/3349))
+-   Fix federation backfill from SQLite servers ([\#3355](https://github.com/matrix-org/synapse/issues/3355))
+-   Fix event-purge-by-ts admin API ([\#3363](https://github.com/matrix-org/synapse/issues/3363))
+-   Fix event filtering in get\_missing\_events handler ([\#3371](https://github.com/matrix-org/synapse/issues/3371))
+-   Synapse is now stricter regarding accepting events which it cannot retrieve the prev\_events for. ([\#3456](https://github.com/matrix-org/synapse/issues/3456))
+-   Fix bug where synapse would explode when receiving unicode in HTTP User-Agent header ([\#3470](https://github.com/matrix-org/synapse/issues/3470))
+-   Invalidate cache on correct thread to avoid race ([\#3473](https://github.com/matrix-org/synapse/issues/3473))
+
+Improved Documentation
+----------------------
+
+-   `doc/postgres.rst`: fix display of the last command block. Thanks to @ArchangeGabriel! ([\#3340](https://github.com/matrix-org/synapse/issues/3340))
+
+Deprecations and Removals
+-------------------------
+
+-   Remove was\_forgotten\_at ([\#3324](https://github.com/matrix-org/synapse/issues/3324))
+
+Misc
+----
+
+-   [\#3332](https://github.com/matrix-org/synapse/issues/3332), [\#3341](https://github.com/matrix-org/synapse/issues/3341), [\#3347](https://github.com/matrix-org/synapse/issues/3347), [\#3348](https://github.com/matrix-org/synapse/issues/3348), [\#3356](https://github.com/matrix-org/synapse/issues/3356), [\#3385](https://github.com/matrix-org/synapse/issues/3385), [\#3446](https://github.com/matrix-org/synapse/issues/3446), [\#3447](https://github.com/matrix-org/synapse/issues/3447), [\#3467](https://github.com/matrix-org/synapse/issues/3467), [\#3474](https://github.com/matrix-org/synapse/issues/3474)
+
+Changes in synapse v0.31.2 (2018-06-14)
+=======================================
+
+SECURITY UPDATE: Prevent unauthorised users from setting state events in a room when there is no `m.room.power_levels` event in force in the room. (PR #3397)
+
+Discussion around the Matrix Spec change proposal for this change can be followed at <https://github.com/matrix-org/matrix-doc/issues/1304>.
+
+Changes in synapse v0.31.1 (2018-06-08)
+=======================================
+
+v0.31.1 fixes a security bug in the `get_missing_events` federation API where event visibility rules were not applied correctly.
+
+We are not aware of it being actively exploited but please upgrade asap.
+
+Bug Fixes:
+
+-   Fix event filtering in get\_missing\_events handler (PR #3371)
+
+Changes in synapse v0.31.0 (2018-06-06)
+=======================================
+
+Most notable change from v0.30.0 is to switch to the python prometheus library to improve system stats reporting. WARNING: this changes a number of prometheus metrics in a backwards-incompatible manner. For more details, see [docs/metrics-howto.rst](docs/metrics-howto.rst#removal-of-deprecated-metrics--time-based-counters-becoming-histograms-in-0310).
+
+Bug Fixes:
+
+-   Fix metric documentation tables (PR #3341)
+-   Fix LaterGauge error handling (694968f)
+-   Fix replication metrics (b7e7fd2)
+
+Changes in synapse v0.31.0-rc1 (2018-06-04)
+===========================================
+
+Features:
+
+-   Switch to the Python Prometheus library (PR #3256, #3274)
+-   Let users leave the server notice room after joining (PR #3287)
+
+Changes:
+
+-   daily user type phone home stats (PR #3264)
+-   Use iter\* methods for \_filter\_events\_for\_server (PR #3267)
+-   Docs on consent bits (PR #3268)
+-   Remove users from user directory on deactivate (PR #3277)
+-   Avoid sending consent notice to guest users (PR #3288)
+-   Disable CPUMetrics if no /proc/self/stat (PR #3299)
+-   Consistently use six\'s iteritems and wrap lazy keys/values in list() if they\'re not meant to be lazy (PR #3307)
+-   Add private IPv6 addresses to example config for url preview blacklist (PR #3317) Thanks to @thegcat!
+-   Reduce stuck read-receipts: ignore depth when updating (PR #3318)
+-   Put python\'s logs into Trial when running unit tests (PR #3319)
+
+Changes, python 3 migration:
+
+-   Replace some more comparisons with six (PR #3243) Thanks to @NotAFile!
+-   Replace some iteritems with six (PR #3244) Thanks to @NotAFile!
+-   Add batch\_iter to utils (PR #3245) Thanks to @NotAFile!
+-   Use repr, not str (PR #3246) Thanks to @NotAFile!
+-   Misc Python3 fixes (PR #3247) Thanks to @NotAFile!
+-   Py3 storage/\_base.py (PR #3278) Thanks to @NotAFile!
+-   More six iteritems (PR #3279) Thanks to @NotAFile!
+-   More Misc. py3 fixes (PR #3280) Thanks to @NotAFile!
+-   Remaining isinstance fixes (PR #3281) Thanks to @NotAFile!
+-   Py3-ize state.py (PR #3283) Thanks to @NotAFile!
+-   Extend tox testing for py3 to avoid regressions (PR #3302) Thanks to @krombel!
+-   Use memoryview in py3 (PR #3303) Thanks to @NotAFile!
+
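
The batch\_iter helper added in PR #3245 chunks an iterable, e.g. for batched database queries. A sketch of the usual itertools-based implementation (details may differ from the actual PR):

```python
from itertools import islice

def batch_iter(iterable, size):
    """Yield tuples of up to `size` items until `iterable` is exhausted."""
    sourceiter = iter(iterable)
    # iter(callable, sentinel) stops when the callable returns (), i.e.
    # when islice finds no more items to take.
    return iter(lambda: tuple(islice(sourceiter, size)), ())

for batch in batch_iter(range(5), 3):
    print(batch)  # (0, 1, 2) then (3, 4)
```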
+Bugs:
+
+-   Fix federation backfill bugs (PR #3261)
+-   federation: fix LaterGauge usage (PR #3328) Thanks to @intelfx!
+
+Changes in synapse v0.30.0 (2018-05-24)
+=======================================
+
+\'Server Notices\' are a new feature introduced in Synapse 0.30. They provide a channel whereby server administrators can send messages to users on the server.
+
+They are used as part of communication of the server policies (see `docs/consent_tracking.md`), however the intention is that they may also find a use for features such as \"Message of the day\".
+
+This feature is specific to Synapse, but uses standard Matrix communication mechanisms, so should work with any Matrix client. For more details see `docs/server_notices.md`
+
+Further Server Notices/Consent Tracking Support:
+
+-   Allow overriding the server\_notices user\'s avatar (PR #3273)
+-   Use the localpart in the consent uri (PR #3272)
+-   Support for putting %(consent\_uri)s in messages (PR #3271)
+-   Block attempts to send server notices to remote users (PR #3270)
+-   Docs on consent bits (PR #3268)
+
+Changes in synapse v0.30.0-rc1 (2018-05-23)
+===========================================
+
+Server Notices/Consent Tracking Support:
+
+-   ConsentResource to gather policy consent from users (PR #3213)
+-   Move RoomCreationHandler out of synapse.handlers.Handlers (PR #3225)
+-   Infrastructure for a server notices room (PR #3232)
+-   Send users a server notice about consent (PR #3236)
+-   Reject attempts to send event before privacy consent is given (PR #3257)
+-   Add a \'has\_consented\' template var to consent forms (PR #3262)
+-   Fix dependency on jinja2 (PR #3263)
+
+Features:
+
+-   Cohort analytics (PR #3163, #3241, #3251)
+-   Add lxml to docker image for web previews (PR #3239) Thanks to @ptman!
+-   Add in flight request metrics (PR #3252)
+
+Changes:
+
+-   Remove unused update\_external\_syncs (PR #3233)
+-   Use stream rather depth ordering for push actions (PR #3212)
+-   Make purge\_history operate on tokens (PR #3221)
+-   Don\'t support limitless pagination (PR #3265)
+
+Bug Fixes:
+
+-   Fix logcontext resource usage tracking (PR #3258)
+-   Fix error in handling receipts (PR #3235)
+-   Stop the transaction cache caching failures (PR #3255)
+
+Changes in synapse v0.29.1 (2018-05-17)
+=======================================
+
+Changes:
+
+-   Update docker documentation (PR #3222)
+
+Changes in synapse v0.29.0 (2018-05-16)
+=======================================
+
+No changes since v0.29.0-rc1
+
+Changes in synapse v0.29.0-rc1 (2018-05-14)
+===========================================
+
+Notable changes include a Dockerfile for running Synapse (thanks to @kaiyou!) and the closing of a spec bug in the Client-Server API, plus further preparation for the Python 3 migration.
+
+Potentially breaking change:
+
+-   Make Client-Server API return 401 for invalid token (PR #3161).
+
+    This changes the Client-server spec to return a 401 error code instead of 403 when the access token is unrecognised. This is the behaviour required by the specification, but some clients may be relying on the old, incorrect behaviour.
+
+    Thanks to @NotAFile for fixing this.
+
+Features:
+
+-   Add a Dockerfile for synapse (PR #2846) Thanks to @kaiyou!
+
+Changes - General:
+
+-   nuke-room-from-db.sh: added postgresql option and help (PR #2337) Thanks to @rubo77!
+-   Part user from rooms on account deactivate (PR #3201)
+-   Make \'unexpected logging context\' into warnings (PR #3007)
+-   Set Server header in SynapseRequest (PR #3208)
+-   Remove duplicates from groups tables (PR #3129)
+-   Improve exception handling for background processes (PR #3138)
+-   Add missing consumeErrors to improve exception handling (PR #3139)
+-   Reraise exceptions more carefully (PR #3142)
+-   Remove redundant call to preserve\_fn (PR #3143)
+-   Trap exceptions thrown within run\_in\_background (PR #3144)
+
+Changes - Refactors:
+
+-   Refactor /context to reuse pagination storage functions (PR #3193)
+-   Refactor recent events func to use pagination func (PR #3195)
+-   Refactor pagination DB API to return concrete type (PR #3196)
+-   Refactor get\_recent\_events\_for\_room return type (PR #3198)
+-   Refactor sync APIs to reuse pagination API (PR #3199)
+-   Remove unused code path from member change DB func (PR #3200)
+-   Refactor request handling wrappers (PR #3203)
+-   transaction\_id, destination defined twice (PR #3209) Thanks to @damir-manapov!
+-   Refactor event storage to prepare for changes in state calculations (PR #3141)
+-   Set Server header in SynapseRequest (PR #3208)
+-   Use deferred.addTimeout instead of time\_bound\_deferred (PR #3127, #3178)
+-   Use run\_in\_background in preference to preserve\_fn (PR #3140)
+
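
`Deferred.addTimeout` (PR #3127, #3178) is Twisted's built-in replacement for the home-grown time\_bound\_deferred; it needs Twisted >= 16.5. A usage sketch:

```python
from twisted.internet import defer, reactor

d = defer.Deferred()
# Fail d with defer.TimeoutError if it has not fired within 30 seconds,
# using the reactor as the timing source.
d.addTimeout(30, reactor)
d.addErrback(lambda f: f.trap(defer.TimeoutError))
```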
+Changes - Python 3 migration:
+
+-   Construct HMAC as bytes on py3 (PR #3156) Thanks to @NotAFile!
+-   Run config tests on py3 (PR #3159) Thanks to @NotAFile!
+-   Open certificate files as bytes (PR #3084) Thanks to @NotAFile!
+-   Open config file in non-bytes mode (PR #3085) Thanks to @NotAFile!
+-   Make event properties raise AttributeError instead (PR #3102) Thanks to @NotAFile!
+-   Use six.moves.urlparse (PR #3108) Thanks to @NotAFile!
+-   Add py3 tests to tox with folders that work (PR #3145) Thanks to @NotAFile!
+-   Don\'t yield in list comprehensions (PR #3150) Thanks to @NotAFile!
+-   Move more xrange to six (PR #3151) Thanks to @NotAFile!
+-   Make imports local (PR #3152) Thanks to @NotAFile!
+-   Move httplib import to six (PR #3153) Thanks to @NotAFile!
+-   Replace stringIO imports with six (PR #3154, #3168) Thanks to @NotAFile!
+-   More bytes strings (PR #3155) Thanks to @NotAFile!
+
+Bug Fixes:
+
+-   Synapse fails to start under Twisted \>= 18.4 (PR #3157)
+-   Fix a class of logcontext leaks (PR #3170)
+-   Fix a couple of logcontext leaks in unit tests (PR #3172)
+-   Fix logcontext leak in media repo (PR #3174)
+-   Escape label values in prometheus metrics (PR #3175, #3186)
+-   Fix \'Unhandled Error\' logs with Twisted 18.4 (PR #3182) Thanks to @Half-Shot!
+-   Fix logcontext leaks in rate limiter (PR #3183)
+-   notifications: Convert next\_token to string according to the spec (PR #3190) Thanks to @mujx!
+-   nuke-room-from-db.sh: fix deletion from search table (PR #3194) Thanks to @rubo77!
+-   Add guard for None on purge\_history API (PR #3160) Thanks to @krombel!
+
+Changes in synapse v0.28.1 (2018-05-01)
+=======================================
+
+SECURITY UPDATE
+
+-   Clamp the allowed values of event depth received over federation to be \[0, 2\^63 - 1\]. This mitigates an attack where malicious events injected with depth = 2\^63 - 1 render rooms unusable. Depth is used to determine the cosmetic ordering of events within a room, and so the ordering of events in such a room will default to using stream\_ordering rather than depth (topological\_ordering).
+
+    This is a temporary solution to mitigate abuse in the wild, whilst a long term solution is being implemented to improve how the depth parameter is used.
+
+    Full details at <https://docs.google.com/document/d/1I3fi2S-XnpO45qrpCsowZv8P8dHcNZ4fsBsbOW7KABI>
+
+-   Pin Twisted to \<18.4 until we stop using the private \_OpenSSLECCurve API.
+
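
The depth clamp is essentially a one-line guard on inbound PDUs. A sketch, under the assumption that federation code sanitises depth before persisting (names illustrative):

```python
MAX_DEPTH = 2 ** 63 - 1  # the largest value a signed 64-bit column holds

def clamp_depth(depth):
    # Keep inbound federation depth inside [0, 2**63 - 1] so a malicious
    # event cannot pin itself to the top of the topological ordering.
    if not isinstance(depth, int):
        raise ValueError("depth must be an integer")
    return max(0, min(depth, MAX_DEPTH))
```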
+Changes in synapse v0.28.0 (2018-04-26)
+=======================================
+
+Bug Fixes:
+
+-   Fix quarantine media admin API and search reindex (PR #3130)
+-   Fix media admin APIs (PR #3134)
+
+Changes in synapse v0.28.0-rc1 (2018-04-24)
+===========================================
+
+Minor performance improvement to federation sending and bug fixes.
+
+(Note: This release does not include the delta state resolution implementation discussed in matrix live)
+
+Features:
+
+-   Add metrics for event processing lag (PR #3090)
+-   Add metrics for ResponseCache (PR #3092)
+
+Changes:
+
+-   Synapse on PyPy (PR #2760) Thanks to @Valodim!
+-   Move handling of auto\_join\_rooms to RegisterHandler (PR #2996) Thanks to @krombel!
+-   Improve handling of SRV records for federation connections (PR #3016) Thanks to @silkeh!
+-   Document the behaviour of ResponseCache (PR #3059)
+-   Preparation for py3 (PR #3061, #3073, #3074, #3075, #3103, #3104, #3106, #3107, #3109, #3110) Thanks to @NotAFile!
+-   Update Prometheus dashboard to use new metric names (PR #3069) Thanks to @krombel!
+-   Use python3-compatible prints (PR #3074) Thanks to @NotAFile!
+-   Send federation events concurrently (PR #3078)
+-   Limit concurrent event sends for a room (PR #3079)
+-   Improve R30 stat definition (PR #3086)
+-   Send events to ASes concurrently (PR #3088)
+-   Refactor ResponseCache usage (PR #3093)
+-   Clarify that SRV may not point to a CNAME (PR #3100) Thanks to @silkeh!
+-   Use str(e) instead of e.message (PR #3103) Thanks to @NotAFile!
+-   Use six.itervalues in some places (PR #3106) Thanks to @NotAFile!
+-   Refactor store.have\_events (PR #3117)
+
+Bug Fixes:
+
+-   Return 401 for invalid access\_token on logout (PR #2938) Thanks to @dklug!
+-   Return a 404 rather than a 500 on rejoining empty rooms (PR #3080)
+-   Fix federation\_domain\_whitelist (PR #3099)
+-   Avoid creating events with huge numbers of prev\_events (PR #3113)
+-   Reject events which have lots of prev\_events (PR #3118)
+
+Changes in synapse v0.27.4 (2018-04-13)
+=======================================
+
+Changes:
+
+-   Update canonicaljson dependency (\#3095)
+
+Changes in synapse v0.27.3 (2018-04-11)
+=======================================
+
+Bug fixes:
+
+-   URL quote path segments over federation (\#3082)
+
+Changes in synapse v0.27.3-rc2 (2018-04-09)
+===========================================
+
+v0.27.3-rc1 used a stale version of the develop branch so the changelog overstates the functionality. v0.27.3-rc2 is up to date, rc1 should be ignored.
+
+Changes in synapse v0.27.3-rc1 (2018-04-09)
+===========================================
+
+Notable changes include API support for joinability of groups, plus new metrics and phone-home stats. The phone-home stats give us better visibility of system usage, so we can tune Synapse to work better for all users rather than just from our own experience with matrix.org. We also now record the \'r30\' stat, which is the measure we use to track overall growth of the Matrix ecosystem. It counts the number of native 30-day retained users, defined as users who:
+
+-   created their account more than 30 days ago;
+-   were last seen at most 30 days ago; and
+-   have more than 30 days between account creation and last\_seen.
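+
+As a hypothetical illustration (the names and millisecond units below are assumptions, not Synapse's actual code), the r30 predicate looks roughly like:
+
+```python
+THIRTY_DAYS_MS = 30 * 24 * 60 * 60 * 1000
+
+def is_r30(creation_ts, last_seen_ts, now):
+    """All arguments are timestamps in milliseconds."""
+    return (
+        now - creation_ts > THIRTY_DAYS_MS               # account older than 30 days
+        and now - last_seen_ts <= THIRTY_DAYS_MS         # seen within the last 30 days
+        and last_seen_ts - creation_ts > THIRTY_DAYS_MS  # retained past the first 30 days
+    )
+```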
+
+Features:
+
+-   Add joinability for groups (PR #3045)
+-   Implement group join API (PR #3046)
+-   Add counter metrics for calculating state delta (PR #3033)
+-   R30 stats (PR #3041)
+-   Measure time it takes to calculate state group ID (PR #3043)
+-   Add basic performance statistics to phone home (PR #3044)
+-   Add response size metrics (PR #3071)
+-   phone home cache size configurations (PR #3063)
+
+Changes:
+
+-   Add a blurb explaining the main synapse worker (PR #2886) Thanks to @turt2live!
+-   Replace old style error catching with \'as\' keyword (PR #3000) Thanks to @NotAFile!
+-   Use .iter\* to avoid copies in StateHandler (PR #3006)
+-   Linearize calls to \_generate\_user\_id (PR #3029)
+-   Remove last usage of ujson (PR #3030)
+-   Use simplejson throughout (PR #3048)
+-   Use static JSONEncoders (PR #3049)
+-   Remove uses of events.content (PR #3060)
+-   Improve database cache performance (PR #3068)
+
+Bug fixes:
+
+-   Add room\_id to the response of rooms/{roomId}/join (PR #2986) Thanks to @jplatte!
+-   Fix replication after switch to simplejson (PR #3015)
+-   404 correctly on missing paths via NoResource (PR #3022)
+-   Fix error when claiming e2e keys from offline servers (PR #3034)
+-   fix tests/storage/test\_user\_directory.py (PR #3042)
+-   use PUT instead of POST for federating groups/m.join\_policy (PR #3070) Thanks to @krombel!
+-   postgres port script: fix state\_groups\_pkey error (PR #3072)
+
+Changes in synapse v0.27.2 (2018-03-26)
+=======================================
+
+Bug fixes:
+
+-   Fix bug which broke TCP replication between workers (PR #3015)
+
+Changes in synapse v0.27.1 (2018-03-26)
+=======================================
+
+Meta release as v0.27.0 temporarily pointed to the wrong commit
+
+Changes in synapse v0.27.0 (2018-03-26)
+=======================================
+
+No changes since v0.27.0-rc2
+
+Changes in synapse v0.27.0-rc2 (2018-03-19)
+===========================================
+
+Pulls in v0.26.1
+
+Bug fixes:
+
+-   Fix bug introduced in v0.27.0-rc1 that causes much increased memory usage in state cache (PR #3005)
+
+Changes in synapse v0.26.1 (2018-03-15)
+=======================================
+
+Bug fixes:
+
+-   Fix bug where an invalid event caused server to stop functioning correctly, due to parsing and serializing bugs in ujson library (PR #3008)
+
+Changes in synapse v0.27.0-rc1 (2018-03-14)
+===========================================
+
+The common case for running Synapse is not to run separate workers, but for those that do, be aware that synctl no longer starts the main synapse when using the `-a` option with workers. A new worker file should be added with `worker_app: synapse.app.homeserver`, as sketched below.
+
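+For instance, a minimal such worker file might contain little more than the following (listeners and any other keys are deployment-specific and omitted here):
+
+```yaml
+worker_app: synapse.app.homeserver
+```
+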
+This release also begins the process of renaming a number of the metrics reported to prometheus. See [docs/metrics-howto.rst](docs/metrics-howto.rst#block-and-response-metrics-renamed-for-0-27-0). Note that the v0.28.0 release will remove the deprecated metric names.
+
+Features:
+
+-   Add ability for ASes to override message send time (PR #2754)
+-   Add support for custom storage providers for media repository (PR #2867, #2777, #2783, #2789, #2791, #2804, #2812, #2814, #2857, #2868, #2767)
+-   Add purge API features, see [docs/admin\_api/purge\_history\_api.rst](docs/admin_api/purge_history_api.rst) for full details (PR #2858, #2867, #2882, #2946, #2962, #2943)
+-   Add support for whitelisting 3PIDs that users can register. (PR #2813)
+-   Add `/room/{id}/event/{id}` API (PR #2766)
+-   Add an admin API to get all the media in a room (PR #2818) Thanks to @turt2live!
+-   Add `federation_domain_whitelist` option (PR #2820, #2821)
+
+Changes:
+
+-   Continue to factor out processing from main process and into worker processes. See updated [docs/workers.rst](docs/workers.rst) (PR #2892 - \#2904, #2913, #2920 - \#2926, #2947, #2847, #2854, #2872, #2873, #2874, #2928, #2929, #2934, #2856, #2976 - \#2984, #2987 - \#2989, #2991 - \#2993, #2995, #2784)
+-   Ensure state cache is used when persisting events (PR #2864, #2871, #2802, #2835, #2836, #2841, #2842, #2849)
+-   Change the default config to bind on both IPv4 and IPv6 on all platforms (PR #2435) Thanks to @silkeh!
+-   No longer require a specific version of saml2 (PR #2695) Thanks to @okurz!
+-   Remove `verbosity`/`log_file` from generated config (PR #2755)
+-   Add and improve metrics and logging (PR #2770, #2778, #2785, #2786, #2787, #2793, #2794, #2795, #2809, #2810, #2833, #2834, #2844, #2965, #2927, #2975, #2790, #2796, #2838)
+-   When using synctl with workers, don\'t start the main synapse automatically (PR #2774)
+-   Minor performance improvements (PR #2773, #2792)
+-   Use a connection pool for non-federation outbound connections (PR #2817)
+-   Make it possible to run unit tests against postgres (PR #2829)
+-   Update pynacl dependency to 1.2.1 or higher (PR #2888) Thanks to @bachp!
+-   Remove ability for AS users to call /events and /sync (PR #2948)
+-   Use bcrypt.checkpw (PR #2949) Thanks to @krombel!
+
+Bug fixes:
+
+-   Fix broken `ldap_config` config option (PR #2683) Thanks to @seckrv!
+-   Fix error message when user is not allowed to unban (PR #2761) Thanks to @turt2live!
+-   Fix publicised groups GET API (singular) over federation (PR #2772)
+-   Fix user directory when using `user_directory_search_all_users` config option (PR #2803, #2831)
+-   Fix error on `/publicRooms` when no rooms exist (PR #2827)
+-   Fix bug in quarantine\_media (PR #2837)
+-   Fix url\_previews when no Content-Type is returned from URL (PR #2845)
+-   Fix rare race in sync API when joining room (PR #2944)
+-   Fix slow event search, switch back from GIST to GIN indexes (PR #2769, #2848)
+
+Changes in synapse v0.26.0 (2018-01-05)
+=======================================
+
+No changes since v0.26.0-rc1
+
+Changes in synapse v0.26.0-rc1 (2017-12-13)
+===========================================
+
+Features:
+
+-   Add ability for ASes to publicise groups for their users (PR #2686)
+-   Add all local users to the user\_directory and optionally search them (PR #2723)
+-   Add support for custom login types for validating users (PR #2729)
+
+Changes:
+
+-   Update example Prometheus config to new format (PR #2648) Thanks to @krombel!
+-   Rename redact\_content option to include\_content in Push API (PR #2650)
+-   Declare support for r0.3.0 (PR #2677)
+-   Improve upserts (PR #2684, #2688, #2689, #2713)
+-   Improve documentation of workers (PR #2700)
+-   Improve tracebacks on exceptions (PR #2705)
+-   Allow guest access to group APIs for reading (PR #2715)
+-   Support for posting content in federation\_client script (PR #2716)
+-   Delete devices and pushers on logouts etc (PR #2722)
+
+Bug fixes:
+
+-   Fix database port script (PR #2673)
+-   Fix internal server error on login with ldap\_auth\_provider (PR #2678) Thanks to @jkolo!
+-   Fix error on sqlite 3.7 (PR #2697)
+-   Fix OPTIONS on preview\_url (PR #2707)
+-   Fix error handling on dns lookup (PR #2711)
+-   Fix wrong avatars when inviting multiple users when creating room (PR #2717)
+-   Fix 500 when joining matrix-dev (PR #2719)
+
+Changes in synapse v0.25.1 (2017-11-17)
+=======================================
+
+Bug fixes:
+
+-   Fix login with LDAP and other password provider modules (PR #2678). Thanks to @jkolo!
+
+Changes in synapse v0.25.0 (2017-11-15)
+=======================================
+
+Bug fixes:
+
+-   Fix port script (PR #2673)
+
+Changes in synapse v0.25.0-rc1 (2017-11-14)
+===========================================
+
+Features:
+
+-   Add is\_public to groups table to allow for private groups (PR #2582)
+-   Add a route for determining who you are (PR #2668) Thanks to @turt2live!
+-   Add more features to the password providers (PR #2608, #2610, #2620, #2622, #2623, #2624, #2626, #2628, #2629)
+-   Add a hook for custom rest endpoints (PR #2627)
+-   Add API to update group room visibility (PR #2651)
+
+Changes:
+
+-   Ignore \<noscript\> tags when generating URL preview descriptions (PR #2576) Thanks to @maximevaillancourt!
+-   Register some /unstable endpoints in /r0 as well (PR #2579) Thanks to @krombel!
+-   Support /keys/upload on /r0 as well as /unstable (PR #2585)
+-   Front-end proxy: pass through auth header (PR #2586)
+-   Allow ASes to deactivate their own users (PR #2589)
+-   Remove refresh tokens (PR #2613)
+-   Automatically set default displayname on register (PR #2617)
+-   Log login requests (PR #2618)
+-   Always return is\_public in the /groups/:group\_id/rooms API (PR #2630)
+-   Avoid no-op media deletes (PR #2637) Thanks to @spantaleev!
+-   Fix various embarrassing typos around user\_directory and add some doc. (PR #2643)
+-   Return whether a user is an admin within a group (PR #2647)
+-   Namespace visibility options for groups (PR #2657)
+-   Downcase UserIDs on registration (PR #2662)
+-   Cache failures when fetching URL previews (PR #2669)
+
+Bug fixes:
+
+-   Fix port script (PR #2577)
+-   Fix error when running synapse with no logfile (PR #2581)
+-   Fix UI auth when deleting devices (PR #2591)
+-   Fix typo when checking if user is invited to group (PR #2599)
+-   Fix the port script to drop NUL values in all tables (PR #2611)
+-   Fix appservices being backlogged and not receiving new events due to a bug in notify\_interested\_services (PR #2631) Thanks to @xyzz!
+-   Fix updating rooms avatar/display name when modified by admin (PR #2636) Thanks to @farialima!
+-   Fix bug in state group storage (PR #2649)
+-   Fix 500 on invalid utf-8 in request (PR #2663)
+
+Changes in synapse v0.24.1 (2017-10-24)
+=======================================
+
+Bug fixes:
+
+-   Fix updating group profiles over federation (PR #2567)
+
+Changes in synapse v0.24.0 (2017-10-23)
+=======================================
+
+No changes since v0.24.0-rc1
+
+Changes in synapse v0.24.0-rc1 (2017-10-19)
+===========================================
+
+Features:
+
+-   Add Group Server (PR #2352, #2363, #2374, #2377, #2378, #2382, #2410, #2426, #2430, #2454, #2471, #2472, #2544)
+-   Add support for channel notifications (PR #2501)
+-   Add basic implementation of backup media store (PR #2538)
+-   Add config option to auto-join new users to rooms (PR #2545)
+
+Changes:
+
+-   Make the spam checker a module (PR #2474)
+-   Delete expired url cache data (PR #2478)
+-   Ignore incoming events for rooms that we have left (PR #2490)
+-   Allow spam checker to reject invites too (PR #2492)
+-   Add room creation checks to spam checker (PR #2495)
+-   Spam checking: add the invitee to user\_may\_invite (PR #2502)
+-   Process events from federation for different rooms in parallel (PR #2520)
+-   Allow error strings from spam checker (PR #2531)
+-   Improve error handling for missing files in config (PR #2551)
+
+Bug fixes:
+
+-   Fix handling SERVFAILs when doing AAAA lookups for federation (PR #2477)
+-   Fix incompatibility with newer versions of ujson (PR #2483) Thanks to @jeremycline!
+-   Fix notification keywords that start/end with non-word chars (PR #2500)
+-   Fix stack overflow and logcontexts from linearizer (PR #2532)
+-   Fix 500 error when fields missing from power\_levels event (PR #2552)
+-   Fix 500 error when we get an error handling a PDU (PR #2553)
+
+Changes in synapse v0.23.1 (2017-10-02)
+=======================================
+
+Changes:
+
+-   Make \'affinity\' package optional, as it is not supported on some platforms
+
+Changes in synapse v0.23.0 (2017-10-02)
+=======================================
+
+No changes since v0.23.0-rc2
+
+Changes in synapse v0.23.0-rc2 (2017-09-26)
+===========================================
+
+Bug fixes:
+
+-   Fix regression in performance of syncs (PR #2470)
+
+Changes in synapse v0.23.0-rc1 (2017-09-25)
+===========================================
+
+Features:
+
+-   Add a frontend proxy worker (PR #2344)
+-   Add support for event\_id\_only push format (PR #2450)
+-   Add a PoC for filtering spammy events (PR #2456)
+-   Add a config option to block all room invites (PR #2457)
+
+Changes:
+
+-   Use bcrypt module instead of py-bcrypt (PR #2288) Thanks to @kyrias!
+-   Improve performance of generating push notifications (PR #2343, #2357, #2365, #2366, #2371)
+-   Improve DB performance for device list handling in sync (PR #2362)
+-   Include a sample prometheus config (PR #2416)
+-   Document known-to-work postgres version (PR #2433) Thanks to @ptman!
+
+Bug fixes:
+
+-   Fix caching error in the push evaluator (PR #2332)
+-   Fix bug where pusherpool didn\'t start and broke some rooms (PR #2342)
+-   Fix port script for user directory tables (PR #2375)
+-   Fix device lists notifications when user rejoins a room (PR #2443, #2449)
+-   Fix sync to always send down current state events in timeline (PR #2451)
+-   Fix bug where guest users were incorrectly kicked (PR #2453)
+-   Fix bug talking to IPv6 only servers using SRV records (PR #2462)
+
+Changes in synapse v0.22.1 (2017-07-06)
+=======================================
+
+Bug fixes:
+
+-   Fix bug where pusher pool didn\'t start and caused issues when interacting with some rooms (PR #2342)
+
+Changes in synapse v0.22.0 (2017-07-06)
+=======================================
+
+No changes since v0.22.0-rc2
+
+Changes in synapse v0.22.0-rc2 (2017-07-04)
+===========================================
+
+Changes:
+
+-   Improve performance of storing user IPs (PR #2307, #2308)
+-   Slightly improve performance of verifying access tokens (PR #2320)
+-   Slightly improve performance of event persistence (PR #2321)
+-   Increase default cache factor size from 0.1 to 0.5 (PR #2330)
+
+Bug fixes:
+
+-   Fix bug with storing registration sessions that caused frequent CPU churn (PR #2319)
+
+Changes in synapse v0.22.0-rc1 (2017-06-26)
+===========================================
+
+Features:
+
+-   Add a user directory API (PR #2252, and many more)
+-   Add shutdown room API to remove room from local server (PR #2291)
+-   Add API to quarantine media (PR #2292)
+-   Add new config option to not send event contents to push servers (PR #2301) Thanks to @cjdelisle!
+
+Changes:
+
+-   Various performance fixes (PR #2177, #2233, #2230, #2238, #2248, #2256, #2274)
+-   Deduplicate sync filters (PR #2219) Thanks to @krombel!
+-   Correct a typo in UPGRADE.rst (PR #2231) Thanks to @aaronraimist!
+-   Add count of one time keys to sync stream (PR #2237)
+-   Only store event\_auth for state events (PR #2247)
+-   Store URL cache preview downloads separately (PR #2299)
+
+Bug fixes:
+
+-   Fix users not getting notifications when AS listened to that user\_id (PR #2216) Thanks to @slipeer!
+-   Fix users without push set up not getting notifications after joining rooms (PR #2236)
+-   Fix preview url API to trim long descriptions (PR #2243)
+-   Fix bug where we used cached but unpersisted state group as prev group, resulting in broken state on restart (PR #2263)
+-   Fix removing of pushers when using workers (PR #2267)
+-   Fix CORS headers to allow Authorization header (PR #2285) Thanks to @krombel!
+
+Changes in synapse v0.21.1 (2017-06-15)
+=======================================
+
+Bug fixes:
+
+-   Fix bug in anonymous usage statistic reporting (PR #2281)
+
+Changes in synapse v0.21.0 (2017-05-18)
+=======================================
+
+No changes since v0.21.0-rc3
+
+Changes in synapse v0.21.0-rc3 (2017-05-17)
+===========================================
+
+Features:
+
+-   Add per user rate-limiting overrides (PR #2208)
+-   Add config option to limit maximum number of events requested by `/sync` and `/messages` (PR #2221) Thanks to @psaavedra!
+
+Changes:
+
+-   Various small performance fixes (PR #2201, #2202, #2224, #2226, #2227, #2228, #2229)
+-   Update username availability checker API (PR #2209, #2213)
+-   When purging, don\'t de-delta state groups we\'re about to delete (PR #2214)
+-   Documentation to check synapse version (PR #2215) Thanks to @hamber-dick!
+-   Add an index to event\_search to speed up purge history API (PR #2218)
+
+Bug fixes:
+
+-   Fix API to allow clients to upload one-time-keys with new sigs (PR #2206)
+
+Changes in synapse v0.21.0-rc2 (2017-05-08)
+===========================================
+
+Changes:
+
+-   Always mark remotes as up if we receive a signed request from them (PR #2190)
+
+Bug fixes:
+
+-   Fix bug where users got pushed for rooms they had muted (PR #2200)
+
+Changes in synapse v0.21.0-rc1 (2017-05-08)
+===========================================
+
+Features:
+
+-   Add username availability checker API (PR #2183)
+-   Add read marker API (PR #2120)
+
+Changes:
+
+-   Enable guest access for the 3pl/3pid APIs (PR #1986)
+-   Add setting to support TURN for guests (PR #2011)
+-   Various performance improvements (PR #2075, #2076, #2080, #2083, #2108, #2158, #2176, #2185)
+-   Make synctl a bit more user friendly (PR #2078, #2127) Thanks @APwhitehat!
+-   Replace HTTP replication with TCP replication (PR #2082, #2097, #2098, #2099, #2103, #2014, #2016, #2115, #2116, #2117)
+-   Support authenticated SMTP (PR #2102) Thanks @DanielDent!
+-   Add a counter metric for successfully-sent transactions (PR #2121)
+-   Propagate errors sensibly from proxied IS requests (PR #2147)
+-   Add more granular event send metrics (PR #2178)
+
+Bug fixes:
+
+-   Fix nuke-room script to work with current schema (PR #1927) Thanks @zuckschwerdt!
+-   Fix db port script to not assume postgres tables are in the public schema (PR #2024) Thanks @jerrykan!
+-   Fix getting latest device IP for user with no devices (PR #2118)
+-   Fix rejection of invites to unreachable servers (PR #2145)
+-   Fix code for reporting old verify keys in synapse (PR #2156)
+-   Fix invite state to always include all events (PR #2163)
+-   Fix bug where synapse would always fetch state for any missing event (PR #2170)
+-   Fix a leak with timed out HTTP connections (PR #2180)
+-   Fix bug where we didn\'t time out HTTP requests to ASes (PR #2192)
+
+Docs:
+
+-   Clarify doc for SQLite to PostgreSQL port (PR #1961) Thanks @benhylau!
+-   Fix typo in synctl help (PR #2107) Thanks @HarHarLinks!
+-   `web_client_location` documentation fix (PR #2131) Thanks @matthewjwolff!
+-   Update README.rst with FreeBSD changes (PR #2132) Thanks @feld!
+-   Clarify setting up metrics (PR #2149) Thanks @encks!
+
+Changes in synapse v0.20.0 (2017-04-11)
+=======================================
+
+Bug fixes:
+
+-   Fix joining rooms over federation where not all servers in the room saw the new server had joined (PR #2094)
+
+Changes in synapse v0.20.0-rc1 (2017-03-30)
+===========================================
+
+Features:
+
+-   Add delete\_devices API (PR #1993)
+-   Add phone number registration/login support (PR #1994, #2055)
+
+Changes:
+
+-   Use JSONSchema for validation of filters. Thanks @pik! (PR #1783)
+-   Reread log config on SIGHUP (PR #1982)
+-   Speed up public room list (PR #1989)
+-   Add helpful texts to logger config options (PR #1990)
+-   Minor `/sync` performance improvements. (PR #2002, #2013, #2022)
+-   Add some debug to help diagnose weird federation issue (PR #2035)
+-   Correctly limit retries for all federation requests (PR #2050, #2061)
+-   Don\'t lock table when persisting new one time keys (PR #2053)
+-   Reduce some CPU work on DB threads (PR #2054)
+-   Cache hosts in room (PR #2060)
+-   Batch sending of device list pokes (PR #2063)
+-   Speed up persist event path in certain edge cases (PR #2070)
+
+Bug fixes:
+
+-   Fix bug where current\_state\_events renamed to current\_state\_ids (PR #1849)
+-   Fix routing loop when fetching remote media (PR #1992)
+-   Fix current\_state\_events table to not lie (PR #1996)
+-   Fix CAS login to handle PartialDownloadError (PR #1997)
+-   Fix assertion to stop transaction queue getting wedged (PR #2010)
+-   Fix presence to fallback to last\_active\_ts if it beats the last sync time. Thanks @Half-Shot! (PR #2014)
+-   Fix bug when federation received a PDU while a room join is in progress (PR #2016)
+-   Fix resetting state on rejected events (PR #2025)
+-   Fix installation issues in readme. Thanks @ricco386 (PR #2037)
+-   Fix caching of remote servers\' signature keys (PR #2042)
+-   Fix some leaking log context (PR #2048, #2049, #2057, #2058)
+-   Fix rejection of invites not reaching sync (PR #2056)
+
+Changes in synapse v0.19.3 (2017-03-20)
+=======================================
+
+No changes since v0.19.3-rc2
+
+Changes in synapse v0.19.3-rc2 (2017-03-13)
+===========================================
+
+Bug fixes:
+
+-   Fix bug in handling of incoming device list updates over federation.
+
+Changes in synapse v0.19.3-rc1 (2017-03-08)
+===========================================
+
+Features:
+
+-   Add some administration functionalities. Thanks to morteza-araby! (PR #1784)
+
+Changes:
+
+-   Reduce database table sizes (PR #1873, #1916, #1923, #1963)
+-   Update contrib/ to not use syutil. Thanks to andrewshadura! (PR #1907)
+-   Don\'t fetch current state when sending an event in common case (PR #1955)
+
+Bug fixes:
+
+-   Fix synapse\_port\_db failure. Thanks to Pneumaticat! (PR #1904)
+-   Fix caching to not cache error responses (PR #1913)
+-   Fix APIs to make kick & ban reasons work (PR #1917)
+-   Fix bugs in the /keys/changes api (PR #1921)
+-   Fix bug where users couldn\'t forget rooms they were banned from (PR #1922)
+-   Fix issue with long language values in pushers API (PR #1925)
+-   Fix a race in transaction queue (PR #1930)
+-   Fix dynamic thumbnailing to preserve aspect ratio. Thanks to jkolo! (PR #1945)
+-   Fix device list update to not constantly resync (PR #1964)
+-   Fix potential for huge memory usage when getting devices that have changed (PR #1969)
+
+Changes in synapse v0.19.2 (2017-02-20)
+=======================================
+
+-   Fix bug with event visibility check in /context/ API. Thanks to Tokodomo for pointing it out! (PR #1929)
+
+Changes in synapse v0.19.1 (2017-02-09)
+=======================================
+
+-   Fix bug where state was incorrectly reset in a room when synapse received an event over federation that did not pass auth checks (PR #1892)
+
+Changes in synapse v0.19.0 (2017-02-04)
+=======================================
+
+No changes since RC 4.
+
+Changes in synapse v0.19.0-rc4 (2017-02-02)
+===========================================
+
+-   Bump cache sizes for common membership queries (PR #1879)
+
+Changes in synapse v0.19.0-rc3 (2017-02-02)
+===========================================
+
+-   Fix email push in pusher worker (PR #1875)
+-   Make presence.get\_new\_events a bit faster (PR #1876)
+-   Make /keys/changes a bit more performant (PR #1877)
+
+Changes in synapse v0.19.0-rc2 (2017-02-02)
+===========================================
+
+-   Include newly joined users in /keys/changes API (PR #1872)
+
+Changes in synapse v0.19.0-rc1 (2017-02-02)
+===========================================
+
+Features:
+
+-   Add support for specifying multiple bind addresses (PR #1709, #1712, #1795, #1835). Thanks to @kyrias!
+-   Add /account/3pid/delete endpoint (PR #1714)
+-   Add config option to configure the Riot URL used in notification emails (PR #1811). Thanks to @aperezdc!
+-   Add username and password config options for turn server (PR #1832). Thanks to @xsteadfastx!
+-   Implement device lists updates over federation (PR #1857, #1861, #1864)
+-   Implement /keys/changes (PR #1869, #1872)
+
+Changes:
+
+-   Improve IPv6 support (PR #1696). Thanks to @kyrias and @glyph!
+-   Log which files we saved attachments to in the media\_repository (PR #1791)
+-   Linearize updates to membership via PUT /state/ to better handle multiple joins (PR #1787)
+-   Limit number of entries to prefill from cache on startup (PR #1792)
+-   Remove full\_twisted\_stacktraces option (PR #1802)
+-   Measure size of some caches by sum of the size of cached values (PR #1815)
+-   Measure metrics of string\_cache (PR #1821)
+-   Reduce logging verbosity (PR #1822, #1823, #1824)
+-   Don\'t clobber a displayname or avatar\_url if provided by an m.room.member event (PR #1852)
+-   Better handle 401/404 response for federation /send/ (PR #1866, #1871)
+
+Fixes:
+
+-   Fix ability to change password to a non-ascii one (PR #1711)
+-   Fix push getting stuck due to looking at the wrong view of state (PR #1820)
+-   Fix email address comparison to be case insensitive (PR #1827)
+-   Fix occasional inconsistencies of room membership (PR #1836, #1840)
+
+Performance:
+
+-   Don\'t block messages sending on bumping presence (PR #1789)
+-   Change device\_inbox stream index to include user (PR #1793)
+-   Optimise state resolution (PR #1818)
+-   Use DB cache of joined users for presence (PR #1862)
+-   Add an index to make membership queries faster (PR #1867)
+
+Changes in synapse v0.18.7 (2017-01-09)
+=======================================
+
+No changes from v0.18.7-rc2
+
+Changes in synapse v0.18.7-rc2 (2017-01-07)
+===========================================
+
+Bug fixes:
+
+-   Fix error in rc1\'s logic for discarding invalid inbound traffic, which was incorrectly discarding missing events
+
+Changes in synapse v0.18.7-rc1 (2017-01-06)
+===========================================
+
+Bug fixes:
+
+-   Fix error in PR \#1764 to actually fix the nightmare \#1753 bug.
+-   Improve deadlock logging further
+-   Discard inbound federation traffic from invalid domains, to immunise against \#1753
+
+Changes in synapse v0.18.6 (2017-01-06)
+=======================================
+
+Bug fixes:
+
+-   Fix bug when checking if a guest user is allowed to join a room (PR #1772) Thanks to Patrik Oldsberg for diagnosing and the fix!
+
+Changes in synapse v0.18.6-rc3 (2017-01-05)
+===========================================
+
+Bug fixes:
+
+-   Fix bug where we failed to send ban events to the banned server (PR #1758)
+-   Fix bug where we sent event that didn\'t originate on this server to other servers (PR #1764)
+-   Fix bug where processing an event from a remote server took a long time because we were making long HTTP requests (PR #1765, PR #1744)
+
+Changes:
+
+-   Improve logging for debugging deadlocks (PR #1766, PR #1767)
+
+Changes in synapse v0.18.6-rc2 (2016-12-30)
+===========================================
+
+Bug fixes:
+
+-   Fix memory leak in twisted by initialising logging correctly (PR #1731)
+-   Fix bug where fetching missing events took an unacceptable amount of time in large rooms (PR #1734)
+
+Changes in synapse v0.18.6-rc1 (2016-12-29)
+===========================================
+
+Bug fixes:
+
+-   Make sure that outbound connections are closed (PR #1725)
+
+Changes in synapse v0.18.5 (2016-12-16)
+=======================================
+
+Bug fixes:
+
+-   Fix federation /backfill returning events it shouldn\'t (PR #1700)
+-   Fix crash in url preview (PR #1701)
+
+Changes in synapse v0.18.5-rc3 (2016-12-13)
+===========================================
+
+Features:
+
+-   Add support for E2E for guests (PR #1653)
+-   Add new API appservice specific public room list (PR #1676)
+-   Add new room membership APIs (PR #1680)
+
+Changes:
+
+-   Enable guest access for private rooms by default (PR #653)
+-   Limit the number of events that can be created on a given room concurrently (PR #1620)
+-   Log the args that we have on UI auth completion (PR #1649)
+-   Stop generating refresh\_tokens (PR #1654)
+-   Stop putting a time caveat on access tokens (PR #1656)
+-   Remove unspecced GET endpoints for e2e keys (PR #1694)
+
+Bug fixes:
+
+-   Fix handling of 500 and 429\'s over federation (PR #1650)
+-   Fix Content-Type header parsing (PR #1660)
+-   Fix error when previewing sites that include unicode, thanks to kyrias (PR #1664)
+-   Fix some cases where we drop read receipts (PR #1678)
+-   Fix bug where calls to `/sync` didn\'t correctly timeout (PR #1683)
+-   Fix bug where E2E key query would fail if a single remote host failed (PR #1686)
+
+Changes in synapse v0.18.5-rc2 (2016-11-24)
+===========================================
+
+Bug fixes:
+
+-   Don\'t send old events over federation; fixes a bug in -rc1.
+
+Changes in synapse v0.18.5-rc1 (2016-11-24)
+===========================================
+
+Features:
+
+-   Implement \"event\_fields\" in filters (PR #1638)
+
+Changes:
+
+-   Use external ldap auth package (PR #1628)
+-   Split out federation transaction sending to a worker (PR #1635)
+-   Fail with a coherent error message if /sync?filter= is invalid (PR #1636)
+-   More efficient notif count queries (PR #1644)
+
+Changes in synapse v0.18.4 (2016-11-22)
+=======================================
+
+Bug fixes:
+
+-   Add workaround for buggy clients that fail to register (PR #1632)
+
+Changes in synapse v0.18.4-rc1 (2016-11-14)
+===========================================
+
+Changes:
+
+-   Various database efficiency improvements (PR #1188, #1192)
+-   Update default config to blacklist more internal IPs, thanks to Euan Kemp (PR #1198)
+-   Allow specifying duration in minutes in config, thanks to Daniel Dent (PR #1625)
+
+Bug fixes:
+
+-   Fix media repo to set CORS headers on responses (PR #1190)
+-   Fix registration to not error on non-ascii passwords (PR #1191)
+-   Fix create event code to limit the number of prev\_events (PR #1615)
+-   Fix bug in transaction ID deduplication (PR #1624)
+
+Changes in synapse v0.18.3 (2016-11-08)
+=======================================
+
+SECURITY UPDATE
+
+Explicitly require authentication when using LDAP3. This is the default on versions of `ldap3` above 1.0, but some distributions will package an older version.
+
+If you are using LDAP3 login and have a version of `ldap3` older than 1.0, it is **CRITICAL to upgrade**.
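+
+As a sketch of the failure mode (illustrative Python, not Synapse's actual code): with a pre-1.0 `ldap3`, a bind attempted with an empty password could silently succeed as an anonymous bind, so credentials must be checked before binding:
+
+```python
+import ldap3
+
+def bind_checked(server_uri, user_dn, password):
+    if not password:
+        return False  # never fall through to an anonymous bind
+    server = ldap3.Server(server_uri)
+    conn = ldap3.Connection(server, user=user_dn, password=password)
+    return conn.bind()  # True only if the directory accepts the credentials
+```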
+
+Changes in synapse v0.18.2 (2016-11-01)
+=======================================
+
+No changes since v0.18.2-rc5
+
+Changes in synapse v0.18.2-rc5 (2016-10-28)
+===========================================
+
+Bug fixes:
+
+-   Fix prometheus process metrics in worker processes (PR #1184)
+
+Changes in synapse v0.18.2-rc4 (2016-10-27)
+===========================================
+
+Bug fixes:
+
+-   Fix `user_threepids` schema delta, which in some instances prevented startup after upgrade (PR #1183)
+
+Changes in synapse v0.18.2-rc3 (2016-10-27)
+===========================================
+
+Changes:
+
+-   Allow clients to supply access tokens as headers (PR #1098)
+-   Clarify error codes for GET /filter/, thanks to Alexander Maznev (PR #1164)
+-   Make password reset email field case insensitive (PR #1170)
+-   Reduce redundant database work in email pusher (PR #1174)
+-   Allow configurable rate limiting per AS (PR #1175)
+-   Check whether to ratelimit sooner to avoid work (PR #1176)
+-   Standardise prometheus metrics (PR #1177)
+
+Bug fixes:
+
+-   Fix incredibly slow back pagination query (PR #1178)
+-   Fix infinite typing bug (PR #1179)
+
+Changes in synapse v0.18.2-rc2 (2016-10-25)
+===========================================
+
+(This release did not include the changes advertised and was identical to RC1)
+
+Changes in synapse v0.18.2-rc1 (2016-10-17)
+===========================================
+
+Changes:
+
+-   Remove redundant event\_auth index (PR #1113)
+-   Reduce DB hits for replication (PR #1141)
+-   Implement pluggable password auth (PR #1155)
+-   Remove rate limiting from app service senders and fix get\_or\_create\_user requester, thanks to Patrik Oldsberg (PR #1157)
+-   window.postmessage for Interactive Auth fallback (PR #1159)
+-   Use sys.executable instead of hardcoded python, thanks to Pedro Larroy (PR #1162)
+-   Add config option for adding additional TLS fingerprints (PR #1167)
+-   User-interactive auth on delete device (PR #1168)
+
+Bug fixes:
+
+-   Fix not being allowed to set your own state\_key, thanks to Patrik Oldsberg (PR #1150)
+-   Fix interactive auth to return 401 for an incorrect password (PR #1160, #1166)
+-   Fix email push notifs being dropped (PR #1169)
+
+Changes in synapse v0.18.1 (2016-10-05)
+=======================================
+
+No changes since v0.18.1-rc1
+
+Changes in synapse v0.18.1-rc1 (2016-09-30)
+===========================================
+
+Features:
+
+-   Add total\_room\_count\_estimate to `/publicRooms` (PR #1133)
+
+Changes:
+
+-   Time out typing over federation (PR #1140)
+-   Restructure LDAP authentication (PR #1153)
+
+Bug fixes:
+
+-   Fix 3pid invites when server is already in the room (PR #1136)
+-   Fix upgrading with SQLite taking lots of CPU for a few days after upgrade (PR #1144)
+-   Fix upgrading from very old database versions (PR #1145)
+-   Fix port script to work with recently added tables (PR #1146)
+
+Changes in synapse v0.18.0 (2016-09-19)
+=======================================
+
+The release includes major changes to the state storage database schemas, which significantly reduce database size. Synapse will attempt to upgrade the current data in the background. Servers with large SQLite databases may experience degraded performance while this upgrade is in progress; you may therefore want to consider migrating to Postgres before upgrading very large SQLite databases.
+
+Changes:
+
+-   Make public room search case insensitive (PR #1127)
+
+Bug fixes:
+
+-   Fix and clean up publicRooms pagination (PR #1129)
+
+Changes in synapse v0.18.0-rc1 (2016-09-16)
+===========================================
+
+Features:
+
+-   Add `only=highlight` on `/notifications` (PR #1081)
+-   Add server param to /publicRooms (PR #1082)
+-   Allow clients to ask for the whole of a single state event (PR #1094)
+-   Add is\_direct param to /createRoom (PR #1108)
+-   Add pagination support to publicRooms (PR #1121)
+-   Add very basic filter API to /publicRooms (PR #1126)
+-   Add basic direct to device messaging support for E2E (PR #1074, #1084, #1104, #1111)
+
+Changes:
+
+-   Move to storing state\_groups\_state as deltas, greatly reducing DB size (PR #1065)
+-   Reduce amount of state pulled out of the DB during common requests (PR #1069)
+-   Allow PDF to be rendered from media repo (PR #1071)
+-   Reindex state\_groups\_state after pruning (PR #1085)
+-   Clobber EDUs in send queue (PR #1095)
+-   Conform better to the CAS protocol specification (PR #1100)
+-   Limit how often we ask for keys from dead servers (PR #1114)
+
+Bug fixes:
+
+-   Fix /notifications API when used with `from` param (PR #1080)
+-   Fix backfill when we cannot find an event. (PR #1107)
+
+Changes in synapse v0.17.3 (2016-09-09)
+=======================================
+
+This release fixes a major bug that stopped servers from handling rooms with over 1000 members.
+
+Changes in synapse v0.17.2 (2016-09-08)
+=======================================
+
+This release contains security bug fixes. Please upgrade.
+
+No changes since v0.17.2-rc1
+
+Changes in synapse v0.17.2-rc1 (2016-09-05)
+===========================================
+
+Features:
+
+-   Start adding store-and-forward direct-to-device messaging (PR #1046, #1050, #1062, #1066)
+
+Changes:
+
+-   Avoid pulling the full state of a room out so often (PR #1047, #1049, #1063, #1068)
+-   Don\'t notify for online to online presence transitions. (PR #1054)
+-   Occasionally persist unpersisted presence updates (PR #1055)
+-   Allow application services to have an optional \'url\' (PR #1056)
+-   Clean up old sent transactions from DB (PR #1059)
+
+Bug fixes:
+
+-   Fix None check in backfill (PR #1043)
+-   Fix membership changes to be idempotent (PR #1067)
+-   Fix bug in get\_pdu where it would sometimes return events with incorrect signature
+
+Changes in synapse v0.17.1 (2016-08-24)
+=======================================
+
+Changes:
+
+-   Delete old received\_transactions rows (PR #1038)
+-   Pass through user-supplied content in /join/\$room\_id (PR #1039)
+
+Bug fixes:
+
+-   Fix bug with backfill (PR #1040)
+
+Changes in synapse v0.17.1-rc1 (2016-08-22)
+===========================================
+
+Features:
+
+-   Add notification API (PR #1028)
+
+Changes:
+
+-   Don\'t print stack traces when failing to get remote keys (PR #996)
+-   Various federation /event/ perf improvements (PR #998)
+-   Only process one local membership event per room at a time (PR #1005)
+-   Move default display name push rule (PR #1011, #1023)
+-   Fix up preview URL API. Add tests. (PR #1015)
+-   Set `Content-Security-Policy` on media repo (PR #1021)
+-   Make notify\_interested\_services faster (PR #1022)
+-   Add usage stats to prometheus monitoring (PR #1037)
+
+Bug fixes:
+
+-   Fix token login (PR #993)
+-   Fix CAS login (PR #994, #995)
+-   Fix /sync to not clobber status\_msg (PR #997)
+-   Fix redacted state events to include prev\_content (PR #1003)
+-   Fix some bugs in the auth/ldap handler (PR #1007)
+-   Fix backfill request to limit URI length, so that remotes don\'t reject the requests due to path length limits (PR #1012)
+-   Fix AS push code to not send duplicate events (PR #1025)
+
+Changes in synapse v0.17.0 (2016-08-08)
+=======================================
+
+This release contains significant security bug fixes regarding authenticating events received over federation. PLEASE UPGRADE.
+
+This release changes the LDAP configuration format in a backwards incompatible way, see PR #843 for details.
+
+Changes:
+
+-   Add federation /version API (PR #990)
+-   Make psutil dependency optional (PR #992)
+
+Bug fixes:
+
+-   Fix URL preview API to exclude HTML comments in description (PR #988)
+-   Fix error handling of remote joins (PR #991)
+
+Changes in synapse v0.17.0-rc4 (2016-08-05)
+===========================================
+
+Changes:
+
+-   Change the way we summarize URLs when previewing (PR #973)
+-   Add new `/state_ids/` federation API (PR #979)
+-   Speed up processing of `/state/` response (PR #986)
+
+Bug fixes:
+
+-   Fix event persistence when event has already been partially persisted (PR #975, #983, #985)
+-   Fix port script to also copy across backfilled events (PR #982)
+
+Changes in synapse v0.17.0-rc3 (2016-08-02)
+===========================================
+
+Changes:
+
+-   Forbid non-ASes from registering users whose names begin with \'\_\' (PR #958)
+-   Add some basic admin API docs (PR #963)
+
+Bug fixes:
+
+-   Send the correct host header when fetching keys (PR #941)
+-   Fix joining a room that has missing auth events (PR #964)
+-   Fix various push bugs (PR #966, #970)
+-   Fix adding emails on registration (PR #968)
+
+Changes in synapse v0.17.0-rc2 (2016-08-02)
+===========================================
+
+(This release did not include the changes advertised and was identical to RC1)
+
+Changes in synapse v0.17.0-rc1 (2016-07-28)
+===========================================
+
+This release changes the LDAP configuration format in a backwards incompatible way, see PR #843 for details.
+
+Features:
+
+-   Add purge\_media\_cache admin API (PR #902)
+-   Add deactivate account admin API (PR #903)
+-   Add optional pepper to password hashing (PR #907, #910 by KentShikama)
+-   Add an admin option to shared secret registration (breaks backwards compat) (PR #909)
+-   Add purge local room history API (PR #911, #923, #924)
+-   Add requestToken endpoints (PR #915)
+-   Add an /account/deactivate endpoint (PR #921)
+-   Add filter param to /messages. Add \'contains\_url\' to filter. (PR #922)
+-   Add device\_id support to /login (PR #929)
+-   Add device\_id support to /v2/register flow. (PR #937, #942)
+-   Add GET /devices endpoint (PR #939, #944)
+-   Add GET /device/{deviceId} (PR #943)
+-   Add update and delete APIs for devices (PR #949)
+
+Changes:
+
+-   Rewrite LDAP Authentication against ldap3 (PR #843 by mweinelt)
+-   Linearize some federation endpoints based on (origin, room\_id) (PR #879)
+-   Remove the legacy v0 content upload API. (PR #888)
+-   Use similar naming we use in email notifs for push (PR #894)
+-   Optionally include password hash in createUser endpoint (PR #905 by KentShikama)
+-   Use a query that postgresql optimises better for get\_events\_around (PR #906)
+-   Fall back to \'username\' if \'user\' is not given for appservice registration. (PR #927 by Half-Shot)
+-   Add metrics for psutil derived memory usage (PR #936)
+-   Record device\_id in client\_ips (PR #938)
+-   Send the correct host header when fetching keys (PR #941)
+-   Log the hostname the reCAPTCHA was completed on (PR #946)
+-   Make the device id on e2e key upload optional (PR #956)
+-   Add r0.2.0 to the \"supported versions\" list (PR #960)
+-   Don\'t include name of room for invites in push (PR #961)
+
+Bug fixes:
+
+-   Fix substitution failure in mail template (PR #887)
+-   Put most recent 20 messages in email notif (PR #892)
+-   Ensure that the guest user is in the database when upgrading accounts (PR #914)
+-   Fix various edge cases in auth handling (PR #919)
+-   Fix 500 ISE when sending alias event without a state\_key (PR #925)
+-   Fix bug where we stored rejections in the state\_group, persist all rejections (PR #948)
+-   Fix lack of check of if the user is banned when handling 3pid invites (PR #952)
+-   Fix a couple of bugs in the transaction and keyring code (PR #954, #955)
+
+Changes in synapse v0.16.1-r1 (2016-07-08)
+==========================================
+
+THIS IS A CRITICAL SECURITY UPDATE.
+
+This fixes a bug which allowed users\' accounts to be accessed by unauthorised users.
+
+Changes in synapse v0.16.1 (2016-06-20)
+=======================================
+
+Bug fixes:
+
+-   Fix assorted bugs in `/preview_url` (PR #872)
+-   Fix TypeError when setting unicode passwords (PR #873)
+
+Performance improvements:
+
+-   Turn `use_frozen_events` off by default (PR #877)
+-   Disable responding with canonical json for federation (PR #878)
+
+Changes in synapse v0.16.1-rc1 (2016-06-15)
+===========================================
+
+Features: None
+
+Changes:
+
+-   Log requester for `/publicRoom` endpoints when possible (PR #856)
+-   502 on `/thumbnail` when can\'t connect to remote server (PR #862)
+-   Linearize fetching of gaps on incoming events (PR #871)
+
+Bug fixes:
+
+-   Fix bug where rooms were marked as published by default (PR #857)
+-   Fix bug where joining room with an event with invalid sender (PR #868)
+-   Fix bug where backfilled events were sent down sync streams (PR #869)
+-   Fix bug where outgoing connections could wedge indefinitely, causing push notifications to be unreliable (PR #870)
+
+Performance improvements:
+
+-   Improve `/publicRooms` performance (PR #859)
+
+Changes in synapse v0.16.0 (2016-06-09)
+=======================================
+
+NB: As of v0.14 all AS config files must have an ID field.
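+
+A sketch of an AS registration file illustrating the required `id` field (all values below are placeholders, not defaults):
+
+```yaml
+id: "my_bridge"
+url: "http://localhost:9000"
+as_token: "<secret>"
+hs_token: "<secret>"
+sender_localpart: "bridgebot"
+namespaces:
+  users: []
+  aliases: []
+  rooms: []
+```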
+
+Bug fixes:
+
+-   Don\'t make rooms published by default (PR #857)
+
+Changes in synapse v0.16.0-rc2 (2016-06-08)
+===========================================
+
+Features:
+
+-   Add configuration option for tuning GC via `gc.set_threshold` (PR #849)
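+
+    For reference, the underlying stdlib call takes three integer thresholds; how the config option plumbs through to it is not shown here:
+
+    ```python
+    import gc
+
+    gc.set_threshold(700, 10, 10)  # CPython's defaults; raising the first value makes GC run less often
+    ```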
+
+Changes:
+
+-   Record metrics about GC (PR #771, #847, #852)
+-   Add metric counter for number of persisted events (PR #841)
+
+Bug fixes:
+
+-   Fix \'From\' header in email notifications (PR #843)
+-   Fix presence where timeouts were not being fired for the first 8h after restarts (PR #842)
+-   Fix bug where synapse sent malformed transactions to AS\'s when retrying transactions (Commits 310197b, 8437906)
+
+Performance improvements:
+
+-   Remove event fetching from DB threads (PR #835)
+-   Change the way we cache events (PR #836)
+-   Add events to cache when we persist them (PR #840)
+
+Changes in synapse v0.16.0-rc1 (2016-06-03)
+===========================================
+
+Version 0.15 was not released. See v0.15.0-rc1 below for additional changes.
+
+Features:
+
+-   Add email notifications for missed messages (PR #759, #786, #799, #810, #815, #821)
+-   Add a `url_preview_ip_range_whitelist` config param (PR #760)
+-   Add /report endpoint (PR #762)
+-   Add basic ignore user API (PR #763)
+-   Add an openidish mechanism for proving that you own a given user\_id (PR #765)
+-   Allow clients to specify a server\_name to avoid \'No known servers\' (PR #794)
+-   Add secondary\_directory\_servers option to fetch room list from other servers (PR #808, #813)
+
+Changes:
+
+-   Report per request metrics for all of the things using request\_handler (PR #756)
+-   Correctly handle `NULL` password hashes from the database (PR #775)
+-   Allow receipts for events we haven\'t seen in the db (PR #784)
+-   Make synctl read a cache factor from config file (PR #785)
+-   Increment badge count per missed convo, not per msg (PR #793)
+-   Special case m.room.third\_party\_invite event auth to match invites (PR #814)
+
+Bug fixes:
+
+-   Fix typo in event\_auth servlet path (PR #757)
+-   Fix password reset (PR #758)
+
+Performance improvements:
+
+-   Reduce database inserts when sending transactions (PR #767)
+-   Queue events by room for persistence (PR #768)
+-   Add cache to `get_user_by_id` (PR #772)
+-   Add and use `get_domain_from_id` (PR #773)
+-   Use tree cache for `get_linearized_receipts_for_room` (PR #779)
+-   Remove unused indices (PR #782)
+-   Add caches to `bulk_get_push_rules*` (PR #804)
+-   Cache `get_event_reference_hashes` (PR #806)
+-   Add `get_users_with_read_receipts_in_room` cache (PR #809)
+-   Use state to calculate `get_users_in_room` (PR #811)
+-   Load push rules in storage layer so that they get cached (PR #825)
+-   Make `get_joined_hosts_for_room` use get\_users\_in\_room (PR #828)
+-   Poke notifier on next reactor tick (PR #829)
+-   Change CacheMetrics to be quicker (PR #830)
+
+Changes in synapse v0.15.0-rc1 (2016-04-26)
+===========================================
+
+Features:
+
+-   Add login support for JavaScript Web Tokens, thanks to Niklas Riekenbrauck (PR #671, \#687)
+-   Add URL previewing support (PR #688)
+-   Add login support for LDAP, thanks to Christoph Witzany (PR #701)
+-   Add GET endpoint for pushers (PR #716)
+
+Changes:
+
+-   Never notify for member events (PR #667)
+-   Deduplicate identical `/sync` requests (PR #668)
+-   Require user to have left room to forget room (PR #673)
+-   Use DNS cache if within TTL (PR #677)
+-   Let users see their own leave events (PR #699)
+-   Deduplicate membership changes (PR #700)
+-   Increase performance of pusher code (PR #705)
+-   Respond with error status 504 if failed to talk to remote server (PR #731)
+-   Increase search performance on postgres (PR #745)
+
+Bug fixes:
+
+-   Fix bug where disabling all notifications still resulted in push (PR #678)
+-   Fix bug where users couldn\'t reject remote invites if remote refused (PR #691)
+-   Fix bug where synapse attempted to backfill from itself (PR #693)
+-   Fix bug where profile information was not correctly added when joining remote rooms (PR #703)
+-   Fix bug where register API required incorrect key name for AS registration (PR #727)
+
+Changes in synapse v0.14.0 (2016-03-30)
+=======================================
+
+No changes from v0.14.0-rc2
+
+Changes in synapse v0.14.0-rc2 (2016-03-23)
+===========================================
+
+Features:
+
+-   Add published room list API (PR #657)
+
+Changes:
+
+-   Change various caches to consume less memory (PR #656, #658, #660, #662, #663, #665)
+-   Allow rooms to be published without requiring an alias (PR #664)
+-   Intern common strings in caches to reduce memory footprint (\#666)
+
+Bug fixes:
+
+-   Fix reject invites over federation (PR #646)
+-   Fix bug where registration was not idempotent (PR #649)
+-   Update aliases event after deleting aliases (PR #652)
+-   Fix unread notification count, which was sometimes wrong (PR #661)
+
+Changes in synapse v0.14.0-rc1 (2016-03-14)
+===========================================
+
+Features:
+
+-   Add event\_id to response to state event PUT (PR #581)
+-   Allow guest users access to messages in rooms they have joined (PR #587)
+-   Add config for what state is included in a room invite (PR #598)
+-   Send the inviter\'s member event in room invite state (PR #607)
+-   Add error codes for malformed/bad JSON in /login (PR #608)
+-   Add support for changing the actions for default rules (PR #609)
+-   Add environment variable SYNAPSE\_CACHE\_FACTOR, default it to 0.1 (PR #612)
+-   Add ability for alias creators to delete aliases (PR #614)
+-   Add profile information to invites (PR #624)
+
+Changes:
+
+-   Enforce user\_id exclusivity for AS registrations (PR #572)
+-   Make adding push rules idempotent (PR #587)
+-   Improve presence performance (PR #582, #586)
+-   Change presence semantics for `last_active_ago` (PR #582, #586)
+-   Don\'t allow `m.room.create` to be changed (PR #596)
+-   Add 800x600 to default list of valid thumbnail sizes (PR #616)
+-   Always include kicks and bans in full /sync (PR #625)
+-   Send history visibility on boundary changes (PR #626)
+-   Register endpoint now returns a refresh\_token (PR #637)
+
+Bug fixes:
+
+-   Fix bug where we returned incorrect state in /sync (PR #573)
+-   Always return a JSON object from push rule API (PR #606)
+-   Fix bug where registering without a user id sometimes failed (PR #610)
+-   Report size of ExpiringCache in cache size metrics (PR #611)
+-   Fix rejection of invites to empty rooms (PR #615)
+-   Fix usage of `bcrypt` to not use `checkpw` (PR #619)
+-   Pin `pysaml2` dependency (PR #634)
+-   Fix bug in `/sync` where timeline order was incorrect for backfilled events (PR #635)
+
+Changes in synapse v0.13.3 (2016-02-11)
+=======================================
+
+-   Fix bug where `/sync` would occasionally return events in the wrong room.
+
+Changes in synapse v0.13.2 (2016-02-11)
+=======================================
+
+-   Fix bug where `/events` would fail to skip some events if there had been more events than the limit specified since the last request (PR #570)
+
+Changes in synapse v0.13.1 (2016-02-10)
+=======================================
+
+-   Bump matrix-angular-sdk (matrix web console) dependency to 0.6.8 to pull in the fix for SYWEB-361 so that the default client can display HTML messages again(!)
+
+Changes in synapse v0.13.0 (2016-02-10)
+=======================================
+
+This version includes an upgrade of the schema, specifically adding an index to the `events` table. This may cause synapse to pause for several minutes the first time it is started after the upgrade.
+
+Changes:
+
+-   Improve general performance (PR #540, #543, #544, #54, #549, #567)
+-   Change guest user ids to be incrementing integers (PR #550)
+-   Improve performance of public room list API (PR #552)
+-   Change profile API to omit keys rather than return null (PR #557)
+-   Add `/media/r0` endpoint prefix, which is equivalent to `/media/v1/` (PR #595)
+
+Bug fixes:
+
+-   Fix bug with upgrading guest accounts where it would fail if you opened the registration email on a different device (PR #547)
+-   Fix bug where unread count could be wrong (PR #568)
+
+Changes in synapse v0.12.1-rc1 (2016-01-29)
+===========================================
+
+Features:
+
+-   Add unread notification counts in `/sync` (PR #456)
+-   Add support for inviting 3pids in `/createRoom` (PR #460)
+-   Add ability for guest accounts to upgrade (PR #462)
+-   Add `/versions` API (PR #468)
+-   Add `event` to `/context` API (PR #492)
+-   Add specific error code for invalid user names in `/register` (PR #499)
+-   Add support for push badge counts (PR #507)
+-   Add support for non-guest users to peek in rooms using `/events` (PR #510)
+
+Changes:
+
+-   Change `/sync` so that guest users only get rooms they\'ve joined (PR #469)
+-   Change to require unbanning before other membership changes (PR #501)
+-   Change default push rules to notify for all messages (PR #486)
+-   Change default push rules to not notify on membership changes (PR #514)
+-   Change default push rules in one to one rooms to only notify for events that are messages (PR #529)
+-   Change `/sync` to reject requests with a `from` query param (PR #512)
+-   Change server manhole to use SSH rather than telnet (PR #473)
+-   Change server to require AS users to be registered before use (PR #487)
+-   Change server not to start when ASes are invalidly configured (PR #494)
+-   Change server to require ID and `as_token` to be unique for AS\'s (PR #496)
+-   Change maximum pagination limit to 1000 (PR #497)
+
+Bug fixes:
+
+-   Fix bug where `/sync` didn\'t return when something under the leave key changed (PR #461)
+-   Fix bug where we returned smaller rather than larger than requested thumbnails when `method=crop` (PR #464)
+-   Fix thumbnails API to only return cropped thumbnails when asking for a cropped thumbnail (PR #475)
+-   Fix bug where we occasionally still logged access tokens (PR #477)
+-   Fix bug where `/events` would always return immediately for guest users (PR #480)
+-   Fix bug where `/sync` unexpectedly returned old left rooms (PR #481)
+-   Fix enabling and disabling push rules (PR #498)
+-   Fix bug where `/register` returned 500 when given unicode username (PR #513)
+
+Changes in synapse v0.12.0 (2016-01-04)
+=======================================
+
+-   Expose `/login` under `r0` (PR #459)
+
+Changes in synapse v0.12.0-rc3 (2015-12-23)
+===========================================
+
+-   Allow guest accounts access to `/sync` (PR #455)
+-   Allow filters to include/exclude rooms at the room level rather than just from the components of the sync for each room. (PR #454)
+-   Include urls for room avatars in the response to `/publicRooms` (PR #453)
+-   Don\'t set an identicon as the avatar for a user when they register (PR #450)
+-   Add a `display_name` to third-party invites (PR #449)
+-   Send more information to the identity server for third-party invites so that it can send richer messages to the invitee (PR #446)
+-   Cache the responses to `/initialSync` for 5 minutes. If a client retries a request to `/initialSync` before a response to the first request has been computed, the same response is used for both requests (PR #457); see the sketch after this list.
+-   Fix a bug where synapse would always request the signing keys of remote servers even when the key was cached locally (PR #452)
+-   Fix 500 when pagination search results (PR #447)
+-   Fix a bug where synapse was leaking raw email address in third-party invites (PR #448)
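+
+A minimal sketch of that caching idea (illustrative Python, not Synapse's actual response cache): identical requests within the cache window share one computed response:
+
+```python
+import time
+
+class SimpleResponseCache:
+    def __init__(self, ttl_seconds=300):  # 5 minutes, as described above
+        self.ttl = ttl_seconds
+        self._cache = {}  # request key -> (expiry, response)
+
+    def get_or_compute(self, key, compute):
+        now = time.monotonic()
+        hit = self._cache.get(key)
+        if hit is not None and hit[0] > now:
+            return hit[1]  # reuse the response computed for an earlier identical request
+        response = compute()
+        self._cache[key] = (now + self.ttl, response)
+        return response
+```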
+
+Changes in synapse v0.12.0-rc2 (2015-12-14)
+===========================================
+
+-   Add caches for whether rooms have been forgotten by a user (PR #434)
+-   Remove instructions to use `--process-dependency-link` since all of the dependencies of synapse are on PyPI (PR #436)
+-   Parallelise the processing of `/sync` requests (PR #437)
+-   Fix race updating presence in `/events` (PR #444)
+-   Fix bug back-populating search results (PR #441)
+-   Fix bug calculating state in `/sync` requests (PR #442)
+
+Changes in synapse v0.12.0-rc1 (2015-12-10)
+===========================================
+
+-   Host the client APIs released as r0 by <https://matrix.org/docs/spec/r0.0.0/client_server.html> on paths prefixed by `/_matrix/client/r0`. (PR #430, PR #415, PR #400)
+-   Updates the client APIs to match r0 of the matrix specification.
+    -   All APIs return events in the new event format, old APIs also include the fields needed to parse the event using the old format for compatibility. (PR #402)
+    -   Search results are now given as a JSON array rather than a JSON object (PR #405)
+    -   Miscellaneous changes to search (PR #403, PR #406, PR #412)
+    -   Filter JSON objects may now be passed as query parameters to `/sync` (PR #431)
+    -   Fix implementation of `/admin/whois` (PR #418)
+    -   Only include the rooms that user has left in `/sync` if the client requests them in the filter (PR #423)
+    -   Don\'t push for `m.room.message` by default (PR #411)
+    -   Add API for setting per account user data (PR #392)
+    -   Allow users to forget rooms (PR #385)
+-   Performance improvements and monitoring:
+    -   Add per-request counters for CPU time spent on the main python thread. (PR #421, PR #420)
+    -   Add per-request counters for time spent in the database (PR #429)
+    -   Make state updates in the C+S API idempotent (PR #416)
+    -   Only fire `user_joined_room` if the user has actually joined. (PR #410)
+    -   Reuse a single http client, rather than creating new ones (PR #413)
+-   Fixed a bug upgrading from older versions of synapse on postgresql (PR #417)
+
+Changes in synapse v0.11.1 (2015-11-20)
+=======================================
+
+-   Add extra options to search API (PR #394)
+-   Fix bug where we did not correctly cap federation retry timers. This meant it could take several hours for servers to start talking to resurrected servers, even when they were receiving traffic from them (PR #393)
+-   Don\'t advertise login token flow unless CAS is enabled. This caused issues where some clients would always use the fallback API if they did not recognize all login flows (PR #391)
+-   Change /v2 sync API to rename `private_user_data` to `account_data` (PR #386)
+-   Change /v2 sync API to remove the `event_map` and rename keys in `rooms` object (PR #389)
+
+Changes in synapse v0.11.0-r2 (2015-11-19)
+==========================================
+
+-   Fix bug in database port script (PR #387)
+
+Changes in synapse v0.11.0-r1 (2015-11-18)
+==========================================
+
+-   Retry and fail federation requests more aggressively for requests that block client side requests (PR #384)
+
+Changes in synapse v0.11.0 (2015-11-17)
+=======================================
+
+-   Change CAS login API (PR #349)
+
+Changes in synapse v0.11.0-rc2 (2015-11-13)
+===========================================
+
+-   Various changes to /sync API response format (PR #373)
+-   Fix regression when setting display name in newly joined room over federation (PR #368)
+-   Fix problem where /search was slow when using SQLite (PR #366)
+
+Changes in synapse v0.11.0-rc1 (2015-11-11)
+===========================================
+
+-   Add Search API (PR #307, #324, #327, #336, #350, #359)
+-   Add 'archived' state to v2 /sync API (PR #316)
+-   Add ability to reject invites (PR #317)
+-   Add config option to disable password login (PR #322)
+-   Add the login fallback API (PR #330)
+-   Add room context API (PR #334)
+-   Add room tagging support (PR #335)
+-   Update v2 /sync API to match spec (PR #305, #316, #321, #332, #337, #341)
+-   Change retry schedule for application services (PR #320)
+-   Change retry schedule for remote servers (PR #340)
+-   Fix bug where we hosted static content in the incorrect place (PR #329)
+-   Fix bug where we didn't increment retry interval for remote servers (PR #343)
+
+Changes in synapse v0.10.1-rc1 (2015-10-15)
+===========================================
+
+-   Add support for CAS, thanks to Steven Hammerton (PR #295, #296)
+-   Add support for using macaroons for `access_token` (PR #256, #229)
+-   Add support for `m.room.canonical_alias` (PR #287)
+-   Add support for viewing the history of rooms that users have left. (PR #276, #294)
+-   Add support for refresh tokens (PR #240)
+-   Add flag on creation which disables federation of the room (PR #279)
+-   Add some room state to invites. (PR #275)
+-   Atomically persist events when joining a room over federation (PR #283)
+-   Change default history visibility for private rooms (PR #271)
+-   Allow users to redact their own sent events (PR #262)
+-   Use tox for tests (PR #247)
+-   Split up syutil into separate libraries (PR #243)
+
+Changes in synapse v0.10.0-r2 (2015-09-16)
+==========================================
+
+-   Fix bug where we always fetched remote server signing keys instead of using ones in our cache.
+-   Fix adding threepids to an existing account.
+-   Fix bug with inviting over federation where the remote server was already in the room. (PR #281, SYN-392)
+
+Changes in synapse v0.10.0-r1 (2015-09-08)
+==========================================
+
+-   Fix bug with python packaging
+
+Changes in synapse v0.10.0 (2015-09-03)
+=======================================
+
+No change from release candidate.
+
+Changes in synapse v0.10.0-rc6 (2015-09-02)
+===========================================
+
+-   Remove some of the old database upgrade scripts.
+-   Fix database port script to work with newly created sqlite databases.
+
+Changes in synapse v0.10.0-rc5 (2015-08-27)
+===========================================
+
+-   Fix bug that broke downloading files with ASCII filenames across federation.
+
+Changes in synapse v0.10.0-rc4 (2015-08-27)
+===========================================
+
+-   Allow UTF-8 filenames for upload. (PR #259)
+
+Changes in synapse v0.10.0-rc3 (2015-08-25)
+===========================================
+
+-   Add `--keys-directory` config option to specify where files such as certs and signing keys should be stored, when using `--generate-config` or `--generate-keys`. (PR #250)
+-   Allow `--config-path` to specify a directory, causing synapse to use all `*.yaml` files in the directory as config files; see the example after this list. (PR #249)
+-   Add `web_client_location` config option to specify static files to be hosted by synapse under `/_matrix/client`. (PR #245)
+-   Add helper utility to synapse to read and parse the config files and extract the value of a given key. For example:
+
+        $ python -m synapse.config read server_name -c homeserver.yaml
+        localhost
+
+    (PR #246)
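+
+For example, the directory support mentioned above might be used like this (the directory contents are hypothetical):
+
+    $ ls /etc/synapse/
+    homeserver.yaml  logging.yaml
+    $ python -m synapse.app.homeserver --config-path /etc/synapse/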
+
+Changes in synapse v0.10.0-rc2 (2015-08-24)
+===========================================
+
+-   Fix bug where we incorrectly populated the `event_forward_extremities` table, resulting in problems joining large remote rooms (e.g. `#matrix:matrix.org`)
+-   Reduce the number of times we wake up pushers by not listening for presence or typing events, reducing the CPU cost of each pusher.
+
+Changes in synapse v0.10.0-rc1 (2015-08-21)
+===========================================
+
+Also see v0.9.4-rc1 changelog, which has been amalgamated into this release.
+
+General:
+
+-   Upgrade to Twisted 15 (PR #173)
+-   Add support for serving and fetching encryption keys over federation. (PR #208)
+-   Add support for logging in with email address (PR #234)
+-   Add support for new `m.room.canonical_alias` event. (PR #233)
+-   Change synapse to treat user IDs case-insensitively during registration and login. (If two users already exist with case-insensitively matching user IDs, synapse will continue to require them to specify their user IDs exactly.)
+-   Error if a user tries to register with an email already in use. (PR #211)
+-   Add extra and improve existing caches (PR #212, #219, #226, #228)
+-   Batch various storage requests (PR #226, #228)
+-   Fix bug where we didn't correctly log the entity that triggered the request if the request came in via an application service (PR #230)
+-   Fix bug where we needlessly regenerated the full list of rooms an AS is interested in. (PR #232)
+-   Add support for ASes to use the v2_alpha registration API (PR #210)
+
+Configuration:
+
+-   Add `--generate-keys` that will generate any missing cert and key files referenced in the configuration files. This is equivalent to running `--generate-config` on an existing configuration file. (PR #220)
+-   `--generate-config` now no longer requires a `--server-name` parameter when used on existing configuration files. (PR #220)
+-   Add `--print-pidfile` flag that controls whether the pid of the daemonised process is printed to stdout. (PR #213)
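+
+A sketch of how these flags might be combined (the paths are illustrative):
+
+    $ python -m synapse.app.homeserver --config-path homeserver.yaml --generate-keys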
+
+Media Repository:
+
+-   Fix bug where we picked a lower resolution image than requested. (PR #205)
+-   Add support for specifying whether the media repository should dynamically thumbnail images. (PR #206)
+
+Metrics:
+
+-   Add statistics from the reactor to the metrics API. (PR #224, #225)
+
+Demo Homeservers:
+
+-   Fix starting the demo homeservers without rate-limiting enabled. (PR #182)
+-   Fix enabling registration on demo homeservers (PR #223)
+
+Changes in synapse v0.9.4-rc1 (2015-07-21)
+==========================================
+
+General:
+
+-   Add basic implementation of receipts. (SPEC-99)
+-   Add support for configuration presets in room creation API. (PR #203)
+-   Add auth event that limits the visibility of history for new users. (SPEC-134)
+-   Add SAML2 login/registration support. (PR #201. Thanks Muthu Subramanian!)
+-   Add client side key management APIs for end to end encryption. (PR #198)
+-   Change power level semantics so that you cannot kick, ban or change power levels of users that have equal or greater power level than you. (SYN-192)
+-   Improve performance by bulk inserting events where possible. (PR #193)
+-   Improve performance by bulk verifying signatures where possible. (PR #194)
+
+Configuration:
+
+-   Add support for including TLS certificate chains.
+
+Media Repository:
+
+-   Add Content-Disposition headers to content repository responses. (SYN-150)
+
+Changes in synapse v0.9.3 (2015-07-01)
+======================================
+
+No changes from v0.9.3 Release Candidate 1.
+
+Changes in synapse v0.9.3-rc1 (2015-06-23)
+==========================================
+
+General:
+
+-   Fix a memory leak in the notifier. (SYN-412)
+-   Improve performance of room initial sync. (SYN-418)
+-   General improvements to logging.
+-   Remove `access_token` query params from `INFO` level logging.
+
+Configuration:
+
+-   Add support for specifying and configuring multiple listeners. (SYN-389)
+
+Application services:
+
+-   Fix bug where synapse failed to send user queries to application services.
+
+Changes in synapse v0.9.2-r2 (2015-06-15)
+=========================================
+
+Fix packaging so that schema delta python files get included in the package.
+
+Changes in synapse v0.9.2 (2015-06-12)
+======================================
+
+General:
+
+-   Use ultrajson for json (de)serialisation when a canonical encoding is not required. Ultrajson is significantly faster than simplejson in certain circumstances.
+-   Use connection pools for outgoing HTTP connections.
+-   Process thumbnails on separate threads.
+
+Configuration:
+
+-   Add option, `gzip_responses`, to disable HTTP response compression.
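+
+A minimal sketch of the new option in `homeserver.yaml` (the value shown is illustrative):
+
+    gzip_responses: false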
+
+Federation:
+
+-   Improve resilience of backfill by ensuring we fetch any missing auth events.
+-   Improve performance of backfill and joining remote rooms by removing unnecessary computations. This included handling events we'd previously handled as well as attempting to compute the current state for outliers.
+
+Changes in synapse v0.9.1 (2015-05-26)
+======================================
+
+General:
+
+-   Add support for backfilling when a client paginates. This allows servers to request history for a room from remote servers when a client tries to paginate history the server does not have - SYN-36
+-   Fix bug where you couldn't disable non-default push rules - SYN-378
+-   Fix `register_new_user` script - SYN-359
+-   Improve performance of fetching events from the database; this improves both initialSync and sending of events.
+-   Improve performance of event streams, allowing synapse to handle more simultaneous connected clients.
+
+Federation:
+
+-   Fix bug with existing backfill implementation where it returned the wrong selection of events in some circumstances.
+-   Improve performance of joining remote rooms.
+
+Configuration:
+
+-   Add support for changing the bind host of the metrics listener via the `metrics_bind_host` option.
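+
+An illustrative `homeserver.yaml` fragment, assuming metrics are already enabled via the `enable_metrics`/`metrics_port` options (values are examples only):
+
+    enable_metrics: true
+    metrics_port: 9092
+    metrics_bind_host: 127.0.0.1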
+
+Changes in synapse v0.9.0-r5 (2015-05-21)
+=========================================
+
+-   Add more database caches to reduce amount of work done for each pusher. This radically reduces CPU usage when multiple pushers are set up in the same room.
+
+Changes in synapse v0.9.0 (2015-05-07)
+======================================
+
+General:
+
+-   Add support for using a PostgreSQL database instead of SQLite. See [docs/postgres.rst](docs/postgres.rst) for details.
+-   Add password change and reset APIs. See [Registration](https://github.com/matrix-org/matrix-doc/blob/master/specification/10_client_server_api.rst#registration) in the spec.
+-   Fix memory leak due to not releasing stale notifiers - SYN-339.
+-   Fix race in caches that occasionally caused some presence updates to be dropped - SYN-369.
+-   Check server name has not changed on restart.
+-   Add a sample systemd unit file and a logger configuration in contrib/systemd. Contributed by Ivan Shapovalov.
+
+Federation:
+
+-   Add key distribution mechanisms for fetching public keys of unavailable remote home servers. See [Retrieving Server Keys](https://github.com/matrix-org/matrix-doc/blob/6f2698/specification/30_server_server_api.rst#retrieving-server-keys) in the spec.
+
+Configuration:
+
+-   Add support for multiple config files.
+-   Add support for dictionaries in config files.
+-   Remove support for specifying config options on the command line (see the example after this list), except for:
+    -   `--daemonize` - Daemonize the home server.
+    -   `--manhole` - Turn on the twisted telnet manhole service on the given port.
+    -   `--database-path` - The path to a sqlite database to use.
+    -   `--verbose` - The verbosity level.
+    -   `--log-file` - File to log to.
+    -   `--log-config` - Python logging config file.
+    -   `--enable-registration` - Enable registration for new users.
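+
+So a typical invocation after this change might look like the following, with everything else read from the config files (paths are illustrative):
+
+    $ python -m synapse.app.homeserver --config-path homeserver.yaml --daemonize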
+
+Application services:
+
+-   Reliably retry sending of events from Synapse to application services, as per [Application Services](https://github.com/matrix-org/matrix-doc/blob/0c6bd9/specification/25_application_service_api.rst#home-server---application-service-api) spec.
+-   Application services can no longer register via the `/register` API; instead, their configuration should be saved to a file and listed in the synapse `app_service_config_files` config option. The AS configuration file has the same format as the old `/register` request. See [docs/application_services.rst](docs/application_services.rst) for more information.
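+
+An illustrative `homeserver.yaml` fragment (the file path is hypothetical):
+
+    app_service_config_files:
+      - /etc/synapse/appservices/my_bridge.yaml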
+
+Changes in synapse v0.8.1 (2015-03-18)
+======================================
+
+-   Disable registration by default. New users can be added using the command `register_new_matrix_user` (see the example after this list) or by enabling registration in the config.
+-   Add metrics to synapse. To enable metrics use config options `enable_metrics` and `metrics_port`.
+-   Fix bug where banning only kicked the user.
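+
+For instance, with registration disabled a new user can still be created from the command line; the exact flags have varied between versions, but an invocation looks roughly like this (values are placeholders):
+
+    $ register_new_matrix_user -u alice -p <password> -c homeserver.yaml https://localhost:8448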
+
+Changes in synapse v0.8.0 (2015-03-06)
+======================================
+
+General:
+
+-   Add support for registration fallback. This is a page hosted on the server which allows a user to register for an account, regardless of what client they are using (e.g. mobile devices).
+-   Added new default push rules and made them configurable by clients:
+    -   Suppress all notice messages.
+    -   Notify when invited to a new room.
+    -   Notify for messages that don't match any rule.
+    -   Notify on incoming call.
+
+Federation:
+
+-   Added per host server side rate-limiting of incoming federation requests.
+-   Added a `/get_missing_events/` API to federation to reduce number of `/events/` requests.
+
+Configuration:
+
+-   Added configuration option to disable registration: `disable_registration`.
+-   Added configuration option to change soft limit of number of open file descriptors: `soft_file_limit`.
+-   Make `tls_private_key_path` optional when running with `no_tls`.
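+
+An illustrative `homeserver.yaml` fragment combining these options (values are examples only):
+
+    disable_registration: true
+    soft_file_limit: 4096
+    no_tls: true  # tls_private_key_path may then be omitted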
+
+Application services:
+
+-   Application services can now poll on the CS API `/events` for their events, by providing their application service `access_token`.
+-   Added exclusive namespace support to application services API.
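+
+Illustratively, an application service could long-poll the (v1) event stream described above using its own token, roughly like this (server and token are placeholders):
+
+    $ curl 'https://localhost:8448/_matrix/client/api/v1/events?access_token=<as_token>&timeout=30000'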
+
+Changes in synapse v0.7.1 (2015-02-19)
+======================================
+
+-   Initial alpha implementation of parts of the Application Services API. Including:
+    -   AS Registration / Unregistration
+    -   User Query API
+    -   Room Alias Query API
+    -   Push transport for receiving events.
+    -   User/Alias namespace admin control
+-   Add cache when fetching events from remote servers to stop repeatedly fetching events with bad signatures.
+-   Respect the per remote server retry scheme when fetching both events and server keys to reduce the number of times we send requests to dead servers.
+-   Inform remote servers when the local server fails to handle a received event.
+-   Turn off python bytecode generation due to problems experienced when upgrading from previous versions.
+
+Changes in synapse v0.7.0 (2015-02-12)
+======================================
+
+-   Add initial implementation of the query auth federation API, allowing servers to agree on whether an event should be allowed or rejected.
+-   Persist events we have rejected from federation, fixing the bug where servers would keep requesting the same events.
+-   Various federation performance improvements, including:
+    -   Add in-memory caches on queries such as:
+        -   Computing the state of a room at a point in time, used for authorization on federation requests.
+        -   Fetching events from the database.
+        -   Users' room membership, used for authorizing presence updates.
+
+    -   Upgraded JSON library to improve parsing and serialisation speeds.
+
+-   Add default avatars to new user accounts using pydenticon library.
+-   Correctly time out federation requests.
+-   Retry federation requests against different servers.
+-   Add support for push and push rules.
+-   Add alpha versions of proposed new CSv2 APIs, including `/sync` API.
+
+Changes in synapse 0.6.1 (2015-01-07)
+=====================================
+
+-   Major optimizations to improve performance of initial sync and event sending in large rooms (by up to 10x)
+-   Media repository now includes a Content-Length header on media downloads.
+-   Improve quality of thumbnails by changing resizing algorithm.
+
+Changes in synapse 0.6.0 (2014-12-16)
+=====================================
+
+-   Add new API for media upload and download that supports thumbnailing.
+-   Replicate media uploads over multiple homeservers so media is always served to clients from their local homeserver. This obsoletes the `--content-addr` parameter and confusion over accessing content directly from remote homeservers.
+-   Implement exponential backoff when retrying federation requests when sending to remote homeservers which are offline.
+-   Implement typing notifications.
+-   Fix bugs where we sent events with invalid signatures because we had incorrectly persisted them.
+-   Improve performance of database queries involving retrieving events.
+
+Changes in synapse 0.5.4a (2014-12-13)
+======================================
+
+-   Fix bug while generating the error message when a file path specified in the config doesn't exist.
+
+Changes in synapse 0.5.4 (2014-12-03)
+=====================================
+
+-   Fix presence bug where some rooms did not display presence updates for remote users.
+-   Do not log SQL timing lines when started with "-v"
+-   Fix potential memory leak.
+
+Changes in synapse 0.5.3c (2014-12-02)
+======================================
+
+-   Change the default value for the `content_addr` option to use the HTTP listener, as by default the HTTPS listener will be using a self-signed certificate.
+
+Changes in synapse 0.5.3 (2014-11-27)
+=====================================
+
+-   Fix bug that caused joining a remote room to fail if a single event was not signed correctly.
+-   Fix bug which caused servers to continuously try and fetch events from other servers.
+
+Changes in synapse 0.5.2 (2014-11-26)
+=====================================
+
+Fix major bug that caused rooms to disappear from people's initial sync.
+
+Changes in synapse 0.5.1 (2014-11-26)
+=====================================
+
+See UPGRADES.rst for specific instructions on how to upgrade.
+
+-   Fix bug where we served up an event that did not match its signatures.
+-   Fix regression where we no longer correctly handled the case where a homeserver receives an event for a room it doesn't recognise (but is in).
+
+Changes in synapse 0.5.0 (2014-11-19)
+=====================================
+
+This release includes changes to the federation protocol and client-server API that are not backwards compatible.
+
+This release also changes the internal database schemas and so requires servers to drop their current history. See UPGRADES.rst for details.
+
+Homeserver:
+
+-   Add authentication and authorization to the federation protocol. Events are now signed by their originating homeservers.
+-   Implement the new authorization model for rooms.
+-   Split out web client into a separate repository: matrix-angular-sdk.
+-   Change the structure of PDUs.
+-   Fix bug where user could not join rooms via an alias containing 4-byte UTF-8 characters.
+-   Merge concept of PDUs and Events internally.
+-   Improve logging by adding request ids to log lines.
+-   Implement a very basic room initial sync API.
+-   Implement the new invite/join federation APIs.
+
+Webclient:
+
+-   The webclient has been moved to a separate repository.
+
+Changes in synapse 0.4.2 (2014-10-31)
+=====================================
+
+Homeserver:
+
+-   Fix bugs where we did not notify users of correct presence updates.
+-   Fix bug where we did not handle sub-second event stream timeouts.
+
+Webclient:
+
+-   Add ability to click on messages to see JSON.
+-   Add ability to redact messages.
+-   Add ability to view and edit all room state JSON.
+-   Handle incoming redactions.
+-   Improve feedback on errors.
+-   Fix bugs in mobile CSS.
+-   Fix bugs with desktop notifications.
+
+Changes in synapse 0.4.1 (2014-10-17)
+=====================================
+
+Webclient:
+
+-   Fix bug with display of timestamps.
+
+Changes in synapse 0.4.0 (2014-10-17)
+=====================================
+
+This release includes changes to the federation protocol and client-server API that are not backwards compatible.
+
+The Matrix specification has been moved to a separate git repository: <http://github.com/matrix-org/matrix-doc>
+
+You will also need an updated syutil and config. See UPGRADES.rst.
+
+Homeserver:
+
+-   Sign federation transactions to assert strong identity over federation.
+-   Rename timestamp keys in PDUs and events from `ts` and `hsob_ts` to `origin_server_ts`.
+
+Changes in synapse 0.3.4 (2014-09-25)
+=====================================
+
+This version adds support for using a TURN server. See docs/turn-howto.rst on how to set one up.
+
+Homeserver:
+
+-   Add support for redaction of messages.
+-   Fix bug where inviting a user on a remote home server could take up to 20-30s.
+-   Implement a get current room state API.
+-   Add support for specifying and retrieving TURN server configuration.
+
+Webclient:
+
+-   Add button to send messages to users from the home page.
+-   Add support for using TURN for VoIP calls.
+-   Show display name change messages.
+-   Fix bug where the client didn't get the state of a newly joined room until after it had been refreshed.
+-   Fix bugs with tab complete.
+-   Fix bug where holding down the down arrow caused chrome to chew 100% CPU.
+-   Fix bug where desktop notifications occasionally used "Undefined" as the display name.
+-   Fix more places where we sometimes saw room IDs incorrectly.
+-   Fix bug which caused lag when entering text in the text box.
+
+Changes in synapse 0.3.3 (2014-09-22)
+=====================================
+
+Homeserver:
+
+-   Fix bug where you continued to get events for rooms you had left.
+
+Webclient:
+
+-   Add support for video calls with basic UI.
+-   Fix bug where one to one chats were named after your display name rather than the other person's.
+-   Fix bug which caused lag when typing in the textarea.
+-   Refuse to run on browsers we know won't work.
+-   Trigger pagination when joining new rooms.
+-   Fix bug where we sometimes didn't display invitations in recents.
+-   Automatically join room when accepting a VoIP call.
+-   Disable outgoing and reject incoming calls on browsers we don't support VoIP in.
+-   Don't display desktop notifications for messages in the room you are actively viewing and speaking in.
+
+Changes in synapse 0.3.2 (2014-09-18)
+=====================================
+
+Webclient:
+
+-   Fix bug where an empty "bing words" list in old accounts meant notifications weren't sent when they should have been.
+
+Changes in synapse 0.3.1 (2014-09-18)
+=====================================
+
+This is a release to hotfix v0.3.0 to fix two regressions.
+
+Webclient:
+
+-   Fix a regression where we sometimes displayed duplicate events.
+-   Fix a regression where we didn't immediately remove rooms you were banned in from the recents list.
+
+Changes in synapse 0.3.0 (2014-09-18)
+=====================================
+
+See UPGRADE for information about changes to the client server API, including breaking backwards compatibility with VoIP calls and registration API.
+
+Homeserver:
+
+-   When a user changes their displayname or avatar the server will now update all their join states to reflect this.
+-   The server now adds an "age" key to events to indicate how old they are. This is clock-independent, so at no point does any server or webclient have to assume its clock is in sync with everyone else's.
+-   Fix bug where we didn't correctly pull in missing PDUs.
+-   Fix bug where the prev_content key wasn't always returned.
+-   Add support for password resets.
+
+Webclient:
+
+-   Improve page content loading.
+-   Joins/parts now trigger desktop notifications.
+-   Always show room aliases in the UI if one is present.
+-   No longer show user count in the recents side panel.
+-   Add up & down arrow support to the message text box to step through your sent history.
+-   Don't display notifications for our own messages.
+-   Emotes are now formatted correctly in desktop notifications.
+-   The recents list now differentiates between public & private rooms.
+-   Fix bug where the pagination flickered before the view jumped to the bottom of the screen when switching between rooms.
+-   Add bing word support.
+
+Registration API:
+
+-   The registration API has been overhauled to function like the login API. In practice, this means registration requests must now include the following: `'type':'m.login.password'`. See UPGRADE for more information on this.
+-   The `user_id` key has been renamed to `user` to better match the login API.
+-   There is an additional login type: `m.login.email.identity`.
+-   The command client and web client have been updated to reflect these changes.
+
+Changes in synapse 0.2.3 (2014-09-12)
+=====================================
+
+Homeserver:
+
+-   Fix bug where we stopped sending events to remote home servers if a user from that home server left, even if there were some still in the room.
+-   Fix bugs in the state conflict resolution where it was incorrectly rejecting events.
+
+Webclient:
+
+-   Display room names and topics.
+-   Allow setting/editing of room names and topics.
+-   Display information about rooms on the main page.
+-   Handle ban and kick events in real time.
+-   VoIP UI and reliability improvements.
+-   Add glare support for VoIP.
+-   Improvements to initial startup speed.
+-   Don't display duplicate join events.
+-   Local echo of messages.
+-   Differentiate sending and sent of local echo.
+-   Various minor bug fixes.
+
+Changes in synapse 0.2.2 (2014-09-06)
+=====================================
+
+Homeserver:
+
+-   When the server returns state events it now also includes the previous content.
+-   Add support for inviting people when creating a new room.
+-   Make the homeserver inform the room via `m.room.aliases` when a new alias is added for a room.
+-   Validate `m.room.power_level` events.
+
+Webclient:
+
+-   Add support for captchas on registration.
+-   Handle `m.room.aliases` events.
+-   Asynchronously send messages and show a local echo.
+-   Inform the UI when a message failed to send.
+-   Only autoscroll on receiving a new message if the user was already at the bottom of the screen.
+-   Add support for ban/kick reasons.
+
+Changes in synapse 0.2.1 (2014-09-03)
+=====================================
+
+Homeserver:
+
+-   Added support for signing up with a third party id.
+-   Add synctl scripts.
+-   Added rate limiting.
+-   Add option to change the external address the content repo uses.
+-   Presence bug fixes.
+
+Webclient:
+
+-   Added support for signing up with a third party id.
+-   Added support for banning and kicking users.
+-   Added support for displaying and setting ops.
+-   Added support for room names.
+-   Fix bugs with room membership event display.
+
+Changes in synapse 0.2.0 (2014-09-02)
+=====================================
+
+This update changes many configuration options, updates the database schema and mandates SSL for server-server connections.
+
+Homeserver:
+
+-   Require SSL for server-server connections.
+-   Add SSL listener for client-server connections.
+-   Add ability to use config files.
+-   Add support for kicking/banning and power levels.
+-   Allow setting of room names and topics on creation.
+-   Change presence to include last seen time of the user.
+-   Change URL path prefix to `/_matrix/...`
+-   Bug fixes to presence.
+
+Webclient:
+
+-   Reskin the CSS for registration and login.
+-   Various improvements to rooms CSS.
+-   Support changes in client-server API.
+-   Bug fixes to VoIP UI.
+-   Various bug fixes to handling of changes to room member list.
+
+Changes in synapse 0.1.2 (2014-08-29)
+=====================================
+
+Webclient:
+
+-   Add basic call state UI for VoIP calls.
+
+Changes in synapse 0.1.1 (2014-08-29)
+=====================================
+
+Homeserver:
+
+-   Fix bug that caused the event stream to not notify some clients about changes.
+
+Changes in synapse 0.1.0 (2014-08-29)
+=====================================
+
+Presence has been reenabled in this release.
+
+Homeserver:
+
+-   Update client to server API, including:
+    -   Use a more consistent URL scheme.
+    -   Provide more useful information in the initial sync API.
+-   Change the presence handling to be much more efficient.
+-   Change the presence server-to-server API to not require explicit polling of all users who share a room with a user.
+-   Fix races in the event streaming logic.
+
+Webclient:
+
+-   Update to use new client to server API.
+-   Add basic VoIP support.
+-   Add idle timers that change your status to away.
+-   Add recent rooms column when viewing a room.
+-   Various network efficiency improvements.
+-   Add basic mobile browser support.
+-   Add a settings page.
+
+Changes in synapse 0.0.1 (2014-08-22)
+=====================================
+
+Presence has been disabled in this release due to a bug that caused the homeserver to spam other remote homeservers.
+
+Homeserver:
+
+-   Completely change the database schema to support generic event types.
+-   Improve presence reliability.
+-   Improve reliability of joining remote rooms.
+-   Fix bug where room join events were duplicated.
+-   Improve initial sync API to return more information to the client.
+-   Stop generating fake messages for room membership events.
+
+Webclient:
+
+-   Add tab completion of names.
+-   Add ability to upload and send images.
+-   Add profile pages.
+-   Improve CSS layout of room.
+-   Disambiguate identical display names.
+-   Don't fetch remote users' display names and avatars individually.
+-   Use the new initial sync API to reduce number of round trips to the homeserver.
+-   Change URL scheme to use room aliases instead of room ids where known.
+-   Increase longpoll timeout.
+
+Changes in synapse 0.0.0 (2014-08-13)
+=====================================
+
+-   Initial alpha release
diff --git a/CHANGES.rst b/CHANGES.rst
deleted file mode 100644
index 4047f50aa5..0000000000
--- a/CHANGES.rst
+++ /dev/null
@@ -1,2761 +0,0 @@
-Changes in synapse v0.31.1 (2018-06-08)
-=======================================
-
-v0.31.1 fixes a security bug in the ``get_missing_events`` federation API
-where event visibility rules were not applied correctly.
-
-We are not aware of it being actively exploited but please upgrade asap.
-
-Bug Fixes:
-
-* Fix event filtering in get_missing_events handler (PR #3371)
-
-Changes in synapse v0.31.0 (2018-06-06)
-=======================================
-
-The most notable change since v0.30.0 is the switch to the Python prometheus library to improve system
-stats reporting. WARNING: this changes a number of prometheus metrics in a
-backwards-incompatible manner. For more details, see
-`docs/metrics-howto.rst <docs/metrics-howto.rst#removal-of-deprecated-metrics--time-based-counters-becoming-histograms-in-0310>`_.
-
-Bug Fixes:
-
-* Fix metric documentation tables (PR #3341)
-* Fix LaterGauge error handling (694968f)
-* Fix replication metrics (b7e7fd2)
-
-Changes in synapse v0.31.0-rc1 (2018-06-04)
-===========================================
-
-Features:
-
-* Switch to the Python Prometheus library (PR #3256, #3274)
-* Let users leave the server notice room after joining (PR #3287)
-
-
-Changes:
-
-* daily user type phone home stats (PR #3264)
-* Use iter* methods for _filter_events_for_server (PR #3267)
-* Docs on consent bits (PR #3268)
-* Remove users from user directory on deactivate (PR #3277)
-* Avoid sending consent notice to guest users (PR #3288)
-* disable CPUMetrics if no /proc/self/stat (PR #3299)
-* Consistently use six's iteritems and wrap lazy keys/values in list() if they're not meant to be lazy (PR #3307)
-* Add private IPv6 addresses to example config for url preview blacklist (PR #3317) Thanks to @thegcat!
-* Reduce stuck read-receipts: ignore depth when updating (PR #3318)
-* Put python's logs into Trial when running unit tests (PR #3319)
-
-Changes, python 3 migration:
-
-* Replace some more comparisons with six (PR #3243) Thanks to @NotAFile!
-* replace some iteritems with six (PR #3244) Thanks to @NotAFile!
-* Add batch_iter to utils (PR #3245) Thanks to @NotAFile!
-* use repr, not str (PR #3246) Thanks to @NotAFile!
-* Misc Python3 fixes (PR #3247) Thanks to @NotAFile!
-* Py3 storage/_base.py (PR #3278) Thanks to @NotAFile!
-* more six iteritems (PR #3279) Thanks to @NotAFile!
-* More Misc. py3 fixes (PR #3280) Thanks to @NotAFile!
-* remaining isinstance fixes (PR #3281) Thanks to @NotAFile!
-* py3-ize state.py (PR #3283) Thanks to @NotAFile!
-* extend tox testing for py3 to avoid regressions (PR #3302) Thanks to @krombel!
-* use memoryview in py3 (PR #3303) Thanks to @NotAFile!
-
-Bugs:
-
-* Fix federation backfill bugs (PR #3261)
-* federation: fix LaterGauge usage (PR #3328) Thanks to @intelfx!
-
-
-Changes in synapse v0.30.0 (2018-05-24)
-==========================================
-
-'Server Notices' are a new feature introduced in Synapse 0.30. They provide a
-channel whereby server administrators can send messages to users on the server.
-
-They are used as part of communicating the server policies (see ``docs/consent_tracking.md``);
-however, the intention is that they may also find a use for features such
-as "Message of the day".
-
-This feature is specific to Synapse, but uses standard Matrix communication mechanisms,
-so it should work with any Matrix client. For more details see ``docs/server_notices.md``.
-
-Further Server Notices/Consent Tracking Support:
-
-* Allow overriding the server_notices user's avatar (PR #3273)
-* Use the localpart in the consent uri (PR #3272)
-* Support for putting %(consent_uri)s in messages (PR #3271)
-* Block attempts to send server notices to remote users (PR #3270)
-* Docs on consent bits (PR #3268)
-
-
-
-Changes in synapse v0.30.0-rc1 (2018-05-23)
-===========================================
-
-Server Notices/Consent Tracking Support:
-
-* ConsentResource to gather policy consent from users (PR #3213)
-* Move RoomCreationHandler out of synapse.handlers.Handlers (PR #3225)
-* Infrastructure for a server notices room (PR #3232)
-* Send users a server notice about consent (PR #3236)
-* Reject attempts to send event before privacy consent is given (PR #3257)
-* Add a 'has_consented' template var to consent forms (PR #3262)
-* Fix dependency on jinja2 (PR #3263)
-
-Features:
-
-* Cohort analytics (PR #3163, #3241, #3251)
-* Add lxml to docker image for web previews (PR #3239) Thanks to @ptman!
-* Add in flight request metrics (PR #3252)
-
-Changes:
-
-* Remove unused `update_external_syncs` (PR #3233)
-* Use stream rather depth ordering for push actions (PR #3212)
-* Make purge_history operate on tokens (PR #3221)
-* Don't support limitless pagination (PR #3265)
-
-Bug Fixes:
-
-* Fix logcontext resource usage tracking (PR #3258)
-* Fix error in handling receipts (PR #3235)
-* Stop the transaction cache caching failures (PR #3255)
-
-
-Changes in synapse v0.29.1 (2018-05-17)
-==========================================
-
-Changes:
-
-* Update docker documentation (PR #3222)
-
-Changes in synapse v0.29.0 (2018-05-16)
-===========================================
-
-No changes since v0.29.0-rc1
-
-Changes in synapse v0.29.0-rc1 (2018-05-14)
-===========================================
-
-Notable changes include a Dockerfile for running Synapse (thanks to @kaiyou!) and the
-closing of a spec bug in the Client-Server API, plus further preparation for the
-Python 3 migration.
-
-Potentially breaking change:
-
-* Make Client-Server API return 401 for invalid token (PR #3161).
-
-  This changes Synapse to return a 401 error code instead of 403
-  when the access token is unrecognised. This is the behaviour required by the
-  specification, but some clients may be relying on the old, incorrect
-  behaviour.
-
-  Thanks to @NotAFile for fixing this.
-
-Features:
-
-* Add a Dockerfile for synapse (PR #2846) Thanks to @kaiyou!
-
-Changes - General:
-
-* nuke-room-from-db.sh: added postgresql option and help (PR #2337) Thanks to @rubo77!
-* Part user from rooms on account deactivate (PR #3201)
-* Make 'unexpected logging context' into warnings (PR #3007)
-* Set Server header in SynapseRequest (PR #3208)
-* remove duplicates from groups tables (PR #3129)
-* Improve exception handling for background processes (PR #3138)
-* Add missing consumeErrors to improve exception handling (PR #3139)
-* reraise exceptions more carefully (PR #3142)
-* Remove redundant call to preserve_fn (PR #3143)
-* Trap exceptions thrown within run_in_background (PR #3144)
-
-Changes - Refactors:
-
-* Refactor /context to reuse pagination storage functions (PR #3193)
-* Refactor recent events func to use pagination func (PR #3195)
-* Refactor pagination DB API to return concrete type (PR #3196)
-* Refactor get_recent_events_for_room return type (PR #3198)
-* Refactor sync APIs to reuse pagination API (PR #3199)
-* Remove unused code path from member change DB func (PR #3200)
-* Refactor request handling wrappers (PR #3203)
-* transaction_id, destination defined twice (PR #3209) Thanks to @damir-manapov!
-* Refactor event storage to prepare for changes in state calculations (PR #3141)
-* Set Server header in SynapseRequest (PR #3208)
-* Use deferred.addTimeout instead of time_bound_deferred (PR #3127, #3178)
-* Use run_in_background in preference to preserve_fn (PR #3140)
-
-Changes - Python 3 migration:
-
-* Construct HMAC as bytes on py3 (PR #3156) Thanks to @NotAFile!
-* run config tests on py3 (PR #3159) Thanks to @NotAFile!
-* Open certificate files as bytes (PR #3084) Thanks to @NotAFile!
-* Open config file in non-bytes mode (PR #3085) Thanks to @NotAFile!
-* Make event properties raise AttributeError instead (PR #3102) Thanks to @NotAFile!
-* Use six.moves.urlparse (PR #3108) Thanks to @NotAFile!
-* Add py3 tests to tox with folders that work (PR #3145) Thanks to @NotAFile!
-* Don't yield in list comprehensions (PR #3150) Thanks to @NotAFile!
-* Move more xrange to six (PR #3151) Thanks to @NotAFile!
-* make imports local (PR #3152) Thanks to @NotAFile!
-* move httplib import to six (PR #3153) Thanks to @NotAFile!
-* Replace stringIO imports with six (PR #3154, #3168) Thanks to @NotAFile!
-* more bytes strings (PR #3155) Thanks to @NotAFile!
-
-Bug Fixes:
-
-* synapse fails to start under Twisted >= 18.4 (PR #3157)
-* Fix a class of logcontext leaks (PR #3170)
-* Fix a couple of logcontext leaks in unit tests (PR #3172)
-* Fix logcontext leak in media repo (PR #3174)
-* Escape label values in prometheus metrics (PR #3175, #3186)
-* Fix 'Unhandled Error' logs with Twisted 18.4 (PR #3182) Thanks to @Half-Shot!
-* Fix logcontext leaks in rate limiter (PR #3183)
-* notifications: Convert next_token to string according to the spec (PR #3190) Thanks to @mujx!
-* nuke-room-from-db.sh: fix deletion from search table (PR #3194) Thanks to @rubo77!
-* add guard for None on purge_history api (PR #3160) Thanks to @krombel!
-
-Changes in synapse v0.28.1 (2018-05-01)
-=======================================
-
-SECURITY UPDATE
-
-* Clamp the allowed values of event depth received over federation to be
-  [0, 2^63 - 1].  This mitigates an attack where malicious events
-  injected with depth = 2^63 - 1 render rooms unusable.  Depth is used to
-  determine the cosmetic ordering of events within a room, and so the ordering
-  of events in such a room will default to using stream_ordering rather than depth
-  (topological_ordering).
-
-  This is a temporary solution to mitigate abuse in the wild, whilst a long term solution
-  is being implemented to improve how the depth parameter is used.
-
-  Full details at
-  https://docs.google.com/document/d/1I3fi2S-XnpO45qrpCsowZv8P8dHcNZ4fsBsbOW7KABI
-
-* Pin Twisted to <18.4 until we stop using the private _OpenSSLECCurve API.
-
-
-Changes in synapse v0.28.0 (2018-04-26)
-=======================================
-
-Bug Fixes:
-
-* Fix quarantine media admin API and search reindex (PR #3130)
-* Fix media admin APIs (PR #3134)
-
-
-Changes in synapse v0.28.0-rc1 (2018-04-24)
-===========================================
-
-Minor performance improvement to federation sending and bug fixes.
-
-(Note: This release does not include the delta state resolution implementation discussed in Matrix Live.)
-
-
-Features:
-
-* Add metrics for event processing lag (PR #3090)
-* Add metrics for ResponseCache (PR #3092)
-
-Changes:
-
-* Synapse on PyPy (PR #2760) Thanks to @Valodim!
-* move handling of auto_join_rooms to RegisterHandler (PR #2996) Thanks to @krombel!
-* Improve handling of SRV records for federation connections (PR #3016) Thanks to @silkeh!
-* Document the behaviour of ResponseCache (PR #3059)
-* Preparation for py3 (PR #3061, #3073, #3074, #3075, #3103, #3104, #3106, #3107, #3109, #3110) Thanks to @NotAFile!
-* update prometheus dashboard to use new metric names (PR #3069) Thanks to @krombel!
-* use python3-compatible prints (PR #3074) Thanks to @NotAFile!
-* Send federation events concurrently (PR #3078)
-* Limit concurrent event sends for a room (PR #3079)
-* Improve R30 stat definition (PR #3086)
-* Send events to ASes concurrently (PR #3088)
-* Refactor ResponseCache usage (PR #3093)
-* Clarify that SRV may not point to a CNAME (PR #3100) Thanks to @silkeh!
-* Use str(e) instead of e.message (PR #3103) Thanks to @NotAFile!
-* Use six.itervalues in some places (PR #3106) Thanks to @NotAFile!
-* Refactor store.have_events (PR #3117)
-
-Bug Fixes:
-
-* Return 401 for invalid access_token on logout (PR #2938) Thanks to @dklug!
-* Return a 404 rather than a 500 on rejoining empty rooms (PR #3080)
-* fix federation_domain_whitelist (PR #3099)
-* Avoid creating events with huge numbers of prev_events (PR #3113)
-* Reject events which have lots of prev_events (PR #3118)
-
-
-Changes in synapse v0.27.4 (2018-04-13)
-======================================
-
-Changes:
-
-* Update canonicaljson dependency (#3095)
-
-
-Changes in synapse v0.27.3 (2018-04-11)
-=======================================
-
-Bug fixes:
-
-* URL quote path segments over federation (#3082)
-
-Changes in synapse v0.27.3-rc2 (2018-04-09)
-===========================================
-
-v0.27.3-rc1 used a stale version of the develop branch, so the changelog overstates
-the functionality. v0.27.3-rc2 is up to date; rc1 should be ignored.
-
-Changes in synapse v0.27.3-rc1 (2018-04-09)
-===========================================
-
-Notable changes include API support for joinability of groups, plus new metrics
-and phone-home stats. The phone-home stats give better visibility of system usage,
-so we can tweak synapse to work better for all users rather than relying only on our
-own experience with matrix.org. We also now record the 'r30' stat, which is the
-measure we use to track overall growth of the Matrix ecosystem. It counts the
-number of native 30-day retained users, defined as:
-
-* Users who created their accounts more than 30 days ago
-* Who were last seen at most 30 days ago
-* And whose account creation and last_seen are more than 30 days apart
-
-
-Features:
-
-* Add joinability for groups (PR #3045)
-* Implement group join API (PR #3046)
-* Add counter metrics for calculating state delta (PR #3033)
-* R30 stats (PR #3041)
-* Measure time it takes to calculate state group ID (PR #3043)
-* Add basic performance statistics to phone home (PR #3044)
-* Add response size metrics (PR #3071)
-* phone home cache size configurations (PR #3063)
-
-Changes:
-
-* Add a blurb explaining the main synapse worker (PR #2886) Thanks to @turt2live!
-* Replace old style error catching with 'as' keyword (PR #3000) Thanks to @NotAFile!
-* Use .iter* to avoid copies in StateHandler (PR #3006)
-* Linearize calls to _generate_user_id (PR #3029)
-* Remove last usage of ujson (PR #3030)
-* Use simplejson throughout (PR #3048)
-* Use static JSONEncoders (PR #3049)
-* Remove uses of events.content (PR #3060)
-* Improve database cache performance (PR #3068)
-
-Bug fixes:
-
-* Add room_id to the response of `rooms/{roomId}/join` (PR #2986) Thanks to @jplatte!
-* Fix replication after switch to simplejson (PR #3015)
-* 404 correctly on missing paths via NoResource (PR #3022)
-* Fix error when claiming e2e keys from offline servers (PR #3034)
-* fix tests/storage/test_user_directory.py (PR #3042)
-* use PUT instead of POST for federating groups/m.join_policy (PR #3070) Thanks to @krombel!
-* postgres port script: fix state_groups_pkey error (PR #3072)
-
-
-Changes in synapse v0.27.2 (2018-03-26)
-=======================================
-
-Bug fixes:
-
-* Fix bug which broke TCP replication between workers (PR #3015)
-
-
-Changes in synapse v0.27.1 (2018-03-26)
-=======================================
-
-Meta release as v0.27.0 temporarily pointed to the wrong commit
-
-
-Changes in synapse v0.27.0 (2018-03-26)
-=======================================
-
-No changes since v0.27.0-rc2
-
-
-Changes in synapse v0.27.0-rc2 (2018-03-19)
-===========================================
-
-Pulls in v0.26.1
-
-Bug fixes:
-
-* Fix bug introduced in v0.27.0-rc1 that causes much increased memory usage in state cache (PR #3005)
-
-
-Changes in synapse v0.26.1 (2018-03-15)
-=======================================
-
-Bug fixes:
-
-* Fix bug where an invalid event caused server to stop functioning correctly,
-  due to parsing and serializing bugs in ujson library (PR #3008)
-
-
-Changes in synapse v0.27.0-rc1 (2018-03-14)
-===========================================
-
-The common case for running Synapse is not to run separate workers, but for those that do, be aware that synctl no longer starts the main synapse when using the ``-a`` option with workers. A new worker file should be added with ``worker_app: synapse.app.homeserver``.
-
-This release also begins the process of renaming a number of the metrics
-reported to prometheus. See `docs/metrics-howto.rst <docs/metrics-howto.rst#block-and-response-metrics-renamed-for-0-27-0>`_.
-Note that the v0.28.0 release will remove the deprecated metric names.
-
-Features:
-
-* Add ability for ASes to override message send time (PR #2754)
-* Add support for custom storage providers for media repository (PR #2867, #2777, #2783, #2789, #2791, #2804, #2812, #2814, #2857, #2868, #2767)
-* Add purge API features, see `docs/admin_api/purge_history_api.rst <docs/admin_api/purge_history_api.rst>`_ for full details (PR #2858, #2867, #2882, #2946, #2962, #2943)
-* Add support for whitelisting 3PIDs that users can register. (PR #2813)
-* Add ``/room/{id}/event/{id}`` API (PR #2766)
-* Add an admin API to get all the media in a room (PR #2818) Thanks to @turt2live!
-* Add ``federation_domain_whitelist`` option (PR #2820, #2821)
-
-
-Changes:
-
-* Continue to factor out processing from main process and into worker processes. See updated `docs/workers.rst <docs/workers.rst>`_ (PR #2892 - #2904, #2913, #2920 - #2926, #2947, #2847, #2854, #2872, #2873, #2874, #2928, #2929, #2934, #2856, #2976 - #2984, #2987 - #2989, #2991 - #2993, #2995, #2784)
-* Ensure state cache is used when persisting events (PR #2864, #2871, #2802, #2835, #2836, #2841, #2842, #2849)
-* Change the default config to bind on both IPv4 and IPv6 on all platforms (PR #2435) Thanks to @silkeh!
-* No longer require a specific version of saml2 (PR #2695) Thanks to @okurz!
-* Remove ``verbosity``/``log_file`` from generated config (PR #2755)
-* Add and improve metrics and logging (PR #2770, #2778, #2785, #2786, #2787, #2793, #2794, #2795, #2809, #2810, #2833, #2834, #2844, #2965, #2927, #2975, #2790, #2796, #2838)
-* When using synctl with workers, don't start the main synapse automatically (PR #2774)
-* Minor performance improvements (PR #2773, #2792)
-* Use a connection pool for non-federation outbound connections (PR #2817)
-* Make it possible to run unit tests against postgres (PR #2829)
-* Update pynacl dependency to 1.2.1 or higher (PR #2888) Thanks to @bachp!
-* Remove ability for AS users to call /events and /sync (PR #2948)
-* Use bcrypt.checkpw (PR #2949) Thanks to @krombel!
-
-Bug fixes:
-
-* Fix broken ``ldap_config`` config option (PR #2683) Thanks to @seckrv!
-* Fix error message when user is not allowed to unban (PR #2761) Thanks to @turt2live!
-* Fix publicised groups GET API (singular) over federation (PR #2772)
-* Fix user directory when using ``user_directory_search_all_users`` config option (PR #2803, #2831)
-* Fix error on ``/publicRooms`` when no rooms exist (PR #2827)
-* Fix bug in quarantine_media (PR #2837)
-* Fix url_previews when no Content-Type is returned from URL (PR #2845)
-* Fix rare race in sync API when joining room (PR #2944)
-* Fix slow event search, switch back from GIST to GIN indexes (PR #2769, #2848)
-
-
-Changes in synapse v0.26.0 (2018-01-05)
-=======================================
-
-No changes since v0.26.0-rc1
-
-
-Changes in synapse v0.26.0-rc1 (2017-12-13)
-===========================================
-
-Features:
-
-* Add ability for ASes to publicise groups for their users (PR #2686)
-* Add all local users to the user_directory and optionally search them (PR
-  #2723)
-* Add support for custom login types for validating users (PR #2729)
-
-
-Changes:
-
-* Update example Prometheus config to new format (PR #2648) Thanks to
-  @krombel!
-* Rename redact_content option to include_content in Push API (PR #2650)
-* Declare support for r0.3.0 (PR #2677)
-* Improve upserts (PR #2684, #2688, #2689, #2713)
-* Improve documentation of workers (PR #2700)
-* Improve tracebacks on exceptions (PR #2705)
-* Allow guest access to group APIs for reading (PR #2715)
-* Support for posting content in federation_client script (PR #2716)
-* Delete devices and pushers on logouts etc (PR #2722)
-
-
-Bug fixes:
-
-* Fix database port script (PR #2673)
-* Fix internal server error on login with ldap_auth_provider (PR #2678) Thanks
-  to @jkolo!
-* Fix error on sqlite 3.7 (PR #2697)
-* Fix OPTIONS on preview_url (PR #2707)
-* Fix error handling on dns lookup (PR #2711)
-* Fix wrong avatars when inviting multiple users when creating room (PR #2717)
-* Fix 500 when joining matrix-dev (PR #2719)
-
-
-Changes in synapse v0.25.1 (2017-11-17)
-=======================================
-
-Bug fixes:
-
-* Fix login with LDAP and other password provider modules (PR #2678). Thanks to
-  @jkolo!
-
-Changes in synapse v0.25.0 (2017-11-15)
-=======================================
-
-Bug fixes:
-
-* Fix port script (PR #2673)
-
-
-Changes in synapse v0.25.0-rc1 (2017-11-14)
-===========================================
-
-Features:
-
-* Add is_public to groups table to allow for private groups (PR #2582)
-* Add a route for determining who you are (PR #2668) Thanks to @turt2live!
-* Add more features to the password providers (PR #2608, #2610, #2620, #2622,
-  #2623, #2624, #2626, #2628, #2629)
-* Add a hook for custom rest endpoints (PR #2627)
-* Add API to update group room visibility (PR #2651)
-
-
-Changes:
-
-* Ignore <noscript> tags when generating URL preview descriptions (PR #2576)
-  Thanks to @maximevaillancourt!
-* Register some /unstable endpoints in /r0 as well (PR #2579) Thanks to
-  @krombel!
-* Support /keys/upload on /r0 as well as /unstable (PR #2585)
-* Front-end proxy: pass through auth header (PR #2586)
-* Allow ASes to deactivate their own users (PR #2589)
-* Remove refresh tokens (PR #2613)
-* Automatically set default displayname on register (PR #2617)
-* Log login requests (PR #2618)
-* Always return `is_public` in the `/groups/:group_id/rooms` API (PR #2630)
-* Avoid no-op media deletes (PR #2637) Thanks to @spantaleev!
-* Fix various embarrassing typos around user_directory and add some doc. (PR
-  #2643)
-* Return whether a user is an admin within a group (PR #2647)
-* Namespace visibility options for groups (PR #2657)
-* Downcase UserIDs on registration (PR #2662)
-* Cache failures when fetching URL previews (PR #2669)
-
-
-Bug fixes:
-
-* Fix port script (PR #2577)
-* Fix error when running synapse with no logfile (PR #2581)
-* Fix UI auth when deleting devices (PR #2591)
-* Fix typo when checking if user is invited to group (PR #2599)
-* Fix the port script to drop NUL values in all tables (PR #2611)
-* Fix appservices being backlogged and not receiving new events due to a bug in
-  notify_interested_services (PR #2631) Thanks to @xyzz!
-* Fix updating rooms avatar/display name when modified by admin (PR #2636)
-  Thanks to @farialima!
-* Fix bug in state group storage (PR #2649)
-* Fix 500 on invalid utf-8 in request (PR #2663)
-
-
-Changes in synapse v0.24.1 (2017-10-24)
-=======================================
-
-Bug fixes:
-
-* Fix updating group profiles over federation (PR #2567)
-
-
-Changes in synapse v0.24.0 (2017-10-23)
-=======================================
-
-No changes since v0.24.0-rc1
-
-
-Changes in synapse v0.24.0-rc1 (2017-10-19)
-===========================================
-
-Features:
-
-* Add Group Server (PR #2352, #2363, #2374, #2377, #2378, #2382, #2410, #2426,
-  #2430, #2454, #2471, #2472, #2544)
-* Add support for channel notifications (PR #2501)
-* Add basic implementation of backup media store (PR #2538)
-* Add config option to auto-join new users to rooms (PR #2545)
-
-
-Changes:
-
-* Make the spam checker a module (PR #2474)
-* Delete expired url cache data (PR #2478)
-* Ignore incoming events for rooms that we have left (PR #2490)
-* Allow spam checker to reject invites too (PR #2492)
-* Add room creation checks to spam checker (PR #2495)
-* Spam checking: add the invitee to user_may_invite (PR #2502)
-* Process events from federation for different rooms in parallel (PR #2520)
-* Allow error strings from spam checker (PR #2531)
-* Improve error handling for missing files in config (PR #2551)
-
-
-Bug fixes:
-
-* Fix handling SERVFAILs when doing AAAA lookups for federation (PR #2477)
-* Fix incompatibility with newer versions of ujson (PR #2483) Thanks to
-  @jeremycline!
-* Fix notification keywords that start/end with non-word chars (PR #2500)
-* Fix stack overflow and logcontexts from linearizer (PR #2532)
-* Fix 500 error when fields missing from power_levels event (PR #2552)
-* Fix 500 error when we get an error handling a PDU (PR #2553)
-
-
-Changes in synapse v0.23.1 (2017-10-02)
-=======================================
-
-Changes:
-
-* Make 'affinity' package optional, as it is not supported on some platforms
-
-
-Changes in synapse v0.23.0 (2017-10-02)
-=======================================
-
-No changes since v0.23.0-rc2
-
-
-Changes in synapse v0.23.0-rc2 (2017-09-26)
-===========================================
-
-Bug fixes:
-
-* Fix regression in performance of syncs (PR #2470)
-
-
-Changes in synapse v0.23.0-rc1 (2017-09-25)
-===========================================
-
-Features:
-
-* Add a frontend proxy worker (PR #2344)
-* Add support for event_id_only push format (PR #2450)
-* Add a PoC for filtering spammy events (PR #2456)
-* Add a config option to block all room invites (PR #2457)
-
-
-Changes:
-
-* Use bcrypt module instead of py-bcrypt (PR #2288) Thanks to @kyrias!
-* Improve performance of generating push notifications (PR #2343, #2357, #2365,
-  #2366, #2371)
-* Improve DB performance for device list handling in sync (PR #2362)
-* Include a sample prometheus config (PR #2416)
-* Document known-to-work postgres version (PR #2433) Thanks to @ptman!
-
-
-Bug fixes:
-
-* Fix caching error in the push evaluator (PR #2332)
-* Fix bug where pusherpool didn't start and broke some rooms (PR #2342)
-* Fix port script for user directory tables (PR #2375)
-* Fix device lists notifications when user rejoins a room (PR #2443, #2449)
-* Fix sync to always send down current state events in timeline (PR #2451)
-* Fix bug where guest users were incorrectly kicked (PR #2453)
-* Fix bug talking to IPv6 only servers using SRV records (PR #2462)
-
-
-Changes in synapse v0.22.1 (2017-07-06)
-=======================================
-
-Bug fixes:
-
-* Fix bug where pusher pool didn't start and caused issues when
-  interacting with some rooms (PR #2342)
-
-
-Changes in synapse v0.22.0 (2017-07-06)
-=======================================
-
-No changes since v0.22.0-rc2
-
-
-Changes in synapse v0.22.0-rc2 (2017-07-04)
-===========================================
-
-Changes:
-
-* Improve performance of storing user IPs (PR #2307, #2308)
-* Slightly improve performance of verifying access tokens (PR #2320)
-* Slightly improve performance of event persistence (PR #2321)
-* Increase default cache factor size from 0.1 to 0.5 (PR #2330)
-
-Bug fixes:
-
-* Fix bug with storing registration sessions that caused frequent CPU churn
-  (PR #2319)
-
-
-Changes in synapse v0.22.0-rc1 (2017-06-26)
-===========================================
-
-Features:
-
-* Add a user directory API (PR #2252, and many more)
-* Add shutdown room API to remove room from local server (PR #2291)
-* Add API to quarantine media (PR #2292)
-* Add new config option to not send event contents to push servers (PR #2301)
-  Thanks to @cjdelisle!
-
-Changes:
-
-* Various performance fixes (PR #2177, #2233, #2230, #2238, #2248, #2256,
-  #2274)
-* Deduplicate sync filters (PR #2219) Thanks to @krombel!
-* Correct a typo in UPGRADE.rst (PR #2231) Thanks to @aaronraimist!
-* Add count of one time keys to sync stream (PR #2237)
-* Only store event_auth for state events (PR #2247)
-* Store URL cache preview downloads separately (PR #2299)
-
-Bug fixes:
-
-* Fix users not getting notifications when an AS listened to that user_id (PR
-  #2216) Thanks to @slipeer!
-* Fix users without push set up not getting notifications after joining rooms
-  (PR #2236)
-* Fix preview url API to trim long descriptions (PR #2243)
-* Fix bug where we used a cached but unpersisted state group as the prev
-  group, resulting in broken state on restart (PR #2263)
-* Fix removing of pushers when using workers (PR #2267)
-* Fix CORS headers to allow Authorization header (PR #2285) Thanks to @krombel!
-
-
-Changes in synapse v0.21.1 (2017-06-15)
-=======================================
-
-Bug fixes:
-
-* Fix bug in anonymous usage statistic reporting (PR #2281)
-
-
-Changes in synapse v0.21.0 (2017-05-18)
-=======================================
-
-No changes since v0.21.0-rc3
-
-
-Changes in synapse v0.21.0-rc3 (2017-05-17)
-===========================================
-
-Features:
-
-* Add per user rate-limiting overrides (PR #2208)
-* Add config option to limit maximum number of events requested by ``/sync``
-  and ``/messages`` (PR #2221) Thanks to @psaavedra!
-
-
-Changes:
-
-* Various small performance fixes (PR #2201, #2202, #2224, #2226, #2227, #2228,
-  #2229)
-* Update username availability checker API (PR #2209, #2213)
-* When purging, don't de-delta state groups we're about to delete (PR #2214)
-* Add documentation on how to check the synapse version (PR #2215) Thanks to
-  @hamber-dick!
-* Add an index to event_search to speed up purge history API (PR #2218)
-
-
-Bug fixes:
-
-* Fix API to allow clients to upload one-time-keys with new sigs (PR #2206)
-
-
-Changes in synapse v0.21.0-rc2 (2017-05-08)
-===========================================
-
-Changes:
-
-* Always mark remotes as up if we receive a signed request from them (PR #2190)
-
-
-Bug fixes:
-
-* Fix bug where users got pushed for rooms they had muted (PR #2200)
-
-
-Changes in synapse v0.21.0-rc1 (2017-05-08)
-===========================================
-
-Features:
-
-* Add username availability checker API (PR #2183)
-* Add read marker API (PR #2120)
-
-
-Changes:
-
-* Enable guest access for the 3pl/3pid APIs (PR #1986)
-* Add setting to support TURN for guests (PR #2011)
-* Various performance improvements (PR #2075, #2076, #2080, #2083, #2108,
-  #2158, #2176, #2185)
-* Make synctl a bit more user friendly (PR #2078, #2127) Thanks @APwhitehat!
-* Replace HTTP replication with TCP replication (PR #2082, #2097, #2098,
-  #2099, #2103, #2014, #2016, #2115, #2116, #2117)
-* Support authenticated SMTP (PR #2102) Thanks @DanielDent!
-* Add a counter metric for successfully-sent transactions (PR #2121)
-* Propagate errors sensibly from proxied IS requests (PR #2147)
-* Add more granular event send metrics (PR #2178)
-
-
-
-Bug fixes:
-
-* Fix nuke-room script to work with current schema (PR #1927) Thanks
-  @zuckschwerdt!
-* Fix db port script to not assume postgres tables are in the public schema
-  (PR #2024) Thanks @jerrykan!
-* Fix getting latest device IP for user with no devices (PR #2118)
-* Fix rejection of invites to unreachable servers (PR #2145)
-* Fix code for reporting old verify keys in synapse (PR #2156)
-* Fix invite state to always include all events (PR #2163)
-* Fix bug where synapse would always fetch state for any missing event (PR #2170)
-* Fix a leak with timed out HTTP connections (PR #2180)
-* Fix bug where we didn't time out HTTP requests to ASes (PR #2192)
-
-
-Docs:
-
-* Clarify doc for SQLite to PostgreSQL port (PR #1961) Thanks @benhylau!
-* Fix typo in synctl help (PR #2107) Thanks @HarHarLinks!
-* ``web_client_location`` documentation fix (PR #2131) Thanks @matthewjwolff!
-* Update README.rst with FreeBSD changes (PR #2132) Thanks @feld!
-* Clarify setting up metrics (PR #2149) Thanks @encks!
-
-
-Changes in synapse v0.20.0 (2017-04-11)
-=======================================
-
-Bug fixes:
-
-* Fix joining rooms over federation where not all servers in the room saw the
-  new server had joined (PR #2094)
-
-
-Changes in synapse v0.20.0-rc1 (2017-03-30)
-===========================================
-
-Features:
-
-* Add delete_devices API (PR #1993)
-* Add phone number registration/login support (PR #1994, #2055)
-
-
-Changes:
-
-* Use JSONSchema for validation of filters. Thanks @pik! (PR #1783)
-* Reread log config on SIGHUP (PR #1982)
-* Speed up public room list (PR #1989)
-* Add helpful texts to logger config options (PR #1990)
-* Minor ``/sync`` performance improvements. (PR #2002, #2013, #2022)
-* Add some debug to help diagnose weird federation issue (PR #2035)
-* Correctly limit retries for all federation requests (PR #2050, #2061)
-* Don't lock table when persisting new one time keys (PR #2053)
-* Reduce some CPU work on DB threads (PR #2054)
-* Cache hosts in room (PR #2060)
-* Batch sending of device list pokes (PR #2063)
-* Speed up persist event path in certain edge cases (PR #2070)
-
-
-Bug fixes:
-
-* Fix bug where current_state_events renamed to current_state_ids (PR #1849)
-* Fix routing loop when fetching remote media (PR #1992)
-* Fix current_state_events table to not lie (PR #1996)
-* Fix CAS login to handle PartialDownloadError (PR #1997)
-* Fix assertion to stop transaction queue getting wedged (PR #2010)
-* Fix presence to fall back to last_active_ts if it beats the last sync time.
-  Thanks @Half-Shot! (PR #2014)
-* Fix bug when federation received a PDU while a room join is in progress (PR
-  #2016)
-* Fix resetting state on rejected events (PR #2025)
-* Fix installation issues in readme. Thanks @ricco386 (PR #2037)
-* Fix caching of remote servers' signature keys (PR #2042)
-* Fix some leaking log context (PR #2048, #2049, #2057, #2058)
-* Fix rejection of invites not reaching sync (PR #2056)
-
-
-
-Changes in synapse v0.19.3 (2017-03-20)
-=======================================
-
-No changes since v0.19.3-rc2
-
-
-Changes in synapse v0.19.3-rc2 (2017-03-13)
-===========================================
-
-Bug fixes:
-
-* Fix bug in handling of incoming device list updates over federation.
-
-
-
-Changes in synapse v0.19.3-rc1 (2017-03-08)
-===========================================
-
-Features:
-
-* Add some administration functionalities. Thanks to morteza-araby! (PR #1784)
-
-
-Changes:
-
-* Reduce database table sizes (PR #1873, #1916, #1923, #1963)
-* Update contrib/ to not use syutil. Thanks to andrewshadura! (PR #1907)
-* Don't fetch current state when sending an event in common case (PR #1955)
-
-
-Bug fixes:
-
-* Fix synapse_port_db failure. Thanks to Pneumaticat! (PR #1904)
-* Fix caching to not cache error responses (PR #1913)
-* Fix APIs to make kick & ban reasons work (PR #1917)
-* Fix bugs in the /keys/changes api (PR #1921)
-* Fix bug where users couldn't forget rooms they were banned from (PR #1922)
-* Fix issue with long language values in pushers API (PR #1925)
-* Fix a race in transaction queue (PR #1930)
-* Fix dynamic thumbnailing to preserve aspect ratio. Thanks to jkolo! (PR
-  #1945)
-* Fix device list update to not constantly resync (PR #1964)
-* Fix potential for huge memory usage when getting devices that have
-  changed (PR #1969)
-
-
-
-Changes in synapse v0.19.2 (2017-02-20)
-=======================================
-
-* Fix bug with event visibility check in /context/ API. Thanks to Tokodomo for
-  pointing it out! (PR #1929)
-
-
-Changes in synapse v0.19.1 (2017-02-09)
-=======================================
-
-* Fix bug where state was incorrectly reset in a room when synapse received an
-  event over federation that did not pass auth checks (PR #1892)
-
-
-Changes in synapse v0.19.0 (2017-02-04)
-=======================================
-
-No changes since RC 4.
-
-
-Changes in synapse v0.19.0-rc4 (2017-02-02)
-===========================================
-
-* Bump cache sizes for common membership queries (PR #1879)
-
-
-Changes in synapse v0.19.0-rc3 (2017-02-02)
-===========================================
-
-* Fix email push in pusher worker (PR #1875)
-* Make presence.get_new_events a bit faster (PR #1876)
-* Make /keys/changes a bit more performant (PR #1877)
-
-
-Changes in synapse v0.19.0-rc2 (2017-02-02)
-===========================================
-
-* Include newly joined users in /keys/changes API (PR #1872)
-
-
-Changes in synapse v0.19.0-rc1 (2017-02-02)
-===========================================
-
-Features:
-
-* Add support for specifying multiple bind addresses (PR #1709, #1712, #1795,
-  #1835). Thanks to @kyrias!
-* Add /account/3pid/delete endpoint (PR #1714)
-* Add config option to configure the Riot URL used in notification emails (PR
-  #1811). Thanks to @aperezdc!
-* Add username and password config options for turn server (PR #1832). Thanks
-  to @xsteadfastx!
-* Implement device lists updates over federation (PR #1857, #1861, #1864)
-* Implement /keys/changes (PR #1869, #1872)
-
-
-Changes:
-
-* Improve IPv6 support (PR #1696). Thanks to @kyrias and @glyph!
-* Log which files we saved attachments to in the media_repository (PR #1791)
-* Linearize updates to membership via PUT /state/ to better handle multiple
-  joins (PR #1787)
-* Limit number of entries to prefill from cache on startup (PR #1792)
-* Remove full_twisted_stacktraces option (PR #1802)
-* Measure size of some caches by sum of the size of cached values (PR #1815)
-* Measure metrics of string_cache (PR #1821)
-* Reduce logging verbosity (PR #1822, #1823, #1824)
-* Don't clobber a displayname or avatar_url if provided by an m.room.member
-  event (PR #1852)
-* Better handle 401/404 response for federation /send/ (PR #1866, #1871)
-
-
-Fixes:
-
-* Fix ability to change password to a non-ascii one (PR #1711)
-* Fix push getting stuck due to looking at the wrong view of state (PR #1820)
-* Fix email address comparison to be case insensitive (PR #1827)
-* Fix occasional inconsistencies of room membership (PR #1836, #1840)
-
-
-Performance:
-
-* Don't block messages sending on bumping presence (PR #1789)
-* Change device_inbox stream index to include user (PR #1793)
-* Optimise state resolution (PR #1818)
-* Use DB cache of joined users for presence (PR #1862)
-* Add an index to make membership queries faster (PR #1867)
-
-
-Changes in synapse v0.18.7 (2017-01-09)
-=======================================
-
-No changes from v0.18.7-rc2
-
-
-Changes in synapse v0.18.7-rc2 (2017-01-07)
-===========================================
-
-Bug fixes:
-
-* Fix error in rc1's logic for discarding invalid inbound traffic, which was
-  incorrectly discarding missing events
-
-
-Changes in synapse v0.18.7-rc1 (2017-01-06)
-===========================================
-
-Bug fixes:
-
-* Fix error in PR #1764 to actually fix the nightmare #1753 bug.
-* Improve deadlock logging further
-* Discard inbound federation traffic from invalid domains, to immunise
-  against #1753
-
-
-Changes in synapse v0.18.6 (2017-01-06)
-=======================================
-
-Bug fixes:
-
-* Fix bug when checking if a guest user is allowed to join a room (PR #1772)
-  Thanks to Patrik Oldsberg for diagnosing and the fix!
-
-
-Changes in synapse v0.18.6-rc3 (2017-01-05)
-===========================================
-
-Bug fixes:
-
-* Fix bug where we failed to send ban events to the banned server (PR #1758)
-* Fix bug where we sent events that didn't originate on this server to
-  other servers (PR #1764)
-* Fix bug where processing an event from a remote server took a long time
-  because we were making long HTTP requests (PR #1765, PR #1744)
-
-Changes:
-
-* Improve logging for debugging deadlocks (PR #1766, PR #1767)
-
-
-Changes in synapse v0.18.6-rc2 (2016-12-30)
-===========================================
-
-Bug fixes:
-
-* Fix memory leak in twisted by initialising logging correctly (PR #1731)
-* Fix bug where fetching missing events took an unacceptable amount of time in
-  large rooms (PR #1734)
-
-
-Changes in synapse v0.18.6-rc1 (2016-12-29)
-===========================================
-
-Bug fixes:
-
-* Make sure that outbound connections are closed (PR #1725)
-
-
-Changes in synapse v0.18.5 (2016-12-16)
-=======================================
-
-Bug fixes:
-
-* Fix federation /backfill returning events it shouldn't (PR #1700)
-* Fix crash in url preview (PR #1701)
-
-
-Changes in synapse v0.18.5-rc3 (2016-12-13)
-===========================================
-
-Features:
-
-* Add support for E2E for guests (PR #1653)
-* Add new API appservice specific public room list (PR #1676)
-* Add new room membership APIs (PR #1680)
-
-
-Changes:
-
-* Enable guest access for private rooms by default (PR #653)
-* Limit the number of events that can be created on a given room concurrently
-  (PR #1620)
-* Log the args that we have on UI auth completion (PR #1649)
-* Stop generating refresh_tokens (PR #1654)
-* Stop putting a time caveat on access tokens (PR #1656)
-* Remove unspecced GET endpoints for e2e keys (PR #1694)
-
-
-Bug fixes:
-
-* Fix handling of 500 and 429's over federation (PR #1650)
-* Fix Content-Type header parsing (PR #1660)
-* Fix error when previewing sites that include unicode, thanks to kyrias (PR
-  #1664)
-* Fix some cases where we drop read receipts (PR #1678)
-* Fix bug where calls to ``/sync`` didn't correctly timeout (PR #1683)
-* Fix bug where E2E key query would fail if a single remote host failed (PR
-  #1686)
-
-
-
-Changes in synapse v0.18.5-rc2 (2016-11-24)
-===========================================
-
-Bug fixes:
-
-* Don't send old events over federation, fixes bug in -rc1.
-
-Changes in synapse v0.18.5-rc1 (2016-11-24)
-===========================================
-
-Features:
-
-* Implement "event_fields" in filters (PR #1638)
-
-Changes:
-
-* Use external ldap auth package (PR #1628)
-* Split out federation transaction sending to a worker (PR #1635)
-* Fail with a coherent error message if ``/sync?filter=`` is invalid (PR #1636)
-* More efficient notif count queries (PR #1644)
-
-
-Changes in synapse v0.18.4 (2016-11-22)
-=======================================
-
-Bug fixes:
-
-* Add workaround for buggy clients that fail to register (PR #1632)
-
-
-Changes in synapse v0.18.4-rc1 (2016-11-14)
-===========================================
-
-Changes:
-
-* Various database efficiency improvements (PR #1188, #1192)
-* Update default config to blacklist more internal IPs, thanks to Euan Kemp (PR
-  #1198)
-* Allow specifying duration in minutes in config, thanks to Daniel Dent (PR
-  #1625)
-
-
-Bug fixes:
-
-* Fix media repo to set CORS headers on responses (PR #1190)
-* Fix registration to not error on non-ascii passwords (PR #1191)
-* Fix create event code to limit the number of prev_events (PR #1615)
-* Fix bug in transaction ID deduplication (PR #1624)
-
-
-Changes in synapse v0.18.3 (2016-11-08)
-=======================================
-
-SECURITY UPDATE
-
-Explicitly require authentication when using LDAP3. This is the default on
-versions of ``ldap3`` above 1.0, but some distributions will package an older
-version.
-
-If you are using LDAP3 login and have a version of ``ldap3`` older than 1.0,
-it is **CRITICAL to upgrade**.
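-
-For example, the installed version can be checked, and upgraded if necessary,
-along these lines (a sketch; adjust for your environment)::
-
-    $ pip show ldap3
-    $ pip install --upgrade "ldap3>=1.0"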
-
-
-Changes in synapse v0.18.2 (2016-11-01)
-=======================================
-
-No changes since v0.18.2-rc5
-
-
-Changes in synapse v0.18.2-rc5 (2016-10-28)
-===========================================
-
-Bug fixes:
-
-* Fix prometheus process metrics in worker processes (PR #1184)
-
-
-Changes in synapse v0.18.2-rc4 (2016-10-27)
-===========================================
-
-Bug fixes:
-
-* Fix ``user_threepids`` schema delta, which in some instances prevented
-  startup after upgrade (PR #1183)
-
-
-Changes in synapse v0.18.2-rc3 (2016-10-27)
-===========================================
-
-Changes:
-
-* Allow clients to supply access tokens as headers (PR #1098; see the example
-  after this list)
-* Clarify error codes for GET /filter/, thanks to Alexander Maznev (PR #1164)
-* Make password reset email field case insensitive (PR #1170)
-* Reduce redundant database work in email pusher (PR #1174)
-* Allow configurable rate limiting per AS (PR #1175)
-* Check whether to ratelimit sooner to avoid work (PR #1176)
-* Standardise prometheus metrics (PR #1177)
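-
-A minimal sketch of the header-based form added in PR #1098 (the endpoint
-shown is illustrative)::
-
-    $ curl -H "Authorization: Bearer <access_token>" \
-        "https://localhost:8448/_matrix/client/r0/sync"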
-
-
-Bug fixes:
-
-* Fix incredibly slow back pagination query (PR #1178)
-* Fix infinite typing bug (PR #1179)
-
-
-Changes in synapse v0.18.2-rc2 (2016-10-25)
-===========================================
-
-(This release did not include the changes advertised and was identical to RC1)
-
-
-Changes in synapse v0.18.2-rc1 (2016-10-17)
-===========================================
-
-Changes:
-
-* Remove redundant event_auth index (PR #1113)
-* Reduce DB hits for replication (PR #1141)
-* Implement pluggable password auth (PR #1155)
-* Remove rate limiting from app service senders and fix get_or_create_user
-  requester, thanks to Patrik Oldsberg (PR #1157)
-* window.postmessage for Interactive Auth fallback (PR #1159)
-* Use sys.executable instead of hardcoded python, thanks to Pedro Larroy
-  (PR #1162)
-* Add config option for adding additional TLS fingerprints (PR #1167)
-* User-interactive auth on delete device (PR #1168)
-
-
-Bug fixes:
-
-* Fix not being allowed to set your own state_key, thanks to Patrik Oldsberg
-  (PR #1150)
-* Fix interactive auth to return 401 for incorrect password (PR #1160,
-  #1166)
-* Fix email push notifs being dropped (PR #1169)
-
-
-
-Changes in synapse v0.18.1 (2016-10-05)
-=======================================
-
-No changes since v0.18.1-rc1
-
-
-Changes in synapse v0.18.1-rc1 (2016-09-30)
-===========================================
-
-Features:
-
-* Add total_room_count_estimate to ``/publicRooms`` (PR #1133)
-
-
-Changes:
-
-* Time out typing over federation (PR #1140)
-* Restructure LDAP authentication (PR #1153)
-
-
-Bug fixes:
-
-* Fix 3pid invites when server is already in the room (PR #1136)
-* Fix upgrading with SQLite taking lots of CPU for a few days
-  after upgrade (PR #1144)
-* Fix upgrading from very old database versions (PR #1145)
-* Fix port script to work with recently added tables (PR #1146)
-
-
-Changes in synapse v0.18.0 (2016-09-19)
-=======================================
-
-The release includes major changes to the state storage database schemas,
-which significantly reduce database size. Synapse will attempt to upgrade
-the current data in the background. Servers with large SQLite databases may
-experience degraded performance while this upgrade is in progress, so you
-may want to consider migrating to Postgres before upgrading very large
-SQLite databases.
-
-
-Changes:
-
-* Make public room search case insensitive (PR #1127)
-
-
-Bug fixes:
-
-* Fix and clean up publicRooms pagination (PR #1129)
-
-
-Changes in synapse v0.18.0-rc1 (2016-09-16)
-===========================================
-
-Features:
-
-* Add ``only=highlight`` on ``/notifications`` (PR #1081)
-* Add server param to /publicRooms (PR #1082)
-* Allow clients to ask for the whole of a single state event (PR #1094)
-* Add is_direct param to /createRoom (PR #1108)
-* Add pagination support to publicRooms (PR #1121)
-* Add very basic filter API to /publicRooms (PR #1126)
-* Add basic direct to device messaging support for E2E (PR #1074, #1084, #1104,
-  #1111)
-
-
-Changes:
-
-* Move to storing state_groups_state as deltas, greatly reducing DB size (PR
-  #1065)
-* Reduce amount of state pulled out of the DB during common requests (PR #1069)
-* Allow PDF to be rendered from media repo (PR #1071)
-* Reindex state_groups_state after pruning (PR #1085)
-* Clobber EDUs in send queue (PR #1095)
-* Conform better to the CAS protocol specification (PR #1100)
-* Limit how often we ask for keys from dead servers (PR #1114)
-
-
-Bug fixes:
-
-* Fix /notifications API when used with ``from`` param (PR #1080)
-* Fix backfill when we cannot find an event. (PR #1107)
-
-
-Changes in synapse v0.17.3 (2016-09-09)
-=======================================
-
-This release fixes a major bug that stopped servers from handling rooms with
-over 1000 members.
-
-
-Changes in synapse v0.17.2 (2016-09-08)
-=======================================
-
-This release contains security bug fixes. Please upgrade.
-
-
-No changes since v0.17.2-rc1
-
-
-Changes in synapse v0.17.2-rc1 (2016-09-05)
-===========================================
-
-Features:
-
-* Start adding store-and-forward direct-to-device messaging (PR #1046, #1050,
-  #1062, #1066)
-
-
-Changes:
-
-* Avoid pulling the full state of a room out so often (PR #1047, #1049, #1063,
-  #1068)
-* Don't notify for online to online presence transitions. (PR #1054)
-* Occasionally persist unpersisted presence updates (PR #1055)
-* Allow application services to have an optional 'url' (PR #1056)
-* Clean up old sent transactions from DB (PR #1059)
-
-
-Bug fixes:
-
-* Fix None check in backfill (PR #1043)
-* Fix membership changes to be idempotent (PR #1067)
-* Fix bug in get_pdu where it would sometimes return events with incorrect
-  signatures
-
-
-
-Changes in synapse v0.17.1 (2016-08-24)
-=======================================
-
-Changes:
-
-* Delete old received_transactions rows (PR #1038)
-* Pass through user-supplied content in /join/$room_id (PR #1039)
-
-
-Bug fixes:
-
-* Fix bug with backfill (PR #1040)
-
-
-Changes in synapse v0.17.1-rc1 (2016-08-22)
-===========================================
-
-Features:
-
-* Add notification API (PR #1028)
-
-
-Changes:
-
-* Don't print stack traces when failing to get remote keys (PR #996)
-* Various federation /event/ perf improvements (PR #998)
-* Only process one local membership event per room at a time (PR #1005)
-* Move default display name push rule (PR #1011, #1023)
-* Fix up preview URL API. Add tests. (PR #1015)
-* Set ``Content-Security-Policy`` on media repo (PR #1021)
-* Make notify_interested_services faster (PR #1022)
-* Add usage stats to prometheus monitoring (PR #1037)
-
-
-Bug fixes:
-
-* Fix token login (PR #993)
-* Fix CAS login (PR #994, #995)
-* Fix /sync to not clobber status_msg (PR #997)
-* Fix redacted state events to include prev_content (PR #1003)
-* Fix some bugs in the auth/ldap handler (PR #1007)
-* Fix backfill request to limit URI length, so that remotes don't reject the
-  requests due to path length limits (PR #1012)
-* Fix AS push code to not send duplicate events (PR #1025)
-
-
-
-Changes in synapse v0.17.0 (2016-08-08)
-=======================================
-
-This release contains significant security bug fixes regarding authenticating
-events received over federation. PLEASE UPGRADE.
-
-This release changes the LDAP configuration format in a backwards incompatible
-way, see PR #843 for details.
-
-
-Changes:
-
-* Add federation /version API (PR #990)
-* Make psutil dependency optional (PR #992)
-
-
-Bug fixes:
-
-* Fix URL preview API to exclude HTML comments in description (PR #988)
-* Fix error handling of remote joins (PR #991)
-
-
-Changes in synapse v0.17.0-rc4 (2016-08-05)
-===========================================
-
-Changes:
-
-* Change the way we summarize URLs when previewing (PR #973)
-* Add new ``/state_ids/`` federation API (PR #979)
-* Speed up processing of ``/state/`` response (PR #986)
-
-Bug fixes:
-
-* Fix event persistence when event has already been partially persisted
-  (PR #975, #983, #985)
-* Fix port script to also copy across backfilled events (PR #982)
-
-
-Changes in synapse v0.17.0-rc3 (2016-08-02)
-===========================================
-
-Changes:
-
-* Forbid non-ASes from registering users whose names begin with '_' (PR #958)
-* Add some basic admin API docs (PR #963)
-
-
-Bug fixes:
-
-* Send the correct host header when fetching keys (PR #941)
-* Fix joining a room that has missing auth events (PR #964)
-* Fix various push bugs (PR #966, #970)
-* Fix adding emails on registration (PR #968)
-
-
-Changes in synapse v0.17.0-rc2 (2016-08-02)
-===========================================
-
-(This release did not include the changes advertised and was identical to RC1)
-
-
-Changes in synapse v0.17.0-rc1 (2016-07-28)
-===========================================
-
-This release changes the LDAP configuration format in a backwards incompatible
-way, see PR #843 for details.
-
-
-Features:
-
-* Add purge_media_cache admin API (PR #902)
-* Add deactivate account admin API (PR #903)
-* Add optional pepper to password hashing (PR #907, #910 by KentShikama)
-* Add an admin option to shared secret registration (breaks backwards compat)
-  (PR #909)
-* Add purge local room history API (PR #911, #923, #924)
-* Add requestToken endpoints (PR #915)
-* Add an /account/deactivate endpoint (PR #921)
-* Add filter param to /messages. Add 'contains_url' to filter. (PR #922)
-* Add device_id support to /login (PR #929)
-* Add device_id support to /v2/register flow. (PR #937, #942)
-* Add GET /devices endpoint (PR #939, #944)
-* Add GET /device/{deviceId} (PR #943)
-* Add update and delete APIs for devices (PR #949)
-
-
-Changes:
-
-* Rewrite LDAP Authentication against ldap3 (PR #843 by mweinelt)
-* Linearize some federation endpoints based on (origin, room_id) (PR #879)
-* Remove the legacy v0 content upload API. (PR #888)
-* Use similar naming we use in email notifs for push (PR #894)
-* Optionally include password hash in createUser endpoint (PR #905 by
-  KentShikama)
-* Use a query that postgresql optimises better for get_events_around (PR #906)
-* Fall back to 'username' if 'user' is not given for appservice registration.
-  (PR #927 by Half-Shot)
-* Add metrics for psutil derived memory usage (PR #936)
-* Record device_id in client_ips (PR #938)
-* Send the correct host header when fetching keys (PR #941)
-* Log the hostname the reCAPTCHA was completed on (PR #946)
-* Make the device id on e2e key upload optional (PR #956)
-* Add r0.2.0 to the "supported versions" list (PR #960)
-* Don't include name of room for invites in push (PR #961)
-
-
-Bug fixes:
-
-* Fix substitution failure in mail template (PR #887)
-* Put most recent 20 messages in email notif (PR #892)
-* Ensure that the guest user is in the database when upgrading accounts
-  (PR #914)
-* Fix various edge cases in auth handling (PR #919)
-* Fix 500 ISE when sending alias event without a state_key (PR #925)
-* Fix bug where we stored rejections in the state_group, persist all
-  rejections (PR #948)
-* Fix lack of check of if the user is banned when handling 3pid invites
-  (PR #952)
-* Fix a couple of bugs in the transaction and keyring code (PR #954, #955)
-
-
-
-Changes in synapse v0.16.1-r1 (2016-07-08)
-==========================================
-
-THIS IS A CRITICAL SECURITY UPDATE.
-
-This fixes a bug which allowed users' accounts to be accessed by unauthorised
-users.
-
-Changes in synapse v0.16.1 (2016-06-20)
-=======================================
-
-Bug fixes:
-
-* Fix assorted bugs in ``/preview_url`` (PR #872)
-* Fix TypeError when setting unicode passwords (PR #873)
-
-
-Performance improvements:
-
-* Turn ``use_frozen_events`` off by default (PR #877)
-* Disable responding with canonical json for federation (PR #878)
-
-
-Changes in synapse v0.16.1-rc1 (2016-06-15)
-===========================================
-
-Features: None
-
-Changes:
-
-* Log requester for ``/publicRoom`` endpoints when possible (PR #856)
-* 502 on ``/thumbnail`` when can't connect to remote server (PR #862)
-* Linearize fetching of gaps on incoming events (PR #871)
-
-
-Bug fixes:
-
-* Fix bug where rooms were marked as published by default (PR #857)
-* Fix bug where joining a room with an event with an invalid sender (PR #868)
-* Fix bug where backfilled events were sent down sync streams (PR #869)
-* Fix bug where outgoing connections could wedge indefinitely, causing push
-  notifications to be unreliable (PR #870)
-
-
-Performance improvements:
-
-* Improve ``/publicRooms`` performance (PR #859)
-
-
-Changes in synapse v0.16.0 (2016-06-09)
-=======================================
-
-NB: As of v0.14 all AS config files must have an ID field.
-
-
-Bug fixes:
-
-* Don't make rooms published by default (PR #857)
-
-Changes in synapse v0.16.0-rc2 (2016-06-08)
-===========================================
-
-Features:
-
-* Add configuration option for tuning GC via ``gc.set_threshold`` (PR #849)
-
-Changes:
-
-* Record metrics about GC (PR #771, #847, #852)
-* Add metric counter for number of persisted events (PR #841)
-
-Bug fixes:
-
-* Fix 'From' header in email notifications (PR #843)
-* Fix presence where timeouts were not being fired for the first 8h after
-  restarts (PR #842)
-* Fix bug where synapse sent malformed transactions to AS's when retrying
-  transactions (Commits 310197b, 8437906)
-
-Performance improvements:
-
-* Remove event fetching from DB threads (PR #835)
-* Change the way we cache events (PR #836)
-* Add events to cache when we persist them (PR #840)
-
-
-Changes in synapse v0.16.0-rc1 (2016-06-03)
-===========================================
-
-Version 0.15 was not released. See v0.15.0-rc1 below for additional changes.
-
-Features:
-
-* Add email notifications for missed messages (PR #759, #786, #799, #810, #815,
-  #821)
-* Add a ``url_preview_ip_range_whitelist`` config param (PR #760)
-* Add /report endpoint (PR #762)
-* Add basic ignore user API (PR #763)
-* Add an openidish mechanism for proving that you own a given user_id (PR #765)
-* Allow clients to specify a server_name to avoid 'No known servers' (PR #794)
-* Add secondary_directory_servers option to fetch room list from other servers
-  (PR #808, #813)
-
-Changes:
-
-* Report per request metrics for all of the things using request_handler (PR
-  #756)
-* Correctly handle ``NULL`` password hashes from the database (PR #775)
-* Allow receipts for events we haven't seen in the db (PR #784)
-* Make synctl read a cache factor from config file (PR #785)
-* Increment badge count per missed convo, not per msg (PR #793)
-* Special case m.room.third_party_invite event auth to match invites (PR #814)
-
-
-Bug fixes:
-
-* Fix typo in event_auth servlet path (PR #757)
-* Fix password reset (PR #758)
-
-
-Performance improvements:
-
-* Reduce database inserts when sending transactions (PR #767)
-* Queue events by room for persistence (PR #768)
-* Add cache to ``get_user_by_id`` (PR #772)
-* Add and use ``get_domain_from_id`` (PR #773)
-* Use tree cache for ``get_linearized_receipts_for_room`` (PR #779)
-* Remove unused indices (PR #782)
-* Add caches to ``bulk_get_push_rules*`` (PR #804)
-* Cache ``get_event_reference_hashes`` (PR #806)
-* Add ``get_users_with_read_receipts_in_room`` cache (PR #809)
-* Use state to calculate ``get_users_in_room`` (PR #811)
-* Load push rules in storage layer so that they get cached (PR #825)
-* Make ``get_joined_hosts_for_room`` use get_users_in_room (PR #828)
-* Poke notifier on next reactor tick (PR #829)
-* Change CacheMetrics to be quicker (PR #830)
-
-
-Changes in synapse v0.15.0-rc1 (2016-04-26)
-===========================================
-
-Features:
-
-* Add login support for Javascript Web Tokens, thanks to Niklas Riekenbrauck
-  (PR #671,#687)
-* Add URL previewing support (PR #688)
-* Add login support for LDAP, thanks to Christoph Witzany (PR #701)
-* Add GET endpoint for pushers (PR #716)
-
-Changes:
-
-* Never notify for member events (PR #667)
-* Deduplicate identical ``/sync`` requests (PR #668)
-* Require user to have left room to forget room (PR #673)
-* Use DNS cache if within TTL (PR #677)
-* Let users see their own leave events (PR #699)
-* Deduplicate membership changes (PR #700)
-* Increase performance of pusher code (PR #705)
-* Respond with error status 504 if failed to talk to remote server (PR #731)
-* Increase search performance on postgres (PR #745)
-
-Bug fixes:
-
-* Fix bug where disabling all notifications still resulted in push (PR #678)
-* Fix bug where users couldn't reject remote invites if remote refused (PR #691)
-* Fix bug where synapse attempted to backfill from itself (PR #693)
-* Fix bug where profile information was not correctly added when joining remote
-  rooms (PR #703)
-* Fix bug where register API required incorrect key name for AS registration
-  (PR #727)
-
-
-Changes in synapse v0.14.0 (2016-03-30)
-=======================================
-
-No changes from v0.14.0-rc2
-
-Changes in synapse v0.14.0-rc2 (2016-03-23)
-===========================================
-
-Features:
-
-* Add published room list API (PR #657)
-
-Changes:
-
-* Change various caches to consume less memory (PR #656, #658, #660, #662,
-  #663, #665)
-* Allow rooms to be published without requiring an alias (PR #664)
-* Intern common strings in caches to reduce memory footprint (#666)
-
-Bug fixes:
-
-* Fix reject invites over federation (PR #646)
-* Fix bug where registration was not idempotent (PR #649)
-* Update aliases event after deleting aliases (PR #652)
-* Fix unread notification count, which was sometimes wrong (PR #661)
-
-Changes in synapse v0.14.0-rc1 (2016-03-14)
-===========================================
-
-Features:
-
-* Add event_id to response to state event PUT (PR #581)
-* Allow guest users access to messages in rooms they have joined (PR #587)
-* Add config for what state is included in a room invite (PR #598)
-* Send the inviter's member event in room invite state (PR #607)
-* Add error codes for malformed/bad JSON in /login (PR #608)
-* Add support for changing the actions for default rules (PR #609)
-* Add environment variable SYNAPSE_CACHE_FACTOR, defaulting to 0.1 (PR #612;
-  see the example after this list)
-* Add ability for alias creators to delete aliases (PR #614)
-* Add profile information to invites (PR #624)
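-
-A minimal sketch of the cache factor variable above, assuming it is exported
-in the homeserver's environment before startup::
-
-    $ export SYNAPSE_CACHE_FACTOR=0.5
-    $ synctl start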
-
-Changes:
-
-* Enforce user_id exclusivity for AS registrations (PR #572)
-* Make adding push rules idempotent (PR #587)
-* Improve presence performance (PR #582, #586)
-* Change presence semantics for ``last_active_ago`` (PR #582, #586)
-* Don't allow ``m.room.create`` to be changed (PR #596)
-* Add 800x600 to default list of valid thumbnail sizes (PR #616)
-* Always include kicks and bans in full /sync (PR #625)
-* Send history visibility on boundary changes (PR #626)
-* Register endpoint now returns a refresh_token (PR #637)
-
-Bug fixes:
-
-* Fix bug where we returned incorrect state in /sync (PR #573)
-* Always return a JSON object from push rule API (PR #606)
-* Fix bug where registering without a user id sometimes failed (PR #610)
-* Report size of ExpiringCache in cache size metrics (PR #611)
-* Fix rejection of invites to empty rooms (PR #615)
-* Fix usage of ``bcrypt`` to not use ``checkpw`` (PR #619)
-* Pin ``pysaml2`` dependency (PR #634)
-* Fix bug in ``/sync`` where timeline order was incorrect for backfilled events
-  (PR #635)
-
-Changes in synapse v0.13.3 (2016-02-11)
-=======================================
-
-* Fix bug where ``/sync`` would occasionally return events in the wrong room.
-
-Changes in synapse v0.13.2 (2016-02-11)
-=======================================
-
-* Fix bug where ``/events`` would fail to skip some events if there had been
-  more events than the limit specified since the last request (PR #570)
-
-Changes in synapse v0.13.1 (2016-02-10)
-=======================================
-
-* Bump matrix-angular-sdk (matrix web console) dependency to 0.6.8 to
-  pull in the fix for SYWEB-361 so that the default client can display
-  HTML messages again(!)
-
-Changes in synapse v0.13.0 (2016-02-10)
-=======================================
-
-This version includes an upgrade of the schema, specifically adding an index to
-the ``events`` table. This may cause synapse to pause for several minutes the
-first time it is started after the upgrade.
-
-Changes:
-
-* Improve general performance (PR #540, #543, #544, #54, #549, #567)
-* Change guest user ids to be incrementing integers (PR #550)
-* Improve performance of public room list API (PR #552)
-* Change profile API to omit keys rather than return null (PR #557)
-* Add ``/media/r0`` endpoint prefix, which is equivalent to ``/media/v1/``
-  (PR #595)
-
-Bug fixes:
-
-* Fix bug with upgrading guest accounts where it would fail if you opened the
-  registration email on a different device (PR #547)
-* Fix bug where unread count could be wrong (PR #568)
-
-
-
-Changes in synapse v0.12.1-rc1 (2016-01-29)
-===========================================
-
-Features:
-
-* Add unread notification counts in ``/sync`` (PR #456)
-* Add support for inviting 3pids in ``/createRoom`` (PR #460)
-* Add ability for guest accounts to upgrade (PR #462)
-* Add ``/versions`` API (PR #468)
-* Add ``event`` to ``/context`` API (PR #492)
-* Add specific error code for invalid user names in ``/register`` (PR #499)
-* Add support for push badge counts (PR #507)
-* Add support for non-guest users to peek in rooms using ``/events`` (PR #510)
-
-Changes:
-
-* Change ``/sync`` so that guest users only get rooms they've joined (PR #469)
-* Change to require unbanning before other membership changes (PR #501)
-* Change default push rules to notify for all messages (PR #486)
-* Change default push rules to not notify on membership changes (PR #514)
-* Change default push rules in one to one rooms to only notify for events that
-  are messages (PR #529)
-* Change ``/sync`` to reject requests with a ``from`` query param (PR #512)
-* Change server manhole to use SSH rather than telnet (PR #473)
-* Change server to require AS users to be registered before use (PR #487)
-* Change server not to start when ASes are invalidly configured (PR #494)
-* Change server to require ID and ``as_token`` to be unique for AS's (PR #496)
-* Change maximum pagination limit to 1000 (PR #497)
-
-Bug fixes:
-
-* Fix bug where ``/sync`` didn't return when something under the leave key
-  changed (PR #461)
-* Fix bug where we returned smaller rather than larger than requested
-  thumbnails when ``method=crop`` (PR #464)
-* Fix thumbnails API to only return cropped thumbnails when asking for a
-  cropped thumbnail (PR #475)
-* Fix bug where we occasionally still logged access tokens (PR #477)
-* Fix bug where ``/events`` would always return immediately for guest users
-  (PR #480)
-* Fix bug where ``/sync`` unexpectedly returned old left rooms (PR #481)
-* Fix enabling and disabling push rules (PR #498)
-* Fix bug where ``/register`` returned 500 when given unicode username
-  (PR #513)
-
-Changes in synapse v0.12.0 (2016-01-04)
-=======================================
-
-* Expose ``/login`` under ``r0`` (PR #459)
-
-Changes in synapse v0.12.0-rc3 (2015-12-23)
-===========================================
-
-* Allow guest accounts access to ``/sync`` (PR #455)
-* Allow filters to include/exclude rooms at the room level
-  rather than just from the components of the sync for each
-  room. (PR #454)
-* Include urls for room avatars in the response to ``/publicRooms`` (PR #453)
-* Don't set an identicon as the avatar for a user when they register (PR #450)
-* Add a ``display_name`` to third-party invites (PR #449)
-* Send more information to the identity server for third-party invites so that
-  it can send richer messages to the invitee (PR #446)
-* Cache the responses to ``/initialSync`` for 5 minutes. If a client
-  retries a request to ``/initialSync`` before a response to the first
-  request has been computed, the same response is used for both requests
-  (PR #457)
-* Fix a bug where synapse would always request the signing keys of
-  remote servers even when the key was cached locally (PR #452)
-* Fix 500 when pagination search results (PR #447)
-* Fix a bug where synapse was leaking raw email address in third-party invites
-  (PR #448)
-
-Changes in synapse v0.12.0-rc2 (2015-12-14)
-===========================================
-
-* Add caches for whether rooms have been forgotten by a user (PR #434)
-* Remove instructions to use ``--process-dependency-link`` since all of the
-  dependencies of synapse are on PyPI (PR #436)
-* Parallelise the processing of ``/sync`` requests (PR #437)
-* Fix race updating presence in ``/events`` (PR #444)
-* Fix bug back-populating search results (PR #441)
-* Fix bug calculating state in ``/sync`` requests (PR #442)
-
-Changes in synapse v0.12.0-rc1 (2015-12-10)
-===========================================
-
-* Host the client APIs released as r0 by
-  https://matrix.org/docs/spec/r0.0.0/client_server.html
-  on paths prefixed by ``/_matrix/client/r0``. (PR #430, PR #415, PR #400)
-* Updates the client APIs to match r0 of the matrix specification.
-
-  * All APIs return events in the new event format, old APIs also include
-    the fields needed to parse the event using the old format for
-    compatibility. (PR #402)
-  * Search results are now given as a JSON array rather than
-    a JSON object (PR #405)
-  * Miscellaneous changes to search (PR #403, PR #406, PR #412)
-  * Filter JSON objects may now be passed as query parameters to ``/sync``
-    (PR #431)
-  * Fix implementation of ``/admin/whois`` (PR #418)
-  * Only include the rooms that user has left in ``/sync`` if the client
-    requests them in the filter (PR #423)
-  * Don't push for ``m.room.message`` by default (PR #411)
-  * Add API for setting per account user data (PR #392)
-  * Allow users to forget rooms (PR #385)
-
-* Performance improvements and monitoring:
-
-  * Add per-request counters for CPU time spent on the main python thread.
-    (PR #421, PR #420)
-  * Add per-request counters for time spent in the database (PR #429)
-  * Make state updates in the C+S API idempotent (PR #416)
-  * Only fire ``user_joined_room`` if the user has actually joined. (PR #410)
-  * Reuse a single http client, rather than creating new ones (PR #413)
-
-* Fixed a bug upgrading from older versions of synapse on postgresql (PR #417)
-
-Changes in synapse v0.11.1 (2015-11-20)
-=======================================
-
-* Add extra options to search API (PR #394)
-* Fix bug where we did not correctly cap federation retry timers. This meant it
-  could take several hours for servers to start talking to resurrected servers,
-  even when they were receiving traffic from them (PR #393)
-* Don't advertise login token flow unless CAS is enabled. This caused issues
-  where some clients would always use the fallback API if they did not
-  recognize all login flows (PR #391)
-* Change /v2 sync API to rename ``private_user_data`` to ``account_data``
-  (PR #386)
-* Change /v2 sync API to remove the ``event_map`` and rename keys in ``rooms``
-  object (PR #389)
-
-Changes in synapse v0.11.0-r2 (2015-11-19)
-==========================================
-
-* Fix bug in database port script (PR #387)
-
-Changes in synapse v0.11.0-r1 (2015-11-18)
-==========================================
-
-* Retry and fail federation requests more aggressively for requests that block
-  client side requests (PR #384)
-
-Changes in synapse v0.11.0 (2015-11-17)
-=======================================
-
-* Change CAS login API (PR #349)
-
-Changes in synapse v0.11.0-rc2 (2015-11-13)
-===========================================
-
-* Various changes to /sync API response format (PR #373)
-* Fix regression when setting display name in newly joined room over
-  federation (PR #368)
-* Fix problem where /search was slow when using SQLite (PR #366)
-
-Changes in synapse v0.11.0-rc1 (2015-11-11)
-===========================================
-
-* Add Search API (PR #307, #324, #327, #336, #350, #359)
-* Add 'archived' state to v2 /sync API (PR #316)
-* Add ability to reject invites (PR #317)
-* Add config option to disable password login (PR #322)
-* Add the login fallback API (PR #330)
-* Add room context API (PR #334)
-* Add room tagging support (PR #335)
-* Update v2 /sync API to match spec (PR #305, #316, #321, #332, #337, #341)
-* Change retry schedule for application services (PR #320)
-* Change retry schedule for remote servers (PR #340)
-* Fix bug where we hosted static content in the incorrect place (PR #329)
-* Fix bug where we didn't increment retry interval for remote servers (PR #343)
-
-Changes in synapse v0.10.1-rc1 (2015-10-15)
-===========================================
-
-* Add support for CAS, thanks to Steven Hammerton (PR #295, #296)
-* Add support for using macaroons for ``access_token`` (PR #256, #229)
-* Add support for ``m.room.canonical_alias`` (PR #287)
-* Add support for viewing the history of rooms that users have left. (PR #276,
-  #294)
-* Add support for refresh tokens (PR #240)
-* Add flag on creation which disables federation of the room (PR #279)
-* Add some room state to invites. (PR #275)
-* Atomically persist events when joining a room over federation (PR #283)
-* Change default history visibility for private rooms (PR #271)
-* Allow users to redact their own sent events (PR #262)
-* Use tox for tests (PR #247)
-* Split up syutil into separate libraries (PR #243)
-
-Changes in synapse v0.10.0-r2 (2015-09-16)
-==========================================
-
-* Fix bug where we always fetched remote server signing keys instead of using
-  ones in our cache.
-* Fix adding threepids to an existing account.
-* Fix bug with inviting over federation where the remote server was already
-  in the room. (PR #281, SYN-392)
-
-Changes in synapse v0.10.0-r1 (2015-09-08)
-==========================================
-
-* Fix bug with python packaging
-
-Changes in synapse v0.10.0 (2015-09-03)
-=======================================
-
-No change from release candidate.
-
-Changes in synapse v0.10.0-rc6 (2015-09-02)
-===========================================
-
-* Remove some of the old database upgrade scripts.
-* Fix database port script to work with newly created sqlite databases.
-
-Changes in synapse v0.10.0-rc5 (2015-08-27)
-===========================================
-
-* Fix bug that broke downloading files with ascii filenames across federation.
-
-Changes in synapse v0.10.0-rc4 (2015-08-27)
-===========================================
-
-* Allow UTF-8 filenames for upload. (PR #259)
-
-Changes in synapse v0.10.0-rc3 (2015-08-25)
-===========================================
-
-* Add ``--keys-directory`` config option to specify where files such as
-  certs and signing keys should be stored in, when using ``--generate-config``
-  or ``--generate-keys``. (PR #250)
-* Allow ``--config-path`` to specify a directory, causing synapse to use all
-  \*.yaml files in the directory as config files. (PR #249)
-* Add ``web_client_location`` config option to specify static files to be
-  hosted by synapse under ``/_matrix/client``. (PR #245)
-* Add helper utility to synapse to read and parse the config files and extract
-  the value of a given key. For example::
-
-    $ python -m synapse.config read server_name -c homeserver.yaml
-    localhost
-
-  (PR #246)
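-
-Similarly, a directory can now be passed as the config path, in which case
-all ``*.yaml`` files it contains are used (a sketch; the path shown is
-illustrative)::
-
-    $ python -m synapse.app.homeserver --config-path /etc/synapse/conf.d/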
-
-
-Changes in synapse v0.10.0-rc2 (2015-08-24)
-===========================================
-
-* Fix bug where we incorrectly populated the ``event_forward_extremities``
-  table, resulting in problems joining large remote rooms (e.g.
-  ``#matrix:matrix.org``)
-* Reduce the number of times we wake up pushers by not listening for presence
-  or typing events, reducing the CPU cost of each pusher.
-
-
-Changes in synapse v0.10.0-rc1 (2015-08-21)
-===========================================
-
-Also see v0.9.4-rc1 changelog, which has been amalgamated into this release.
-
-General:
-
-* Upgrade to Twisted 15 (PR #173)
-* Add support for serving and fetching encryption keys over federation.
-  (PR #208)
-* Add support for logging in with email address (PR #234)
-* Add support for new ``m.room.canonical_alias`` event. (PR #233)
-* Change synapse to treat user IDs case insensitively during registration and
-  login. (If two users already exist with case insensitive matching user ids,
-  synapse will continue to require them to specify their user ids exactly.)
-* Error if a user tries to register with an email already in use. (PR #211)
-* Add extra and improve existing caches (PR #212, #219, #226, #228)
-* Batch various storage requests (PR #226, #228)
-* Fix bug where we didn't correctly log the entity that triggered the request
-  if the request came in via an application service (PR #230)
-* Fix bug where we needlessly regenerated the full list of rooms an AS is
-  interested in. (PR #232)
-* Add support for AS's to use v2_alpha registration API (PR #210)
-
-
-Configuration:
-
-* Add ``--generate-keys`` that will generate any missing cert and key files in
-  the configuration files. This is equivalent to running ``--generate-config``
-  on an existing configuration file. (PR #220)
-* ``--generate-config`` now no longer requires a ``--server-name`` parameter
-  when used on existing configuration files. (PR #220)
-* Add ``--print-pidfile`` flag that controls the printing of the pid to stdout
-  of the daemonised process. (PR #213)
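-
-For instance, missing certs and signing keys can be generated for an existing
-config without regenerating the config itself (a sketch; the path is
-illustrative)::
-
-    $ python -m synapse.app.homeserver --config-path homeserver.yaml \
-        --generate-keys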
-
-Media Repository:
-
-* Fix bug where we picked a lower resolution image than requested. (PR #205)
-* Add support for specifying whether the media repository should dynamically
-  thumbnail images or not. (PR #206)
-
-Metrics:
-
-* Add statistics from the reactor to the metrics API. (PR #224, #225)
-
-Demo Homeservers:
-
-* Fix starting the demo homeservers without rate-limiting enabled. (PR #182)
-* Fix enabling registration on demo homeservers (PR #223)
-
-
-Changes in synapse v0.9.4-rc1 (2015-07-21)
-==========================================
-
-General:
-
-* Add basic implementation of receipts. (SPEC-99)
-* Add support for configuration presets in room creation API. (PR #203)
-* Add auth event that limits the visibility of history for new users.
-  (SPEC-134)
-* Add SAML2 login/registration support. (PR #201. Thanks Muthu Subramanian!)
-* Add client side key management APIs for end to end encryption. (PR #198)
-* Change power level semantics so that you cannot kick, ban or change power
-  levels of users that have equal or greater power level than you. (SYN-192)
-* Improve performance by bulk inserting events where possible. (PR #193)
-* Improve performance by bulk verifying signatures where possible. (PR #194)
-
-
-Configuration:
-
-* Add support for including TLS certificate chains.
-
-Media Repository:
-
-* Add Content-Disposition headers to content repository responses. (SYN-150)
-
-
-Changes in synapse v0.9.3 (2015-07-01)
-======================================
-
-No changes from v0.9.3 Release Candidate 1.
-
-Changes in synapse v0.9.3-rc1 (2015-06-23)
-==========================================
-
-General:
-
-* Fix a memory leak in the notifier. (SYN-412)
-* Improve performance of room initial sync. (SYN-418)
-* General improvements to logging.
-* Remove ``access_token`` query params from ``INFO`` level logging.
-
-Configuration:
-
-* Add support for specifying and configuring multiple listeners. (SYN-389)
-
-Application services:
-
-* Fix bug where synapse failed to send user queries to application services.
-
-Changes in synapse v0.9.2-r2 (2015-06-15)
-=========================================
-
-Fix packaging so that schema delta python files get included in the package.
-
-Changes in synapse v0.9.2 (2015-06-12)
-======================================
-
-General:
-
-* Use ultrajson for json (de)serialisation when a canonical encoding is not
-  required. Ultrajson is significantly faster than simplejson in certain
-  circumstances.
-* Use connection pools for outgoing HTTP connections.
-* Process thumbnails on separate threads.
-
-Configuration:
-
-* Add option, ``gzip_responses``, to disable HTTP response compression.
-
-Federation:
-
-* Improve resilience of backfill by ensuring we fetch any missing auth events.
-* Improve performance of backfill and joining remote rooms by removing
-  unnecessary computations. This included handling events we'd previously
-  handled as well as attempting to compute the current state for outliers.
-
-
-Changes in synapse v0.9.1 (2015-05-26)
-======================================
-
-General:
-
-* Add support for backfilling when a client paginates. This allows servers to
-  request history for a room from remote servers when a client tries to
-  paginate history the server does not have - SYN-36
-* Fix bug where you couldn't disable non-default pushrules - SYN-378
-* Fix ``register_new_user`` script - SYN-359
-* Improve performance of fetching events from the database, this improves both
-  initialSync and sending of events.
-* Improve performance of event streams, allowing synapse to handle more
-  simultaneous connected clients.
-
-Federation:
-
-* Fix bug with existing backfill implementation where it returned the wrong
-  selection of events in some circumstances.
-* Improve performance of joining remote rooms.
-
-Configuration:
-
-* Add support for changing the bind host of the metrics listener via the
-  ``metrics_bind_host`` option.
-
-
-Changes in synapse v0.9.0-r5 (2015-05-21)
-=========================================
-
-* Add more database caches to reduce amount of work done for each pusher. This
-  radically reduces CPU usage when multiple pushers are set up in the same room.
-
-Changes in synapse v0.9.0 (2015-05-07)
-======================================
-
-General:
-
-* Add support for using a PostgreSQL database instead of SQLite. See
-  `docs/postgres.rst`_ for details.
-* Add password change and reset APIs. See `Registration`_ in the spec.
-* Fix memory leak due to not releasing stale notifiers - SYN-339.
-* Fix race in caches that occasionally caused some presence updates to be
-  dropped - SYN-369.
-* Check server name has not changed on restart.
-* Add a sample systemd unit file and a logger configuration in
-  contrib/systemd. Contributed by Ivan Shapovalov.
-
-Federation:
-
-* Add key distribution mechanisms for fetching public keys of unavailable
-  remote home servers. See `Retrieving Server Keys`_ in the spec.
-
-Configuration:
-
-* Add support for multiple config files.
-* Add support for dictionaries in config files.
-* Remove support for specifying config options on the command line, except
-  for:
-
-  * ``--daemonize`` - Daemonize the home server.
-  * ``--manhole`` - Turn on the twisted telnet manhole service on the given
-    port.
-  * ``--database-path`` - The path to a sqlite database to use.
-  * ``--verbose`` - The verbosity level.
-  * ``--log-file`` - File to log to.
-  * ``--log-config`` - Python logging config file.
-  * ``--enable-registration`` - Enable registration for new users.
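-
-A minimal sketch of starting the server using only the options that remain
-supported on the command line (other values come from the config files;
-paths are illustrative)::
-
-    $ python -m synapse.app.homeserver --config-path homeserver.yaml \
-        --daemonize --log-file /var/log/synapse.log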
-
-Application services:
-
-* Reliably retry sending of events from Synapse to application services, as per
-  `Application Services`_ spec.
-* Application services can no longer register via the ``/register`` API,
-  instead their configuration should be saved to a file and listed in the
-  synapse ``app_service_config_files`` config option. The AS configuration file
-  has the same format as the old ``/register`` request.
-  See `docs/application_services.rst`_ for more information.
-
-.. _`docs/postgres.rst`: docs/postgres.rst
-.. _`docs/application_services.rst`: docs/application_services.rst
-.. _`Registration`: https://github.com/matrix-org/matrix-doc/blob/master/specification/10_client_server_api.rst#registration
-.. _`Retrieving Server Keys`: https://github.com/matrix-org/matrix-doc/blob/6f2698/specification/30_server_server_api.rst#retrieving-server-keys
-.. _`Application Services`: https://github.com/matrix-org/matrix-doc/blob/0c6bd9/specification/25_application_service_api.rst#home-server---application-service-api
-
-Changes in synapse v0.8.1 (2015-03-18)
-======================================
-
-* Disable registration by default. New users can be added using the command
-  ``register_new_matrix_user`` or by enabling registration in the config.
-* Add metrics to synapse. To enable metrics use config options
-  ``enable_metrics`` and ``metrics_port``.
-* Fix bug where banning only kicked the user.
-
-Changes in synapse v0.8.0 (2015-03-06)
-======================================
-
-General:
-
-* Add support for registration fallback. This is a page hosted on the server
-  which allows a user to register for an account, regardless of what client
-  they are using (e.g. mobile devices).
-
-* Added new default push rules and made them configurable by clients:
-
-  * Suppress all notice messages.
-  * Notify when invited to a new room.
-  * Notify for messages that don't match any rule.
-  * Notify on incoming call.
-
-Federation:
-
-* Added per-host server-side rate-limiting of incoming federation requests.
-* Added a ``/get_missing_events/`` API to federation to reduce number of
-  ``/events/`` requests.
-
-Configuration:
-
-* Added configuration option to disable registration:
-  ``disable_registration``.
-* Added configuration option to change soft limit of number of open file
-  descriptors: ``soft_file_limit``.
-* Make ``tls_private_key_path`` optional when running with ``no_tls``.
-
-Application services:
-
-* Application services can now poll on the CS API ``/events`` for their events,
-  by providing their application service ``access_token``.
-* Added exclusive namespace support to application services API.
-
-
-Changes in synapse v0.7.1 (2015-02-19)
-======================================
-
-* Initial alpha implementation of parts of the Application Services API.
-  Including:
-
-  - AS Registration / Unregistration
-  - User Query API
-  - Room Alias Query API
-  - Push transport for receiving events.
-  - User/Alias namespace admin control
-
-* Add cache when fetching events from remote servers to stop repeatedly
-  fetching events with bad signatures.
-* Respect the per remote server retry scheme when fetching both events and
-  server keys to reduce the number of times we send requests to dead servers.
-* Inform remote servers when the local server fails to handle a received event.
-* Turn off python bytecode generation due to problems experienced when
-  upgrading from previous versions.
-
-Changes in synapse v0.7.0 (2015-02-12)
-======================================
-
-* Add initial implementation of the query auth federation API, allowing
-  servers to agree on whether an event should be allowed or rejected.
-* Persist events we have rejected from federation, fixing the bug where
-  servers would keep requesting the same events.
-* Various federation performance improvements, including:
-
-  - Add in memory caches on queries such as:
-
-     * Computing the state of a room at a point in time, used for
-       authorization on federation requests.
-     * Fetching events from the database.
-     * User's room membership, used for authorizing presence updates.
-
-  - Upgraded JSON library to improve parsing and serialisation speeds.
-
-* Add default avatars to new user accounts using pydenticon library.
-* Correctly time out federation requests.
-* Retry federation requests against different servers.
-* Add support for push and push rules.
-* Add alpha versions of proposed new CSv2 APIs, including ``/sync`` API.
-
-Changes in synapse 0.6.1 (2015-01-07)
-=====================================
-
-* Major optimizations to improve performance of initial sync and event sending
-  in large rooms (by up to 10x)
-* Media repository now includes a Content-Length header on media downloads.
-* Improve quality of thumbnails by changing resizing algorithm.
-
-Changes in synapse 0.6.0 (2014-12-16)
-=====================================
-
-* Add new API for media upload and download that supports thumbnailing.
-* Replicate media uploads over multiple homeservers so media is always served
-  to clients from their local homeserver. This obsoletes the
-  ``--content-addr`` parameter and ends the confusion over accessing content
-  directly from remote homeservers.
-* Implement exponential backoff when retrying federation requests sent to
-  remote homeservers which are offline.
-* Implement typing notifications.
-* Fix bugs where we sent events with invalid signatures due to bugs where
-  we incorrectly persisted events.
-* Improve performance of database queries involving retrieving events.
-
-Changes in synapse 0.5.4a (2014-12-13)
-======================================
-
-* Fix bug when generating the error message for a file path specified in
-  the config that doesn't exist.
-
-Changes in synapse 0.5.4 (2014-12-03)
-=====================================
-
-* Fix presence bug where some rooms did not display presence updates for
-  remote users.
-* Do not log SQL timing log lines when started with ``-v``.
-* Fix potential memory leak.
-
-Changes in synapse 0.5.3c (2014-12-02)
-======================================
-
-* Change the default value for the ``content_addr`` option to use the HTTP
-  listener, as by default the HTTPS listener will be using a self-signed
-  certificate.
-
-Changes in synapse 0.5.3 (2014-11-27)
-=====================================
-
-* Fix bug that caused joining a remote room to fail if a single event was not
-  signed correctly.
-* Fix bug which caused servers to continuously try and fetch events from other
-  servers.
-
-Changes in synapse 0.5.2 (2014-11-26)
-=====================================
-
-Fix major bug that caused rooms to disappear from people's initial sync.
-
-Changes in synapse 0.5.1 (2014-11-26)
-=====================================
-See UPGRADES.rst for specific instructions on how to upgrade.
-
- * Fix bug where we served up an Event that did not match its signatures.
- * Fix regression where we no longer correctly handled the case where a
-   homeserver receives an event for a room it doesn't recognise (but is in).
-
-Changes in synapse 0.5.0 (2014-11-19)
-=====================================
-This release includes changes to the federation protocol and client-server API
-that are not backwards compatible.
-
-This release also changes the internal database schemas and so requires servers to
-drop their current history. See UPGRADES.rst for details.
-
-Homeserver:
- * Add authentication and authorization to the federation protocol. Events are
-   now signed by their originating homeservers.
- * Implement the new authorization model for rooms.
- * Split out web client into a separate repository: matrix-angular-sdk.
- * Change the structure of PDUs.
- * Fix bug where user could not join rooms via an alias containing 4-byte
-   UTF-8 characters.
- * Merge concept of PDUs and Events internally.
- * Improve logging by adding request ids to log lines.
- * Implement a very basic room initial sync API.
- * Implement the new invite/join federation APIs.
-
-Webclient:
- * The webclient has been moved to a separate repository.
-
-Changes in synapse 0.4.2 (2014-10-31)
-=====================================
-
-Homeserver:
- * Fix bugs where we did not notify users of correct presence updates.
- * Fix bug where we did not handle sub second event stream timeouts.
-
-Webclient:
- * Add ability to click on messages to see JSON.
- * Add ability to redact messages.
- * Add ability to view and edit all room state JSON.
- * Handle incoming redactions.
- * Improve feedback on errors.
- * Fix bugs in mobile CSS.
- * Fix bugs with desktop notifications.
-
-Changes in synapse 0.4.1 (2014-10-17)
-=====================================
-Webclient:
- * Fix bug with display of timestamps.
-
-Changes in synapse 0.4.0 (2014-10-17)
-=====================================
-This release includes changes to the federation protocol and client-server API
-that are not backwards compatible.
-
-The Matrix specification has been moved to a separate git repository:
-http://github.com/matrix-org/matrix-doc
-
-You will also need an updated syutil and config. See UPGRADES.rst.
-
-Homeserver:
- * Sign federation transactions to assert strong identity over federation.
- * Rename timestamp keys in PDUs and events from 'ts' and 'hsob_ts' to 'origin_server_ts'.
-
-
-Changes in synapse 0.3.4 (2014-09-25)
-=====================================
-This version adds support for using a TURN server. See docs/turn-howto.rst on
-how to set one up.
-
-Homeserver:
- * Add support for redaction of messages.
- * Fix bug where inviting a user on a remote home server could take up to
-   20-30s.
- * Implement a get current room state API.
- * Add support for specifying and retrieving TURN server configuration.
-
-Webclient:
- * Add button to send messages to users from the home page.
- * Add support for using TURN for VoIP calls.
- * Show display name change messages.
- * Fix bug where the client didn't get the state of a newly joined room
-   until after it had been refreshed.
- * Fix bugs with tab complete.
- * Fix bug where holding down the down arrow caused chrome to chew 100% CPU.
- * Fix bug where desktop notifications occasionally used "Undefined" as the
-   display name.
- * Fix more places where we sometimes saw room IDs incorrectly.
- * Fix bug which caused lag when entering text in the text box.
-
-Changes in synapse 0.3.3 (2014-09-22)
-=====================================
-
-Homeserver:
- * Fix bug where you continued to get events for rooms you had left.
-
-Webclient:
- * Add support for video calls with basic UI.
- * Fix bug where one to one chats were named after your display name rather
-   than the other person's.
- * Fix bug which caused lag when typing in the textarea.
- * Refuse to run on browsers we know won't work.
- * Trigger pagination when joining new rooms.
- * Fix bug where we sometimes didn't display invitations in recents.
- * Automatically join room when accepting a VoIP call.
- * Disable outgoing and reject incoming calls on browsers we don't support
-   VoIP in.
- * Don't display desktop notifications for messages in the room you are
-   non-idle and speaking in.
-
-Changes in synapse 0.3.2 (2014-09-18)
-=====================================
-
-Webclient:
- * Fix bug where an empty "bing words" list in old accounts didn't send
-   notifications when it should have done.
-
-Changes in synapse 0.3.1 (2014-09-18)
-=====================================
-This is a release to hotfix v0.3.0 to fix two regressions.
-
-Webclient:
- * Fix a regression where we sometimes displayed duplicate events.
- * Fix a regression where we didn't immediately remove rooms you were
-   banned in from the recents list.
-
-Changes in synapse 0.3.0 (2014-09-18)
-=====================================
-See UPGRADE for information about changes to the client-server API, including
-breaking backwards compatibility with VoIP calls and the registration API.
-
-Homeserver:
- * When a user changes their displayname or avatar the server will now update
-   all their join states to reflect this.
- * The server now adds an "age" key to events to indicate how old they are. This
-   is clock independent, so at no point does any server or webclient have to
-   assume their clock is in sync with everyone else.
- * Fix bug where we didn't correctly pull in missing PDUs.
- * Fix bug where prev_content key wasn't always returned.
- * Add support for password resets.
-
-Webclient:
- * Improve page content loading.
- * Join/parts now trigger desktop notifications.
- * Always show room aliases in the UI if one is present.
- * No longer show user-count in the recents side panel.
- * Add up & down arrow support to the text box for message sending to step
-   through your sent history.
- * Don't display notifications for our own messages.
- * Emotes are now formatted correctly in desktop notifications.
- * The recents list now differentiates between public & private rooms.
- * Fix bug where when switching between rooms the pagination flickered before
-   the view jumped to the bottom of the screen.
- * Add bing word support.
-
-Registration API:
- * The registration API has been overhauled to function like the login API. In
-   practice, this means registration requests must now include the following:
-   'type':'m.login.password'. See UPGRADE for more information on this.
- * The 'user_id' key has been renamed to 'user' to better match the login API.
- * There is an additional login type: 'm.login.email.identity'.
- * The command client and web client have been updated to reflect these changes.
-
-Changes in synapse 0.2.3 (2014-09-12)
-=====================================
-
-Homeserver:
- * Fix bug where we stopped sending events to remote home servers if a
-   user from that home server left, even if other users from that server
-   were still in the room.
- * Fix bugs in the state conflict resolution where it was incorrectly
-   rejecting events.
-
-Webclient:
- * Display room names and topics.
- * Allow setting/editing of room names and topics.
- * Display information about rooms on the main page.
- * Handle ban and kick events in real time.
- * VoIP UI and reliability improvements.
- * Add glare support for VoIP.
- * Improvements to initial startup speed.
- * Don't display duplicate join events.
- * Local echo of messages.
- * Differentiate sending and sent of local echo.
- * Various minor bug fixes.
-
-Changes in synapse 0.2.2 (2014-09-06)
-=====================================
-
-Homeserver:
- * When the server returns state events it now also includes the previous
-   content.
- * Add support for inviting people when creating a new room.
- * Make the homeserver inform the room via ``m.room.aliases`` when a new alias
-   is added for a room.
- * Validate ``m.room.power_level`` events.
-
-Webclient:
- * Add support for captchas on registration.
- * Handle ``m.room.aliases`` events.
- * Asynchronously send messages and show a local echo.
- * Inform the UI when a message failed to send.
- * Only autoscroll on receiving a new message if the user was already at the
-   bottom of the screen.
- * Add support for ban/kick reasons.
-
-Changes in synapse 0.2.1 (2014-09-03)
-=====================================
-
-Homeserver:
- * Added support for signing up with a third party id.
- * Add synctl scripts.
- * Added rate limiting.
- * Add option to change the external address the content repo uses.
- * Presence bug fixes.
-
-Webclient:
- * Added support for signing up with a third party id.
- * Added support for banning and kicking users.
- * Added support for displaying and setting ops.
- * Added support for room names.
- * Fix bugs with room membership event display.
-
-Changes in synapse 0.2.0 (2014-09-02)
-=====================================
-This update changes many configuration options, updates the
-database schema and mandates SSL for server-server connections.
-
-Homeserver:
- * Require SSL for server-server connections.
- * Add SSL listener for client-server connections.
- * Add ability to use config files.
- * Add support for kicking/banning and power levels.
- * Allow setting of room names and topics on creation.
- * Change presence to include last seen time of the user.
- * Change url path prefix to /_matrix/...
- * Bug fixes to presence.
-
-Webclient:
- * Reskin the CSS for registration and login.
- * Various improvements to rooms CSS.
- * Support changes in client-server API.
- * Bug fixes to VOIP UI.
- * Various bug fixes to handling of changes to room member list.
-
-Changes in synapse 0.1.2 (2014-08-29)
-=====================================
-
-Webclient:
- * Add basic call state UI for VoIP calls.
-
-Changes in synapse 0.1.1 (2014-08-29)
-=====================================
-
-Homeserver:
- * Fix bug that caused the event stream to not notify some clients about
-   changes.
-
-Changes in synapse 0.1.0 (2014-08-29)
-=====================================
-Presence has been reenabled in this release.
-
-Homeserver:
- * Update client to server API, including:
-    - Use a more consistent url scheme.
-    - Provide more useful information in the initial sync api.
- * Change the presence handling to be much more efficient.
- * Change the presence server-to-server API to not require explicit polling of
-   all users who share a room with a user.
- * Fix races in the event streaming logic.
-
-Webclient:
- * Update to use new client to server API.
- * Add basic VOIP support.
- * Add idle timers that change your status to away.
- * Add recent rooms column when viewing a room.
- * Various network efficiency improvements.
- * Add basic mobile browser support.
- * Add a settings page.
-
-Changes in synapse 0.0.1 (2014-08-22)
-=====================================
-Presence has been disabled in this release due to a bug that caused the
-homeserver to spam other remote homeservers.
-
-Homeserver:
- * Completely change the database schema to support generic event types.
- * Improve presence reliability.
- * Improve reliability of joining remote rooms.
- * Fix bug where room join events were duplicated.
- * Improve initial sync API to return more information to the client.
- * Stop generating fake messages for room membership events.
-
-Webclient:
- * Add tab completion of names.
- * Add ability to upload and send images.
- * Add profile pages.
- * Improve CSS layout of room.
- * Disambiguate identical display names.
- * Don't get remote users display names and avatars individually.
- * Use the new initial sync API to reduce number of round trips to the homeserver.
- * Change url scheme to use room aliases instead of room ids where known.
- * Increase longpoll timeout.
-
-Changes in synapse 0.0.0 (2014-08-13)
-=====================================
-
- * Initial alpha release
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index c6ee16efc7..f9de78a460 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -30,11 +30,11 @@ use github's pull request workflow to review the contribution, and either ask
 you to make any refinements needed or merge it and make them ourselves. The
 changes will then land on master when we next do a release.
 
-We use `Jenkins <http://matrix.org/jenkins>`_ and 
+We use `Jenkins <http://matrix.org/jenkins>`_ and
 `Travis <https://travis-ci.org/matrix-org/synapse>`_ for continuous
-integration. All pull requests to synapse get automatically tested by Travis; 
-the Jenkins builds require an adminstrator to start them. If your change 
-breaks the build, this will be shown in github, so please keep an eye on the 
+integration. All pull requests to synapse get automatically tested by Travis;
+the Jenkins builds require an administrator to start them. If your change
+breaks the build, this will be shown in github, so please keep an eye on the
 pull request for feedback.
 
 Code style
@@ -48,6 +48,27 @@ Please ensure your changes match the cosmetic style of the existing project,
 and **never** mix cosmetic and functional changes in the same commit, as it
 makes it horribly hard to review otherwise.
 
+Changelog
+~~~~~~~~~
+
+All changes, even minor ones, need a corresponding changelog / newsfragment
+entry. These are managed by Towncrier
+(https://github.com/hawkowl/towncrier).
+
+To create a changelog entry, make a new file in the ``changelog.d``
+directory named in the format of ``PRnumber.type``. The type can be
+one of ``feature``, ``bugfix``, ``removal`` (also used for
+deprecations), or ``misc`` (for internal-only changes). The content of
+the file is your changelog entry, which can contain Markdown
+formatting. Adding credits to the changelog is encouraged; we value
+your contributions and would like to have you shouted out in the
+release notes!
+
+For example, a fix in PR #1234 would have its changelog entry in
+``changelog.d/1234.bugfix``, and contain content like "The security levels of
+Florbs are now validated when received over federation. Contributed by Jane
+Matrix".
+
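+For example, one way to create that entry from a shell (just a sketch;
+any text editor works equally well)::
+
+    echo "Fixed a bug in Florb validation. Contributed by Jane Matrix" > changelog.d/1234.bugfix
+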
 Attribution
 ~~~~~~~~~~~
 
@@ -105,16 +126,20 @@ the contribution or otherwise have the right to contribute it to Matrix::
         personal information I submit with it, including my sign-off) is
         maintained indefinitely and may be redistributed consistent with
         this project or the open source license(s) involved.
-        
+
 If you agree to this for your contribution, then all that's needed is to
 include the line in your commit or pull request comment::
 
     Signed-off-by: Your Name <your@email.example.org>
-    
-...using your real name; unfortunately pseudonyms and anonymous contributions
-can't be accepted. Git makes this trivial - just use the -s flag when you do
-``git commit``, having first set ``user.name`` and ``user.email`` git configs
-(which you should have done anyway :)
+
+We accept contributions under a legally identifiable name, such as
+your name on government documentation or common-law names (names
+claimed by legitimate usage or repute). Unfortunately, we cannot
+accept anonymous contributions at this time.
+
+Git allows you to add this signoff automatically when using the ``-s``
+flag to ``git commit``, which uses the name and email set in your
+``user.name`` and ``user.email`` git configs.
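+
+For example, a sketch of the full flow, assuming neither config value is
+set yet::
+
+    git config --global user.name "Your Name"
+    git config --global user.email your@email.example.org
+    git commit -s -m "Describe your change here"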
 
 Conclusion
 ~~~~~~~~~~
diff --git a/Dockerfile b/Dockerfile
deleted file mode 100644
index 565341fee3..0000000000
--- a/Dockerfile
+++ /dev/null
@@ -1,19 +0,0 @@
-FROM docker.io/python:2-alpine3.7
-
-RUN apk add --no-cache --virtual .nacl_deps su-exec build-base libffi-dev zlib-dev libressl-dev libjpeg-turbo-dev linux-headers postgresql-dev libxslt-dev
-
-COPY . /synapse
-
-# A wheel cache may be provided in ./cache for faster build
-RUN cd /synapse \
- && pip install --upgrade pip setuptools psycopg2 lxml \
- && mkdir -p /synapse/cache \
- && pip install -f /synapse/cache --upgrade --process-dependency-links . \
- && mv /synapse/contrib/docker/start.py /synapse/contrib/docker/conf / \
- && rm -rf setup.py setup.cfg synapse
-
-VOLUME ["/data"]
-
-EXPOSE 8008/tcp 8448/tcp
-
-ENTRYPOINT ["/start.py"]
diff --git a/MANIFEST.in b/MANIFEST.in
index e2a6623a63..e0826ba544 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -2,6 +2,7 @@ include synctl
 include LICENSE
 include VERSION
 include *.rst
+include *.md
 include demo/README
 include demo/demo.tls.dh
 include demo/*.py
@@ -29,5 +30,10 @@ exclude Dockerfile
 exclude .dockerignore
 recursive-exclude jenkins *.sh
 
+include pyproject.toml
+recursive-include changelog.d *
+
 prune .github
 prune demo/etc
+prune docker
+prune .circleci
diff --git a/README.rst b/README.rst
index 68e31ba8cf..d6f34ba9d1 100644
--- a/README.rst
+++ b/README.rst
@@ -71,7 +71,7 @@ We'd like to invite you to join #matrix:matrix.org (via
 https://matrix.org/docs/projects/try-matrix-now.html), run a homeserver, take a look
 at the `Matrix spec <https://matrix.org/docs/spec>`_, and experiment with the
 `APIs <https://matrix.org/docs/api>`_ and `Client SDKs
-<http://matrix.org/docs/projects/try-matrix-now.html#client-sdks>`_.
+<https://matrix.org/docs/projects/try-matrix-now.html#client-sdks>`_.
 
 Thanks for using Matrix!
 
@@ -157,9 +157,15 @@ if you prefer.
 
 In case of problems, please see the _`Troubleshooting` section below.
 
-There is an offical synapse image available at https://hub.docker.com/r/matrixdotorg/synapse/tags/ which can be used with the docker-compose file available at `contrib/docker`. Further information on this including configuration options is available in `contrib/docker/README.md`.
+There is an official synapse image available at
+https://hub.docker.com/r/matrixdotorg/synapse/tags/ which can be used with
+the docker-compose file available at `contrib/docker <contrib/docker>`_.
+Further information on this, including configuration options, is available
+in the README on hub.docker.com.
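+
+For example, once the configuration has been customised as described
+there, a minimal sketch of starting it with Compose::
+
+    cd contrib/docker
+    docker-compose up -d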
 
-Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a Dockerfile to automate a synapse server in a single Docker image, at https://hub.docker.com/r/avhost/docker-matrix/tags/
+Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a
+Dockerfile to automate a synapse server in a single Docker image, at
+https://hub.docker.com/r/avhost/docker-matrix/tags/
 
 Configuring synapse
 -------------------
@@ -279,7 +285,7 @@ Connecting to Synapse from a client
 
 The easiest way to try out your new Synapse installation is by connecting to it
 from a web client. The easiest option is probably the one at
-http://riot.im/app. You will need to specify a "Custom server" when you log on
+https://riot.im/app. You will need to specify a "Custom server" when you log on
 or register: set this to ``https://domain.tld`` if you set up a reverse proxy
 following the recommended setup, or ``https://localhost:8448`` - remember to specify the
 port (``:8448``) if not ``:443`` unless you changed the configuration. (Leave the identity
@@ -325,7 +331,7 @@ Security Note
 =============
 
 Matrix serves raw user generated data in some APIs - specifically the `content
-repository endpoints <http://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid>`_.
+repository endpoints <https://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid>`_.
 
 Whilst we have tried to mitigate against possible XSS attacks (e.g.
 https://github.com/matrix-org/synapse/pull/1021) we recommend running
@@ -344,7 +350,7 @@ Platform-Specific Instructions
 Debian
 ------
 
-Matrix provides official Debian packages via apt from http://matrix.org/packages/debian/.
+Matrix provides official Debian packages via apt from https://matrix.org/packages/debian/.
 Note that these packages do not include a client - choose one from
 https://matrix.org/docs/projects/try-matrix-now.html (or build your own with one of our SDKs :)
 
@@ -358,6 +364,19 @@ Synapse is in the Fedora repositories as ``matrix-synapse``::
 Oleg Girko provides Fedora RPMs at
 https://obs.infoserver.lv/project/monitor/matrix-synapse
 
+OpenSUSE
+--------
+
+Synapse is in the OpenSUSE repositories as ``matrix-synapse``::
+
+    sudo zypper install matrix-synapse
+
+SUSE Linux Enterprise Server
+----------------------------
+
+Unofficial packages are built for SLES 15 in the openSUSE:Backports:SLE-15 repository at
+https://download.opensuse.org/repositories/openSUSE:/Backports:/SLE-15/standard/
+
 ArchLinux
 ---------
 
@@ -520,7 +539,7 @@ Troubleshooting Running
 -----------------------
 
 If synapse fails with ``missing "sodium.h"`` crypto errors, you may need
-to manually upgrade PyNaCL, as synapse uses NaCl (http://nacl.cr.yp.to/) for
+to manually upgrade PyNaCL, as synapse uses NaCl (https://nacl.cr.yp.to/) for
 encryption and digital signatures.
 Unfortunately PyNACL currently has a few issues
 (https://github.com/pyca/pynacl/issues/53) and
@@ -668,8 +687,8 @@ useful just for development purposes. See `<demo/README>`_.
 Using PostgreSQL
 ================
 
-As of Synapse 0.9, `PostgreSQL <http://www.postgresql.org>`_ is supported as an
-alternative to the `SQLite <http://sqlite.org/>`_ database that Synapse has
+As of Synapse 0.9, `PostgreSQL <https://www.postgresql.org>`_ is supported as an
+alternative to the `SQLite <https://sqlite.org/>`_ database that Synapse has
 traditionally used for convenience and simplicity.
 
 The advantages of Postgres include:
@@ -693,7 +712,7 @@ Using a reverse proxy with Synapse
 It is recommended to put a reverse proxy such as
 `nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
 `Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_ or
-`HAProxy <http://www.haproxy.org/>`_ in front of Synapse. One advantage of
+`HAProxy <https://www.haproxy.org/>`_ in front of Synapse. One advantage of
 doing so is that it means that you can expose the default https port (443) to
 Matrix clients without needing to run Synapse with root privileges.
 
diff --git a/changelog.d/.gitignore b/changelog.d/.gitignore
new file mode 100644
index 0000000000..f935021a8f
--- /dev/null
+++ b/changelog.d/.gitignore
@@ -0,0 +1 @@
+!.gitignore
diff --git a/changelog.d/3659.feature b/changelog.d/3659.feature
new file mode 100644
index 0000000000..a5b4821c09
--- /dev/null
+++ b/changelog.d/3659.feature
@@ -0,0 +1 @@
+Support profile API endpoints on workers
diff --git a/changelog.d/3673.misc b/changelog.d/3673.misc
new file mode 100644
index 0000000000..d672111fb9
--- /dev/null
+++ b/changelog.d/3673.misc
@@ -0,0 +1 @@
+Refactor state module to support multiple room versions
diff --git a/changelog.d/3680.feature b/changelog.d/3680.feature
new file mode 100644
index 0000000000..4edaaf76a8
--- /dev/null
+++ b/changelog.d/3680.feature
@@ -0,0 +1 @@
+Server notices for resource limit blocking
diff --git a/changelog.d/3722.bugfix b/changelog.d/3722.bugfix
new file mode 100644
index 0000000000..16cbaf76cb
--- /dev/null
+++ b/changelog.d/3722.bugfix
@@ -0,0 +1 @@
+Fix error when collecting prometheus metrics on a dedicated thread, caused by threading concurrency issues
diff --git a/changelog.d/3724.feature b/changelog.d/3724.feature
new file mode 100644
index 0000000000..1b374ccf47
--- /dev/null
+++ b/changelog.d/3724.feature
@@ -0,0 +1 @@
+Allow guests to use /rooms/:roomId/event/:eventId
diff --git a/changelog.d/3725.misc b/changelog.d/3725.misc
new file mode 100644
index 0000000000..91ab9d7137
--- /dev/null
+++ b/changelog.d/3725.misc
@@ -0,0 +1 @@
+The synapse.storage module has been ported to Python 3.
diff --git a/changelog.d/3726.misc b/changelog.d/3726.misc
new file mode 100644
index 0000000000..c4f66ec998
--- /dev/null
+++ b/changelog.d/3726.misc
@@ -0,0 +1 @@
+Split the state_group_cache into member and non-member state events (and so speed up LL /sync)
diff --git a/changelog.d/3727.misc b/changelog.d/3727.misc
new file mode 100644
index 0000000000..0b83220d90
--- /dev/null
+++ b/changelog.d/3727.misc
@@ -0,0 +1 @@
+Log failure to authenticate remote servers as warnings (without stack traces)
diff --git a/changelog.d/3730.misc b/changelog.d/3730.misc
new file mode 100644
index 0000000000..b1ea84f732
--- /dev/null
+++ b/changelog.d/3730.misc
@@ -0,0 +1 @@
+The CONTRIBUTING guidelines have been updated to mention our use of Markdown and that .misc files have content.
diff --git a/changelog.d/3734.misc b/changelog.d/3734.misc
new file mode 100644
index 0000000000..4f6e4b3848
--- /dev/null
+++ b/changelog.d/3734.misc
@@ -0,0 +1 @@
+Reference the need for an HTTP replication port when using the federation_reader worker
diff --git a/changelog.d/3735.misc b/changelog.d/3735.misc
new file mode 100644
index 0000000000..f17004be71
--- /dev/null
+++ b/changelog.d/3735.misc
@@ -0,0 +1 @@
+Fix minor spelling error in federation client documentation.
diff --git a/changelog.d/3746.misc b/changelog.d/3746.misc
new file mode 100644
index 0000000000..fc00ee773a
--- /dev/null
+++ b/changelog.d/3746.misc
@@ -0,0 +1 @@
+Fix MAU cache invalidation due to missing yield
diff --git a/changelog.d/3747.bugfix b/changelog.d/3747.bugfix
new file mode 100644
index 0000000000..c41e2a1213
--- /dev/null
+++ b/changelog.d/3747.bugfix
@@ -0,0 +1 @@
+Fix bug where we resent "limit exceeded" server notices repeatedly
diff --git a/changelog.d/3749.feature b/changelog.d/3749.feature
new file mode 100644
index 0000000000..9f8837b106
--- /dev/null
+++ b/changelog.d/3749.feature
@@ -0,0 +1 @@
+Add mau_trial_days config param, so that users only get counted as MAU after N days.
diff --git a/changelog.d/3751.feature b/changelog.d/3751.feature
new file mode 100644
index 0000000000..dc9742b15b
--- /dev/null
+++ b/changelog.d/3751.feature
@@ -0,0 +1 @@
+Require twisted 17.1 or later (fixes [#3741](https://github.com/matrix-org/synapse/issues/3741)).
diff --git a/changelog.d/3753.bugfix b/changelog.d/3753.bugfix
new file mode 100644
index 0000000000..b4301267df
--- /dev/null
+++ b/changelog.d/3753.bugfix
@@ -0,0 +1 @@
+Fix bug where we broke sync when using limit_usage_by_mau but hadn't configured server notices
diff --git a/changelog.d/3754.bugfix b/changelog.d/3754.bugfix
new file mode 100644
index 0000000000..6e3ec80194
--- /dev/null
+++ b/changelog.d/3754.bugfix
@@ -0,0 +1 @@
+Fix 'federation_domain_whitelist' such that an empty list correctly blocks all outbound federation traffic
diff --git a/changelog.d/3755.bugfix b/changelog.d/3755.bugfix
new file mode 100644
index 0000000000..6a1f83f0ce
--- /dev/null
+++ b/changelog.d/3755.bugfix
@@ -0,0 +1 @@
+Fix tagging of server notice rooms
diff --git a/changelog.d/3756.bugfix b/changelog.d/3756.bugfix
new file mode 100644
index 0000000000..6a1f83f0ce
--- /dev/null
+++ b/changelog.d/3756.bugfix
@@ -0,0 +1 @@
+Fix tagging of server notice rooms
diff --git a/changelog.d/3758.bugfix b/changelog.d/3758.bugfix
new file mode 100644
index 0000000000..862739bfe8
--- /dev/null
+++ b/changelog.d/3758.bugfix
@@ -0,0 +1 @@
+Rename the 'admin_uri' config variable and error parameter to 'admin_contact' to match the spec.
diff --git a/changelog.d/3760.bugfix b/changelog.d/3760.bugfix
new file mode 100644
index 0000000000..ce61fb8a2b
--- /dev/null
+++ b/changelog.d/3760.bugfix
@@ -0,0 +1 @@
+Don't return non-LL-member state in incremental sync state blocks
diff --git a/changelog.d/3764.misc b/changelog.d/3764.misc
new file mode 100644
index 0000000000..f3614f1982
--- /dev/null
+++ b/changelog.d/3764.misc
@@ -0,0 +1 @@
+Make sure that we close db connections opened during init
\ No newline at end of file
diff --git a/changelog.d/3768.bugfix b/changelog.d/3768.bugfix
new file mode 100644
index 0000000000..a039a7fa68
--- /dev/null
+++ b/changelog.d/3768.bugfix
@@ -0,0 +1 @@
+Fix bug in sending presence over federation
diff --git a/changelog.d/3777.bugfix b/changelog.d/3777.bugfix
new file mode 100644
index 0000000000..46efc543a7
--- /dev/null
+++ b/changelog.d/3777.bugfix
@@ -0,0 +1 @@
+Fix bug where a preserved threepid user could not sign up while the server was MAU blocked
diff --git a/contrib/docker/README.md b/contrib/docker/README.md
index 61592109cb..05254e5192 100644
--- a/contrib/docker/README.md
+++ b/contrib/docker/README.md
@@ -1,29 +1,5 @@
 # Synapse Docker
 
-The `matrixdotorg/synapse` Docker image will run Synapse as a single process. It does not provide a
-database server or a TURN server; you should run these separately.
-
-If you run a Postgres server, you should simply include it in the same Compose
-project or set the proper environment variables and the image will automatically
-use that server.
-
-## Build
-
-Build the docker image with the `docker build` command from the root of the synapse repository.
-
-```
-docker build -t docker.io/matrixdotorg/synapse .
-```
-
-The `-t` option sets the image tag. Official images are tagged `matrixdotorg/synapse:<version>` where `<version>` is the same as the release tag in the synapse git repository.
-
-You may have a local Python wheel cache available, in which case copy the relevant packages into the ``cache/`` directory at the root of the project.
-
-## Run
-
-This image is designed to run either with an automatically generated configuration
-file or with a custom configuration that requires manual editing.
-
 ### Automated configuration
 
 It is recommended that you use Docker Compose to run your containers, including
@@ -60,94 +36,6 @@ Then, customize your configuration and run the server:
 docker-compose up -d
 ```
 
-### Without Compose
-
-If you do not wish to use Compose, you may still run this image using plain
-Docker commands. Note that the following is just a guideline and you may need
-to add parameters to the docker run command to account for the network situation
-with your postgres database.
-
-```
-docker run \
-    -d \
-    --name synapse \
-    -v ${DATA_PATH}:/data \
-    -e SYNAPSE_SERVER_NAME=my.matrix.host \
-    -e SYNAPSE_REPORT_STATS=yes \
-    docker.io/matrixdotorg/synapse:latest
-```
-
-## Volumes
-
-The image expects a single volume, located at ``/data``, that will hold:
-
-* temporary files during uploads;
-* uploaded media and thumbnails;
-* the SQLite database if you do not configure postgres;
-* the appservices configuration.
-
-You are free to use separate volumes depending on storage endpoints at your
-disposal. For instance, ``/data/media`` could be stored on a large but
-low-performance HDD while other files could be stored on high-performance
-endpoints.
-
-In order to set up an application service, simply create an ``appservices``
-directory in the data volume and write the application service Yaml
-configuration file there. Multiple application services are supported.
-
-## Environment
-
-Unless you specify a custom path for the configuration file, a very generic
-file will be generated, based on the following environment settings.
-These are a good starting point for setting up your own deployment.
-
-Global settings:
-
-* ``UID``, the user id Synapse will run as [default 991]
-* ``GID``, the group id Synapse will run as [default 991]
-* ``SYNAPSE_CONFIG_PATH``, path to a custom config file
-
-If ``SYNAPSE_CONFIG_PATH`` is set, you should generate a configuration file
-then customize it manually. No other environment variable is required.
-
-Otherwise, a dynamic configuration file will be used. The following environment
-variables are available for configuration:
-
-* ``SYNAPSE_SERVER_NAME`` (mandatory), the server's public hostname.
-* ``SYNAPSE_REPORT_STATS`` (mandatory, ``yes`` or ``no``), enable anonymous
-  statistics reporting back to the Matrix project which helps us to get funding.
-* ``SYNAPSE_NO_TLS``, set this variable to disable TLS in Synapse (use this if
-  you run your own TLS-capable reverse proxy).
-* ``SYNAPSE_ENABLE_REGISTRATION``, set this variable to enable registration on
-  the Synapse instance.
-* ``SYNAPSE_ALLOW_GUEST``, set this variable to allow guests to join this server.
-* ``SYNAPSE_EVENT_CACHE_SIZE``, the event cache size [default `10K`].
-* ``SYNAPSE_CACHE_FACTOR``, the cache factor [default `0.5`].
-* ``SYNAPSE_RECAPTCHA_PUBLIC_KEY``, set this variable to the recaptcha public
-  key in order to enable recaptcha upon registration.
-* ``SYNAPSE_RECAPTCHA_PRIVATE_KEY``, set this variable to the recaptcha private
-  key in order to enable recaptcha upon registration.
-* ``SYNAPSE_TURN_URIS``, set this variable to the comma-separated list of TURN
-  URIs to enable TURN for this homeserver.
-* ``SYNAPSE_TURN_SECRET``, set this to the TURN shared secret if required.
-
-Shared secrets, that will be initialized to random values if not set:
-
-* ``SYNAPSE_REGISTRATION_SHARED_SECRET``, secret for registering users if
-  registration is disabled.
-* ``SYNAPSE_MACAROON_SECRET_KEY`` secret for signing access tokens
-  to the server.
-
-Database specific values (will use SQLite if not set):
-
-* `POSTGRES_DB` - The database name for the synapse postgres database. [default: `synapse`]
-* `POSTGRES_HOST` - The host of the postgres database if you wish to use postgresql instead of sqlite3. [default: `db` which is useful when using a container on the same docker network in a compose file where the postgres service is called `db`]
-* `POSTGRES_PASSWORD` - The password for the synapse postgres database. **If this is set then postgres will be used instead of sqlite3.** [default: none] **NOTE**: You are highly encouraged to use postgresql! Please use the compose file to make it easier to deploy.
-* `POSTGRES_USER` - The user for the synapse postgres database. [default: `matrix`]
-
-Mail server specific values (will not send emails if not set):
+### More information
 
-* ``SYNAPSE_SMTP_HOST``, hostname of the mail server.
-* ``SYNAPSE_SMTP_PORT``, TCP port for accessing the mail server [default ``25``].
-* ``SYNAPSE_SMTP_USER``, username for authenticating against the mail server if any.
-* ``SYNAPSE_SMTP_PASSWORD``, password for authenticating against the mail server if any.
+For more information on required environment variables and mounts, see the main docker documentation at [/docker/README.md](../../docker/README.md)
diff --git a/contrib/docker/docker-compose.yml b/contrib/docker/docker-compose.yml
index 0b531949e0..3a8dfbae34 100644
--- a/contrib/docker/docker-compose.yml
+++ b/contrib/docker/docker-compose.yml
@@ -6,6 +6,7 @@ version: '3'
 services:
 
   synapse:
+    build: ../..
     image: docker.io/matrixdotorg/synapse:latest
    # Since synapse does not retry connecting to the database, restart upon
     # failure
diff --git a/contrib/grafana/README.md b/contrib/grafana/README.md
new file mode 100644
index 0000000000..6a6cc0bed4
--- /dev/null
+++ b/contrib/grafana/README.md
@@ -0,0 +1,6 @@
+# Using the Synapse Grafana dashboard
+
+0. Set up Prometheus and Grafana. This is out of scope for this README; useful documentation about using Grafana with Prometheus: http://docs.grafana.org/features/datasources/prometheus/
+1. Have your Prometheus scrape your Synapse (a minimal scrape config sketch follows this list). https://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.rst
+2. Import the dashboard into Grafana: download `synapse.json`, import it into Grafana, and select the correct Prometheus datasource. http://docs.grafana.org/reference/export_import/
+3. Set up additional recording rules.
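+
+For step 1, a minimal `prometheus.yml` scrape job might look like the sketch below (the target host and port are assumptions; they must match your homeserver's metrics listener):
+
+```yaml
+scrape_configs:
+  - job_name: "synapse"
+    metrics_path: "/_synapse/metrics"
+    static_configs:
+      - targets: ["localhost:9092"]
+```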
diff --git a/contrib/grafana/synapse.json b/contrib/grafana/synapse.json
new file mode 100644
index 0000000000..c58612594a
--- /dev/null
+++ b/contrib/grafana/synapse.json
@@ -0,0 +1,4969 @@
+{
+  "__inputs": [
+    {
+      "name": "DS_PROMETHEUS",
+      "label": "Prometheus",
+      "description": "",
+      "type": "datasource",
+      "pluginId": "prometheus",
+      "pluginName": "Prometheus"
+    }
+  ],
+  "__requires": [
+    {
+      "type": "grafana",
+      "id": "grafana",
+      "name": "Grafana",
+      "version": "5.2.0"
+    },
+    {
+      "type": "panel",
+      "id": "graph",
+      "name": "Graph",
+      "version": "5.0.0"
+    },
+    {
+      "type": "panel",
+      "id": "heatmap",
+      "name": "Heatmap",
+      "version": "5.0.0"
+    },
+    {
+      "type": "datasource",
+      "id": "prometheus",
+      "name": "Prometheus",
+      "version": "5.0.0"
+    }
+  ],
+  "annotations": {
+    "list": [
+      {
+        "builtIn": 1,
+        "datasource": "$datasource",
+        "enable": false,
+        "hide": true,
+        "iconColor": "rgba(0, 211, 255, 1)",
+        "limit": 100,
+        "name": "Annotations & Alerts",
+        "showIn": 0,
+        "type": "dashboard"
+      }
+    ]
+  },
+  "editable": true,
+  "gnetId": null,
+  "graphTooltip": 0,
+  "id": null,
+  "iteration": 1533598785368,
+  "links": [
+    {
+      "asDropdown": true,
+      "icon": "external link",
+      "keepTime": true,
+      "tags": [
+        "matrix"
+      ],
+      "title": "Dashboards",
+      "type": "dashboards"
+    }
+  ],
+  "panels": [
+    {
+      "collapsed": false,
+      "gridPos": {
+        "h": 1,
+        "w": 24,
+        "x": 0,
+        "y": 0
+      },
+      "id": 73,
+      "panels": [],
+      "title": "Overview",
+      "type": "row"
+    },
+    {
+      "aliasColors": {},
+      "bars": false,
+      "dashLength": 10,
+      "dashes": false,
+      "datasource": "${DS_PROMETHEUS}",
+      "fill": 1,
+      "gridPos": {
+        "h": 9,
+        "w": 12,
+        "x": 0,
+        "y": 1
+      },
+      "id": 75,
+      "legend": {
+        "avg": false,
+        "current": false,
+        "max": false,
+        "min": false,
+        "show": true,
+        "total": false,
+        "values": false
+      },
+      "lines": true,
+      "linewidth": 1,
+      "links": [],
+      "nullPointMode": "null",
+      "percentage": false,
+      "pointradius": 5,
+      "points": false,
+      "renderer": "flot",
+      "seriesOverrides": [],
+      "spaceLength": 10,
+      "stack": false,
+      "steppedLine": false,
+      "targets": [
+        {
+          "expr": "process_cpu_seconds:rate2m{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
+          "format": "time_series",
+          "intervalFactor": 1,
+          "legendFormat": "{{job}}-{{index}} ",
+          "refId": "A"
+        }
+      ],
+      "thresholds": [],
+      "timeFrom": null,
+      "timeShift": null,
+      "title": "CPU usage",
+      "tooltip": {
+        "shared": true,
+        "sort": 0,
+        "value_type": "individual"
+      },
+      "type": "graph",
+      "xaxis": {
+        "buckets": null,
+        "mode": "time",
+        "name": null,
+        "show": true,
+        "values": []
+      },
+      "yaxes": [
+        {
+          "decimals": null,
+          "format": "percentunit",
+          "label": null,
+          "logBase": 1,
+          "max": "1",
+          "min": "0",
+          "show": true
+        },
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true
+        }
+      ],
+      "yaxis": {
+        "align": false,
+        "alignLevel": null
+      }
+    },
+    {
+      "cards": {
+        "cardPadding": 0,
+        "cardRound": null
+      },
+      "color": {
+        "cardColor": "#b4ff00",
+        "colorScale": "sqrt",
+        "colorScheme": "interpolateSpectral",
+        "exponent": 0.5,
+        "mode": "spectrum"
+      },
+      "dataFormat": "tsbuckets",
+      "datasource": "${DS_PROMETHEUS}",
+      "gridPos": {
+        "h": 9,
+        "w": 12,
+        "x": 12,
+        "y": 1
+      },
+      "heatmap": {},
+      "highlightCards": true,
+      "id": 85,
+      "legend": {
+        "show": false
+      },
+      "links": [],
+      "targets": [
+        {
+          "expr": "sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\"}[$bucket_size])) by (le)",
+          "format": "heatmap",
+          "intervalFactor": 1,
+          "legendFormat": "{{le}}",
+          "refId": "A"
+        }
+      ],
+      "title": "Event Send Time",
+      "tooltip": {
+        "show": true,
+        "showHistogram": false
+      },
+      "type": "heatmap",
+      "xAxis": {
+        "show": true
+      },
+      "xBucketNumber": null,
+      "xBucketSize": null,
+      "yAxis": {
+        "decimals": null,
+        "format": "s",
+        "logBase": 2,
+        "max": null,
+        "min": null,
+        "show": true,
+        "splitFactor": null
+      },
+      "yBucketBound": "auto",
+      "yBucketNumber": null,
+      "yBucketSize": null
+    },
+    {
+      "aliasColors": {},
+      "bars": false,
+      "dashLength": 10,
+      "dashes": false,
+      "datasource": "$datasource",
+      "editable": true,
+      "error": false,
+      "fill": 1,
+      "grid": {},
+      "gridPos": {
+        "h": 7,
+        "w": 12,
+        "x": 0,
+        "y": 10
+      },
+      "id": 33,
+      "legend": {
+        "avg": false,
+        "current": false,
+        "max": false,
+        "min": false,
+        "show": false,
+        "total": false,
+        "values": false
+      },
+      "lines": true,
+      "linewidth": 2,
+      "links": [],
+      "nullPointMode": "null",
+      "percentage": false,
+      "pointradius": 5,
+      "points": false,
+      "renderer": "flot",
+      "seriesOverrides": [],
+      "spaceLength": 10,
+      "stack": false,
+      "steppedLine": false,
+      "targets": [
+        {
+          "expr": "sum(rate(synapse_storage_events_persisted_events{instance=\"$instance\"}[$bucket_size])) without (job,index)",
+          "format": "time_series",
+          "intervalFactor": 2,
+          "legendFormat": "",
+          "refId": "A",
+          "step": 20,
+          "target": ""
+        }
+      ],
+      "thresholds": [],
+      "timeFrom": null,
+      "timeShift": null,
+      "title": "Events Persisted",
+      "tooltip": {
+        "shared": true,
+        "sort": 0,
+        "value_type": "cumulative"
+      },
+      "type": "graph",
+      "xaxis": {
+        "buckets": null,
+        "mode": "time",
+        "name": null,
+        "show": true,
+        "values": []
+      },
+      "yaxes": [
+        {
+          "format": "hertz",
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true
+        },
+        {
+          "format": "short",
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true
+        }
+      ],
+      "yaxis": {
+        "align": false,
+        "alignLevel": null
+      }
+    },
+    {
+      "collapsed": true,
+      "gridPos": {
+        "h": 1,
+        "w": 24,
+        "x": 0,
+        "y": 17
+      },
+      "id": 54,
+      "panels": [
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "$datasource",
+          "editable": true,
+          "error": false,
+          "fill": 0,
+          "grid": {},
+          "gridPos": {
+            "h": 7,
+            "w": 12,
+            "x": 0,
+            "y": 18
+          },
+          "id": 34,
+          "legend": {
+            "avg": false,
+            "current": false,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 2,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": true,
+          "targets": [
+            {
+              "expr": "process_resident_memory_bytes{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
+              "format": "time_series",
+              "intervalFactor": 2,
+              "legendFormat": "{{job}} {{index}}",
+              "refId": "A",
+              "step": 20,
+              "target": ""
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Memory",
+          "tooltip": {
+            "shared": true,
+            "sort": 0,
+            "value_type": "cumulative"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "bytes",
+              "logBase": 1,
+              "max": null,
+              "min": "0",
+              "show": true
+            },
+            {
+              "format": "short",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        },
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "$datasource",
+          "fill": 1,
+          "gridPos": {
+            "h": 7,
+            "w": 12,
+            "x": 12,
+            "y": 18
+          },
+          "id": 37,
+          "legend": {
+            "avg": false,
+            "current": false,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [
+            {
+              "alias": "/max$/",
+              "color": "#890F02",
+              "fill": 0,
+              "legend": false
+            }
+          ],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "process_open_fds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
+              "format": "time_series",
+              "hide": false,
+              "intervalFactor": 2,
+              "legendFormat": "{{job}}-{{index}}",
+              "refId": "A",
+              "step": 20
+            },
+            {
+              "expr": "process_max_fds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
+              "format": "time_series",
+              "hide": true,
+              "intervalFactor": 2,
+              "legendFormat": "{{job}}-{{index}} max",
+              "refId": "B",
+              "step": 20
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Open FDs",
+          "tooltip": {
+            "shared": true,
+            "sort": 0,
+            "value_type": "individual"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "none",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        },
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "$datasource",
+          "fill": 1,
+          "gridPos": {
+            "h": 7,
+            "w": 12,
+            "x": 0,
+            "y": 25
+          },
+          "id": 48,
+          "legend": {
+            "avg": false,
+            "current": false,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "rate(synapse_storage_schedule_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(synapse_storage_schedule_time_count[$bucket_size])",
+              "format": "time_series",
+              "intervalFactor": 2,
+              "legendFormat": "{{job}}-{{index}}",
+              "refId": "A",
+              "step": 20
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Avg time waiting for db conn",
+          "tooltip": {
+            "shared": true,
+            "sort": 0,
+            "value_type": "individual"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "decimals": null,
+              "format": "s",
+              "label": "",
+              "logBase": 1,
+              "max": null,
+              "min": "0",
+              "show": true
+            },
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": false
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        },
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "$datasource",
+          "fill": 1,
+          "gridPos": {
+            "h": 7,
+            "w": 12,
+            "x": 12,
+            "y": 25
+          },
+          "id": 49,
+          "legend": {
+            "avg": false,
+            "current": false,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [
+            {
+              "alias": "/^up/",
+              "legend": false,
+              "yaxis": 2
+            }
+          ],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "scrape_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
+              "format": "time_series",
+              "interval": "",
+              "intervalFactor": 2,
+              "legendFormat": "{{job}}-{{index}}",
+              "refId": "A",
+              "step": 20
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Prometheus scrape time",
+          "tooltip": {
+            "shared": true,
+            "sort": 0,
+            "value_type": "individual"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "s",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": "0",
+              "show": true
+            },
+            {
+              "decimals": 0,
+              "format": "none",
+              "label": "",
+              "logBase": 1,
+              "max": "0",
+              "min": "-1",
+              "show": false
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        },
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "$datasource",
+          "fill": 1,
+          "gridPos": {
+            "h": 7,
+            "w": 12,
+            "x": 0,
+            "y": 32
+          },
+          "id": 50,
+          "legend": {
+            "avg": false,
+            "current": false,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "rate(python_twisted_reactor_tick_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(python_twisted_reactor_tick_time_count[$bucket_size])",
+              "format": "time_series",
+              "interval": "",
+              "intervalFactor": 2,
+              "legendFormat": "{{job}}-{{index}}",
+              "refId": "A",
+              "step": 20
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Avg reactor tick time",
+          "tooltip": {
+            "shared": true,
+            "sort": 0,
+            "value_type": "individual"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "s",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": false
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        },
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "$datasource",
+          "editable": true,
+          "error": false,
+          "fill": 1,
+          "grid": {},
+          "gridPos": {
+            "h": 7,
+            "w": 12,
+            "x": 12,
+            "y": 32
+          },
+          "id": 5,
+          "legend": {
+            "alignAsTable": false,
+            "avg": false,
+            "current": false,
+            "hideEmpty": false,
+            "hideZero": false,
+            "max": false,
+            "min": false,
+            "rightSide": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [
+            {
+              "alias": "/user/"
+            },
+            {
+              "alias": "/system/"
+            }
+          ],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "rate(process_cpu_system_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+              "format": "time_series",
+              "intervalFactor": 1,
+              "legendFormat": "{{job}}-{{index}} system ",
+              "metric": "",
+              "refId": "B",
+              "step": 20
+            },
+            {
+              "expr": "rate(process_cpu_user_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+              "format": "time_series",
+              "hide": false,
+              "interval": "",
+              "intervalFactor": 1,
+              "legendFormat": "{{job}}-{{index}} user",
+              "refId": "A",
+              "step": 20
+            }
+          ],
+          "thresholds": [
+            {
+              "colorMode": "custom",
+              "line": true,
+              "lineColor": "rgba(216, 200, 27, 0.27)",
+              "op": "gt",
+              "value": 0.5
+            },
+            {
+              "colorMode": "custom",
+              "line": true,
+              "lineColor": "rgba(234, 112, 112, 0.22)",
+              "op": "gt",
+              "value": 0.8
+            }
+          ],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "CPU",
+          "tooltip": {
+            "shared": true,
+            "sort": 0,
+            "value_type": "individual"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "decimals": null,
+              "format": "percentunit",
+              "label": "",
+              "logBase": 1,
+              "max": "1.2",
+              "min": 0,
+              "show": true
+            },
+            {
+              "format": "short",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        },
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "${DS_PROMETHEUS}",
+          "fill": 0,
+          "gridPos": {
+            "h": 7,
+            "w": 12,
+            "x": 0,
+            "y": 39
+          },
+          "id": 53,
+          "legend": {
+            "avg": false,
+            "current": false,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "min_over_time(up{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+              "format": "time_series",
+              "intervalFactor": 2,
+              "legendFormat": "{{job}}-{{index}}",
+              "refId": "A"
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Up",
+          "tooltip": {
+            "shared": true,
+            "sort": 0,
+            "value_type": "individual"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        }
+      ],
+      "repeat": null,
+      "title": "Process info",
+      "type": "row"
+    },
+    {
+      "collapsed": true,
+      "gridPos": {
+        "h": 1,
+        "w": 24,
+        "x": 0,
+        "y": 18
+      },
+      "id": 56,
+      "panels": [
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "$datasource",
+          "decimals": 1,
+          "fill": 1,
+          "gridPos": {
+            "h": 7,
+            "w": 12,
+            "x": 0,
+            "y": 49
+          },
+          "id": 40,
+          "legend": {
+            "avg": false,
+            "current": false,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "rate(synapse_storage_events_persisted_by_source_type{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+              "format": "time_series",
+              "intervalFactor": 2,
+              "legendFormat": "{{type}}",
+              "refId": "D"
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Events/s Local vs Remote",
+          "tooltip": {
+            "shared": true,
+            "sort": 2,
+            "value_type": "individual"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "hertz",
+              "label": "",
+              "logBase": 1,
+              "max": null,
+              "min": "0",
+              "show": true
+            },
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        },
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "$datasource",
+          "decimals": 1,
+          "fill": 1,
+          "gridPos": {
+            "h": 7,
+            "w": 12,
+            "x": 12,
+            "y": 49
+          },
+          "id": 46,
+          "legend": {
+            "avg": false,
+            "current": false,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "rate(synapse_storage_events_persisted_by_event_type{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+              "format": "time_series",
+              "instant": false,
+              "intervalFactor": 2,
+              "legendFormat": "{{type}}",
+              "refId": "A",
+              "step": 20
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Events/s by Type",
+          "tooltip": {
+            "shared": false,
+            "sort": 2,
+            "value_type": "individual"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "hertz",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": "0",
+              "show": true
+            },
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        },
+        {
+          "aliasColors": {
+            "irc-freenode (local)": "#EAB839"
+          },
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "$datasource",
+          "decimals": 1,
+          "fill": 1,
+          "gridPos": {
+            "h": 7,
+            "w": 12,
+            "x": 0,
+            "y": 56
+          },
+          "id": 44,
+          "legend": {
+            "alignAsTable": true,
+            "avg": false,
+            "current": false,
+            "hideEmpty": true,
+            "hideZero": true,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "rate(synapse_storage_events_persisted_by_origin{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+              "format": "time_series",
+              "intervalFactor": 2,
+              "legendFormat": "{{origin_entity}} ({{origin_type}})",
+              "refId": "A",
+              "step": 20
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Events/s by Origin",
+          "tooltip": {
+            "shared": false,
+            "sort": 2,
+            "value_type": "individual"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "hertz",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": "0",
+              "show": true
+            },
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        },
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "$datasource",
+          "decimals": 1,
+          "fill": 1,
+          "gridPos": {
+            "h": 7,
+            "w": 12,
+            "x": 12,
+            "y": 56
+          },
+          "id": 45,
+          "legend": {
+            "alignAsTable": true,
+            "avg": false,
+            "current": false,
+            "hideEmpty": true,
+            "hideZero": true,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "sum(rate(synapse_storage_events_persisted_events_sep{job=~\"$job\",index=~\"$index\", type=\"m.room.member\",instance=\"$instance\"}[$bucket_size])) by (origin_type, origin_entity)",
+              "format": "time_series",
+              "intervalFactor": 2,
+              "legendFormat": "{{origin_entity}} ({{origin_type}})",
+              "refId": "A",
+              "step": 20
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Memberships/s by Origin",
+          "tooltip": {
+            "shared": true,
+            "sort": 2,
+            "value_type": "individual"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "hertz",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": "0",
+              "show": true
+            },
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        }
+      ],
+      "repeat": null,
+      "title": "Event persist rates",
+      "type": "row"
+    },
+    {
+      "collapsed": true,
+      "gridPos": {
+        "h": 1,
+        "w": 24,
+        "x": 0,
+        "y": 19
+      },
+      "id": 57,
+      "panels": [
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "$datasource",
+          "decimals": null,
+          "editable": true,
+          "error": false,
+          "fill": 2,
+          "grid": {},
+          "gridPos": {
+            "h": 8,
+            "w": 12,
+            "x": 0,
+            "y": 48
+          },
+          "id": 4,
+          "legend": {
+            "alignAsTable": true,
+            "avg": false,
+            "current": false,
+            "hideEmpty": false,
+            "hideZero": true,
+            "max": false,
+            "min": false,
+            "rightSide": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "rate(synapse_http_server_requests_received{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+              "format": "time_series",
+              "interval": "",
+              "intervalFactor": 2,
+              "legendFormat": "{{job}}-{{index}} {{method}} {{servlet}} {{tag}}",
+              "refId": "A",
+              "step": 20
+            }
+          ],
+          "thresholds": [
+            {
+              "colorMode": "custom",
+              "fill": true,
+              "fillColor": "rgba(216, 200, 27, 0.27)",
+              "op": "gt",
+              "value": 100
+            },
+            {
+              "colorMode": "custom",
+              "fill": true,
+              "fillColor": "rgba(234, 112, 112, 0.22)",
+              "op": "gt",
+              "value": 250
+            }
+          ],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Request Count by arrival time",
+          "tooltip": {
+            "shared": false,
+            "sort": 0,
+            "value_type": "individual"
+          },
+          "transparent": false,
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "hertz",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        },
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "$datasource",
+          "editable": true,
+          "error": false,
+          "fill": 1,
+          "grid": {},
+          "gridPos": {
+            "h": 8,
+            "w": 12,
+            "x": 12,
+            "y": 48
+          },
+          "id": 32,
+          "legend": {
+            "avg": false,
+            "current": false,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 2,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "rate(synapse_http_server_requests_received{instance=\"$instance\",job=~\"$job\",index=~\"$index\",method!=\"OPTIONS\"}[$bucket_size]) and topk(10,synapse_http_server_requests_received{instance=\"$instance\",job=~\"$job\",method!=\"OPTIONS\"})",
+              "format": "time_series",
+              "intervalFactor": 2,
+              "legendFormat": "{{method}} {{servlet}} {{job}}-{{index}}",
+              "refId": "A",
+              "step": 20,
+              "target": ""
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Top 10 Request Counts",
+          "tooltip": {
+            "shared": false,
+            "sort": 0,
+            "value_type": "cumulative"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "hertz",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        },
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "$datasource",
+          "decimals": null,
+          "editable": true,
+          "error": false,
+          "fill": 2,
+          "grid": {},
+          "gridPos": {
+            "h": 8,
+            "w": 12,
+            "x": 0,
+            "y": 56
+          },
+          "id": 23,
+          "legend": {
+            "alignAsTable": true,
+            "avg": false,
+            "current": false,
+            "hideEmpty": false,
+            "hideZero": true,
+            "max": false,
+            "min": false,
+            "rightSide": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "rate(synapse_http_server_response_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_http_server_response_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+              "format": "time_series",
+              "interval": "",
+              "intervalFactor": 1,
+              "legendFormat": "{{job}}-{{index}} {{method}} {{servlet}} {{tag}}",
+              "refId": "A",
+              "step": 20
+            }
+          ],
+          "thresholds": [
+            {
+              "colorMode": "custom",
+              "fill": true,
+              "fillColor": "rgba(216, 200, 27, 0.27)",
+              "op": "gt",
+              "value": 100,
+              "yaxis": "left"
+            },
+            {
+              "colorMode": "custom",
+              "fill": true,
+              "fillColor": "rgba(234, 112, 112, 0.22)",
+              "op": "gt",
+              "value": 250,
+              "yaxis": "left"
+            }
+          ],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Total CPU Usage by Endpoint",
+          "tooltip": {
+            "shared": false,
+            "sort": 0,
+            "value_type": "individual"
+          },
+          "transparent": false,
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "percentunit",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        },
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "$datasource",
+          "decimals": null,
+          "editable": true,
+          "error": false,
+          "fill": 2,
+          "grid": {},
+          "gridPos": {
+            "h": 8,
+            "w": 12,
+            "x": 12,
+            "y": 56
+          },
+          "id": 52,
+          "legend": {
+            "alignAsTable": true,
+            "avg": false,
+            "current": false,
+            "hideEmpty": false,
+            "hideZero": true,
+            "max": false,
+            "min": false,
+            "rightSide": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "(rate(synapse_http_server_response_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_http_server_response_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) / rate(synapse_http_server_response_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+              "format": "time_series",
+              "interval": "",
+              "intervalFactor": 2,
+              "legendFormat": "{{job}}-{{index}} {{method}} {{servlet}} {{tag}}",
+              "refId": "A",
+              "step": 20
+            }
+          ],
+          "thresholds": [
+            {
+              "colorMode": "custom",
+              "fill": true,
+              "fillColor": "rgba(216, 200, 27, 0.27)",
+              "op": "gt",
+              "value": 100
+            },
+            {
+              "colorMode": "custom",
+              "fill": true,
+              "fillColor": "rgba(234, 112, 112, 0.22)",
+              "op": "gt",
+              "value": 250
+            }
+          ],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Average CPU Usage by Endpoint",
+          "tooltip": {
+            "shared": false,
+            "sort": 0,
+            "value_type": "individual"
+          },
+          "transparent": false,
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "s",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        },
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "$datasource",
+          "editable": true,
+          "error": false,
+          "fill": 1,
+          "grid": {},
+          "gridPos": {
+            "h": 8,
+            "w": 12,
+            "x": 0,
+            "y": 64
+          },
+          "id": 7,
+          "legend": {
+            "alignAsTable": true,
+            "avg": false,
+            "current": false,
+            "hideEmpty": true,
+            "hideZero": true,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "rate(synapse_http_server_response_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+              "format": "time_series",
+              "interval": "",
+              "intervalFactor": 2,
+              "legendFormat": "{{job}}-{{index}} {{method}} {{servlet}} {{tag}}",
+              "refId": "A",
+              "step": 20
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "DB Usage by endpoint",
+          "tooltip": {
+            "shared": false,
+            "sort": 0,
+            "value_type": "cumulative"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "percentunit",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        },
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "$datasource",
+          "decimals": null,
+          "editable": true,
+          "error": false,
+          "fill": 2,
+          "grid": {},
+          "gridPos": {
+            "h": 8,
+            "w": 12,
+            "x": 12,
+            "y": 64
+          },
+          "id": 47,
+          "legend": {
+            "alignAsTable": true,
+            "avg": true,
+            "current": false,
+            "hideEmpty": false,
+            "hideZero": true,
+            "max": true,
+            "min": false,
+            "rightSide": false,
+            "show": true,
+            "total": false,
+            "values": true
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "rate(synapse_http_server_response_time_seconds_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\",tag!=\"incremental_sync\"}[$bucket_size])/rate(synapse_http_server_response_time_seconds_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\",tag!=\"incremental_sync\"}[$bucket_size])",
+              "format": "time_series",
+              "interval": "",
+              "intervalFactor": 2,
+              "legendFormat": "{{job}}-{{index}} {{method}} {{servlet}} {{tag}}",
+              "refId": "A",
+              "step": 20
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Non-sync avg response time",
+          "tooltip": {
+            "shared": false,
+            "sort": 0,
+            "value_type": "individual"
+          },
+          "transparent": false,
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "s",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": false
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        },
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "${DS_PROMETHEUS}",
+          "fill": 1,
+          "gridPos": {
+            "h": 9,
+            "w": 12,
+            "x": 0,
+            "y": 72
+          },
+          "id": 103,
+          "legend": {
+            "avg": false,
+            "current": false,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "topk(10,synapse_http_server_in_flight_requests_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"})",
+              "format": "time_series",
+              "interval": "",
+              "intervalFactor": 1,
+              "legendFormat": "{{job}}-{{index}} {{method}} {{servlet}}",
+              "refId": "A"
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Requests in flight",
+          "tooltip": {
+            "shared": false,
+            "sort": 0,
+            "value_type": "individual"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        }
+      ],
+      "repeat": null,
+      "title": "Requests",
+      "type": "row"
+    },
+    {
+      "collapsed": true,
+      "gridPos": {
+        "h": 1,
+        "w": 24,
+        "x": 0,
+        "y": 20
+      },
+      "id": 97,
+      "panels": [
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "${DS_PROMETHEUS}",
+          "fill": 1,
+          "gridPos": {
+            "h": 9,
+            "w": 12,
+            "x": 0,
+            "y": 23
+          },
+          "id": 99,
+          "legend": {
+            "avg": false,
+            "current": false,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "rate(synapse_background_process_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_background_process_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+              "format": "time_series",
+              "interval": "",
+              "intervalFactor": 1,
+              "legendFormat": "{{job}}-{{index}} {{name}}",
+              "refId": "A"
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "CPU usage by background jobs",
+          "tooltip": {
+            "shared": true,
+            "sort": 0,
+            "value_type": "individual"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "percentunit",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        },
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "${DS_PROMETHEUS}",
+          "fill": 1,
+          "gridPos": {
+            "h": 9,
+            "w": 12,
+            "x": 12,
+            "y": 23
+          },
+          "id": 101,
+          "legend": {
+            "avg": false,
+            "current": false,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "rate(synapse_background_process_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+              "format": "time_series",
+              "intervalFactor": 1,
+              "legendFormat": "{{job}}-{{index}} {{name}}",
+              "refId": "A"
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "DB usage by background jobs",
+          "tooltip": {
+            "shared": true,
+            "sort": 0,
+            "value_type": "individual"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "percentunit",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        }
+      ],
+      "title": "Background jobs",
+      "type": "row"
+    },
+    {
+      "collapsed": true,
+      "gridPos": {
+        "h": 1,
+        "w": 24,
+        "x": 0,
+        "y": 21
+      },
+      "id": 81,
+      "panels": [
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "${DS_PROMETHEUS}",
+          "fill": 1,
+          "gridPos": {
+            "h": 9,
+            "w": 12,
+            "x": 0,
+            "y": 25
+          },
+          "id": 79,
+          "legend": {
+            "avg": false,
+            "current": false,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "rate(synapse_federation_client_sent_transactions{instance=\"$instance\", job=~\"$job\", index=~\"$index\"}[$bucket_size])",
+              "format": "time_series",
+              "intervalFactor": 1,
+              "legendFormat": "txn rate",
+              "refId": "A"
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Outgoing federation transaction rate",
+          "tooltip": {
+            "shared": true,
+            "sort": 0,
+            "value_type": "individual"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "hertz",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        },
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "${DS_PROMETHEUS}",
+          "fill": 1,
+          "gridPos": {
+            "h": 9,
+            "w": 12,
+            "x": 12,
+            "y": 25
+          },
+          "id": 83,
+          "legend": {
+            "avg": false,
+            "current": false,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "rate(synapse_federation_server_received_pdus{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+              "format": "time_series",
+              "intervalFactor": 1,
+              "legendFormat": "pdus",
+              "refId": "A"
+            },
+            {
+              "expr": "rate(synapse_federation_server_received_edus{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+              "format": "time_series",
+              "intervalFactor": 1,
+              "legendFormat": "edus",
+              "refId": "B"
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Incoming PDU/EDU rate",
+          "tooltip": {
+            "shared": true,
+            "sort": 0,
+            "value_type": "individual"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "hertz",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        }
+      ],
+      "title": "Federation",
+      "type": "row"
+    },
+    {
+      "collapsed": true,
+      "gridPos": {
+        "h": 1,
+        "w": 24,
+        "x": 0,
+        "y": 22
+      },
+      "id": 60,
+      "panels": [
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "$datasource",
+          "fill": 1,
+          "gridPos": {
+            "h": 7,
+            "w": 12,
+            "x": 0,
+            "y": 23
+          },
+          "id": 51,
+          "legend": {
+            "avg": false,
+            "current": false,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "rate(synapse_push_httppusher_http_pushes_processed{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+              "format": "time_series",
+              "interval": "",
+              "intervalFactor": 2,
+              "legendFormat": "processed {{job}}",
+              "refId": "A",
+              "step": 20
+            },
+            {
+              "expr": "rate(synapse_push_httppusher_http_pushes_failed{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+              "format": "time_series",
+              "intervalFactor": 2,
+              "legendFormat": "failed {{job}}",
+              "refId": "B",
+              "step": 20
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "HTTP Push rate",
+          "tooltip": {
+            "shared": true,
+            "sort": 0,
+            "value_type": "individual"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "hertz",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        }
+      ],
+      "repeat": null,
+      "title": "Pushes",
+      "type": "row"
+    },
+    {
+      "collapsed": true,
+      "gridPos": {
+        "h": 1,
+        "w": 24,
+        "x": 0,
+        "y": 23
+      },
+      "id": 58,
+      "panels": [
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "$datasource",
+          "editable": true,
+          "error": false,
+          "fill": 0,
+          "grid": {},
+          "gridPos": {
+            "h": 7,
+            "w": 12,
+            "x": 0,
+            "y": 25
+          },
+          "id": 10,
+          "legend": {
+            "avg": false,
+            "current": false,
+            "hideEmpty": true,
+            "hideZero": true,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 2,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "topk(10, rate(synapse_storage_transaction_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
+              "format": "time_series",
+              "interval": "",
+              "intervalFactor": 2,
+              "legendFormat": "{{job}}-{{index}} {{desc}}",
+              "refId": "A",
+              "step": 20
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Top DB transactions by txn rate",
+          "tooltip": {
+            "shared": false,
+            "sort": 0,
+            "value_type": "cumulative"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "hertz",
+              "logBase": 1,
+              "max": null,
+              "min": 0,
+              "show": true
+            },
+            {
+              "format": "short",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        },
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "$datasource",
+          "editable": true,
+          "error": false,
+          "fill": 1,
+          "grid": {},
+          "gridPos": {
+            "h": 7,
+            "w": 12,
+            "x": 12,
+            "y": 25
+          },
+          "id": 11,
+          "legend": {
+            "avg": false,
+            "current": false,
+            "hideEmpty": true,
+            "hideZero": true,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": true,
+          "targets": [
+            {
+              "expr": "topk(5, rate(synapse_storage_transaction_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
+              "format": "time_series",
+              "instant": false,
+              "interval": "",
+              "intervalFactor": 1,
+              "legendFormat": "{{job}}-{{index}} {{desc}}",
+              "refId": "A",
+              "step": 20
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Top DB transactions by total txn time",
+          "tooltip": {
+            "shared": false,
+            "sort": 0,
+            "value_type": "cumulative"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "percentunit",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        }
+      ],
+      "repeat": null,
+      "title": "Database",
+      "type": "row"
+    },
+    {
+      "collapsed": true,
+      "gridPos": {
+        "h": 1,
+        "w": 24,
+        "x": 0,
+        "y": 24
+      },
+      "id": 59,
+      "panels": [
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "$datasource",
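+          "description": "CPU time (user plus system) consumed per second by each instrumented block.",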
+          "editable": true,
+          "error": false,
+          "fill": 1,
+          "grid": {},
+          "gridPos": {
+            "h": 13,
+            "w": 12,
+            "x": 0,
+            "y": 17
+          },
+          "id": 12,
+          "legend": {
+            "alignAsTable": true,
+            "avg": false,
+            "current": false,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 2,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "rate(synapse_util_metrics_block_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\",block_name!=\"wrapped_request_handler\"}[$bucket_size]) + rate(synapse_util_metrics_block_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\",block_name!=\"wrapped_request_handler\"}[$bucket_size])",
+              "format": "time_series",
+              "interval": "",
+              "intervalFactor": 2,
+              "legendFormat": "{{job}}-{{index}} {{block_name}}",
+              "refId": "A",
+              "step": 20
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Total CPU Usage by Block",
+          "tooltip": {
+            "shared": false,
+            "sort": 0,
+            "value_type": "cumulative"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "percentunit",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        },
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "$datasource",
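+          "description": "Mean CPU time consumed per call of each instrumented block.",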
+          "editable": true,
+          "error": false,
+          "fill": 1,
+          "grid": {},
+          "gridPos": {
+            "h": 13,
+            "w": 12,
+            "x": 12,
+            "y": 17
+          },
+          "id": 26,
+          "legend": {
+            "alignAsTable": true,
+            "avg": false,
+            "current": false,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 2,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "(rate(synapse_util_metrics_block_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) + rate(synapse_util_metrics_block_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) / rate(synapse_util_metrics_block_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+              "format": "time_series",
+              "interval": "",
+              "intervalFactor": 2,
+              "legendFormat": "{{job}}-{{index}} {{block_name}}",
+              "refId": "A",
+              "step": 20
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Average CPU Time per Block",
+          "tooltip": {
+            "shared": false,
+            "sort": 0,
+            "value_type": "cumulative"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "s",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        },
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "$datasource",
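+          "description": "Database transaction time consumed per second by each instrumented block, as a fraction of wall-clock time.",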
+          "editable": true,
+          "error": false,
+          "fill": 1,
+          "grid": {},
+          "gridPos": {
+            "h": 13,
+            "w": 12,
+            "x": 0,
+            "y": 30
+          },
+          "id": 13,
+          "legend": {
+            "alignAsTable": true,
+            "avg": false,
+            "current": false,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 2,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\",block_name!=\"wrapped_request_handler\"}[$bucket_size])",
+              "format": "time_series",
+              "interval": "",
+              "intervalFactor": 2,
+              "legendFormat": "{{job}}-{{index}} {{block_name}}",
+              "refId": "A",
+              "step": 20
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Total DB Usage by Block",
+          "tooltip": {
+            "shared": false,
+            "sort": 0,
+            "value_type": "cumulative"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "percentunit",
+              "logBase": 1,
+              "max": null,
+              "min": 0,
+              "show": true
+            },
+            {
+              "format": "short",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        },
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "$datasource",
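+          "description": "Mean duration of database transactions issued by each instrumented block.",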
+          "editable": true,
+          "error": false,
+          "fill": 1,
+          "grid": {},
+          "gridPos": {
+            "h": 13,
+            "w": 12,
+            "x": 12,
+            "y": 30
+          },
+          "id": 27,
+          "legend": {
+            "alignAsTable": true,
+            "avg": false,
+            "current": false,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 2,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_db_txn_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+              "format": "time_series",
+              "interval": "",
+              "intervalFactor": 2,
+              "legendFormat": "{{job}}-{{index}} {{block_name}}",
+              "refId": "A",
+              "step": 20
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Average Database Time per Block",
+          "tooltip": {
+            "shared": false,
+            "sort": 0,
+            "value_type": "cumulative"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "s",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        },
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "$datasource",
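+          "description": "Mean number of database transactions performed per call of each instrumented block.",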
+          "editable": true,
+          "error": false,
+          "fill": 1,
+          "grid": {},
+          "gridPos": {
+            "h": 13,
+            "w": 12,
+            "x": 0,
+            "y": 43
+          },
+          "id": 28,
+          "legend": {
+            "avg": false,
+            "current": false,
+            "max": false,
+            "min": false,
+            "show": false,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 2,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "rate(synapse_util_metrics_block_db_txn_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+              "format": "time_series",
+              "interval": "",
+              "intervalFactor": 2,
+              "legendFormat": "{{job}}-{{index}} {{block_name}}",
+              "refId": "A",
+              "step": 20
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Average Transactions per Block",
+          "tooltip": {
+            "shared": false,
+            "sort": 0,
+            "value_type": "cumulative"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "none",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        },
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "$datasource",
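+          "description": "Mean wall-clock time taken per call of each instrumented block.",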
+          "editable": true,
+          "error": false,
+          "fill": 1,
+          "grid": {},
+          "gridPos": {
+            "h": 13,
+            "w": 12,
+            "x": 12,
+            "y": 43
+          },
+          "id": 25,
+          "legend": {
+            "avg": false,
+            "current": false,
+            "max": false,
+            "min": false,
+            "show": false,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 2,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "rate(synapse_util_metrics_block_time_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+              "format": "time_series",
+              "interval": "",
+              "intervalFactor": 2,
+              "legendFormat": "{{job}}-{{index}} {{block_name}}",
+              "refId": "A",
+              "step": 20
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Average Wallclock Time per Block",
+          "tooltip": {
+            "shared": false,
+            "sort": 0,
+            "value_type": "cumulative"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "s",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        }
+      ],
+      "repeat": null,
+      "title": "Per-block metrics",
+      "type": "row"
+    },
+    {
+      "collapsed": true,
+      "gridPos": {
+        "h": 1,
+        "w": 24,
+        "x": 0,
+        "y": 25
+      },
+      "id": 61,
+      "panels": [
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "$datasource",
+          "decimals": 2,
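+          "description": "Fraction of lookups served from each cache.",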
+          "editable": true,
+          "error": false,
+          "fill": 0,
+          "grid": {},
+          "gridPos": {
+            "h": 10,
+            "w": 12,
+            "x": 0,
+            "y": 55
+          },
+          "id": 1,
+          "legend": {
+            "alignAsTable": true,
+            "avg": false,
+            "current": false,
+            "hideEmpty": true,
+            "hideZero": false,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 2,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "rate(synapse_util_caches_cache:hits{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])/rate(synapse_util_caches_cache:total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+              "format": "time_series",
+              "intervalFactor": 2,
+              "legendFormat": "{{name}} {{job}}-{{index}}",
+              "refId": "A",
+              "step": 20
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Cache Hit Ratio",
+          "tooltip": {
+            "msResolution": true,
+            "shared": false,
+            "sort": 0,
+            "value_type": "cumulative"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "decimals": null,
+              "format": "percentunit",
+              "label": "",
+              "logBase": 1,
+              "max": "1",
+              "min": 0,
+              "show": true
+            },
+            {
+              "format": "short",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": false
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        },
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "$datasource",
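+          "description": "Current number of entries in each cache.",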
+          "editable": true,
+          "error": false,
+          "fill": 1,
+          "grid": {},
+          "gridPos": {
+            "h": 10,
+            "w": 12,
+            "x": 12,
+            "y": 55
+          },
+          "id": 8,
+          "legend": {
+            "alignAsTable": true,
+            "avg": false,
+            "current": false,
+            "hideZero": false,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 2,
+          "links": [],
+          "nullPointMode": "connected",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "synapse_util_caches_cache:size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
+              "format": "time_series",
+              "hide": false,
+              "interval": "",
+              "intervalFactor": 2,
+              "legendFormat": "{{name}} {{job}}-{{index}}",
+              "refId": "A",
+              "step": 20
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Cache Size",
+          "tooltip": {
+            "shared": false,
+            "sort": 0,
+            "value_type": "cumulative"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "short",
+              "logBase": 1,
+              "max": null,
+              "min": 0,
+              "show": true
+            },
+            {
+              "format": "short",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        },
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "$datasource",
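+          "description": "Lookup rate against each cache.",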
+          "editable": true,
+          "error": false,
+          "fill": 1,
+          "grid": {},
+          "gridPos": {
+            "h": 10,
+            "w": 12,
+            "x": 0,
+            "y": 65
+          },
+          "id": 38,
+          "legend": {
+            "alignAsTable": true,
+            "avg": false,
+            "current": false,
+            "hideZero": false,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 2,
+          "links": [],
+          "nullPointMode": "connected",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "rate(synapse_util_caches_cache:total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+              "format": "time_series",
+              "interval": "",
+              "intervalFactor": 2,
+              "legendFormat": "{{name}} {{job}}-{{index}}",
+              "refId": "A",
+              "step": 20
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Cache request rate",
+          "tooltip": {
+            "shared": false,
+            "sort": 0,
+            "value_type": "cumulative"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "rps",
+              "logBase": 1,
+              "max": null,
+              "min": 0,
+              "show": true
+            },
+            {
+              "format": "short",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        },
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "$datasource",
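+          "description": "The 10 caches with the highest miss rate.",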
+          "fill": 1,
+          "gridPos": {
+            "h": 10,
+            "w": 12,
+            "x": 12,
+            "y": 65
+          },
+          "id": 39,
+          "legend": {
+            "alignAsTable": true,
+            "avg": false,
+            "current": false,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "topk(10, rate(synapse_util_caches_cache:total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]) - rate(synapse_util_caches_cache:hits{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))",
+              "format": "time_series",
+              "intervalFactor": 2,
+              "legendFormat": "{{name}} {{job}}-{{index}}",
+              "refId": "A",
+              "step": 20
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Top 10 cache misses",
+          "tooltip": {
+            "shared": true,
+            "sort": 0,
+            "value_type": "individual"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "rps",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        },
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "${DS_PROMETHEUS}",
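+          "description": "Rate at which entries are evicted from each cache.",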
+          "fill": 1,
+          "gridPos": {
+            "h": 9,
+            "w": 12,
+            "x": 0,
+            "y": 75
+          },
+          "id": 65,
+          "legend": {
+            "alignAsTable": true,
+            "avg": false,
+            "current": false,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "rate(synapse_util_caches_cache:evicted_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+              "format": "time_series",
+              "intervalFactor": 1,
+              "legendFormat": "{{name}} {{job}}-{{index}}",
+              "refId": "A"
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Cache eviction rate",
+          "tooltip": {
+            "shared": false,
+            "sort": 0,
+            "value_type": "individual"
+          },
+          "transparent": false,
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "decimals": null,
+              "format": "hertz",
+              "label": "entries / second",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        }
+      ],
+      "repeat": null,
+      "title": "Caches",
+      "type": "row"
+    },
+    {
+      "collapsed": true,
+      "gridPos": {
+        "h": 1,
+        "w": 24,
+        "x": 0,
+        "y": 26
+      },
+      "id": 62,
+      "panels": [
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "${DS_PROMETHEUS}",
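+          "description": "Fraction of time spent in Python garbage collection, per generation, smoothed over 10 minutes.",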
+          "fill": 1,
+          "gridPos": {
+            "h": 9,
+            "w": 12,
+            "x": 0,
+            "y": 90
+          },
+          "id": 91,
+          "legend": {
+            "avg": false,
+            "current": false,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": true,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "rate(python_gc_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[10m])",
+              "format": "time_series",
+              "instant": false,
+              "intervalFactor": 1,
+              "legendFormat": "{{job}}-{{index}} gen {{gen}}",
+              "refId": "A"
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Total GC time by generation (10m smoothing)",
+          "tooltip": {
+            "shared": true,
+            "sort": 0,
+            "value_type": "individual"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "decimals": null,
+              "format": "percentunit",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": "0",
+              "show": true
+            },
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        },
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "$datasource",
+          "decimals": 3,
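+          "description": "Mean duration of each garbage collection pass, per generation.",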
+          "editable": true,
+          "error": false,
+          "fill": 1,
+          "grid": {},
+          "gridPos": {
+            "h": 9,
+            "w": 12,
+            "x": 12,
+            "y": 90
+          },
+          "id": 21,
+          "legend": {
+            "alignAsTable": true,
+            "avg": false,
+            "current": false,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 2,
+          "links": [],
+          "nullPointMode": "null as zero",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "rate(python_gc_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(python_gc_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+              "format": "time_series",
+              "intervalFactor": 2,
+              "legendFormat": "{{job}}-{{index}} gen {{gen}}",
+              "refId": "A",
+              "step": 20,
+              "target": ""
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Average GC Time Per Collection",
+          "tooltip": {
+            "shared": false,
+            "sort": 0,
+            "value_type": "cumulative"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "s",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        },
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "${DS_PROMETHEUS}",
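+          "description": "Number of objects currently tracked by the Python garbage collector, per generation.",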
+          "fill": 1,
+          "gridPos": {
+            "h": 9,
+            "w": 12,
+            "x": 0,
+            "y": 99
+          },
+          "id": 89,
+          "legend": {
+            "avg": false,
+            "current": false,
+            "hideEmpty": true,
+            "hideZero": false,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "python_gc_counts{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}",
+              "format": "time_series",
+              "intervalFactor": 1,
+              "legendFormat": "{{job}}-{{index}} gen {{gen}}",
+              "refId": "A"
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Currently allocated objects",
+          "tooltip": {
+            "shared": false,
+            "sort": 0,
+            "value_type": "individual"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        },
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "${DS_PROMETHEUS}",
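+          "description": "Mean number of unreachable objects found per garbage collection pass, per generation.",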
+          "fill": 1,
+          "gridPos": {
+            "h": 9,
+            "w": 12,
+            "x": 12,
+            "y": 99
+          },
+          "id": 93,
+          "legend": {
+            "avg": false,
+            "current": false,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "rate(python_gc_unreachable_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(python_gc_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+              "format": "time_series",
+              "intervalFactor": 1,
+              "legendFormat": "{{job}}-{{index}} gen {{gen}}",
+              "refId": "A"
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Object counts per collection",
+          "tooltip": {
+            "shared": true,
+            "sort": 0,
+            "value_type": "individual"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        },
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "${DS_PROMETHEUS}",
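+          "description": "Rate of garbage collection passes, per generation.",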
+          "fill": 1,
+          "gridPos": {
+            "h": 9,
+            "w": 12,
+            "x": 0,
+            "y": 108
+          },
+          "id": 95,
+          "legend": {
+            "avg": false,
+            "current": false,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "rate(python_gc_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+              "format": "time_series",
+              "intervalFactor": 1,
+              "legendFormat": "{{job}}-{{index}} gen {{gen}}",
+              "refId": "A"
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "GC frequency",
+          "tooltip": {
+            "shared": true,
+            "sort": 0,
+            "value_type": "individual"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "hertz",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        },
+        {
+          "cards": {
+            "cardPadding": 0,
+            "cardRound": null
+          },
+          "color": {
+            "cardColor": "#b4ff00",
+            "colorScale": "sqrt",
+            "colorScheme": "interpolateSpectral",
+            "exponent": 0.5,
+            "max": null,
+            "min": 0,
+            "mode": "spectrum"
+          },
+          "dataFormat": "tsbuckets",
+          "datasource": "${DS_PROMETHEUS}",
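+          "description": "Distribution of garbage collection pass durations.",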
+          "gridPos": {
+            "h": 9,
+            "w": 12,
+            "x": 12,
+            "y": 108
+          },
+          "heatmap": {},
+          "highlightCards": true,
+          "id": 87,
+          "legend": {
+            "show": true
+          },
+          "links": [],
+          "targets": [
+            {
+              "expr": "sum(rate(python_gc_time_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) by (le)",
+              "format": "heatmap",
+              "intervalFactor": 1,
+              "legendFormat": "{{le}}",
+              "refId": "A"
+            }
+          ],
+          "title": "GC durations",
+          "tooltip": {
+            "show": true,
+            "showHistogram": false
+          },
+          "type": "heatmap",
+          "xAxis": {
+            "show": true
+          },
+          "xBucketNumber": null,
+          "xBucketSize": null,
+          "yAxis": {
+            "decimals": null,
+            "format": "s",
+            "logBase": 1,
+            "max": null,
+            "min": null,
+            "show": true,
+            "splitFactor": null
+          },
+          "yBucketBound": "auto",
+          "yBucketNumber": null,
+          "yBucketSize": null
+        }
+      ],
+      "repeat": null,
+      "title": "GC",
+      "type": "row"
+    },
+    {
+      "collapsed": true,
+      "gridPos": {
+        "h": 1,
+        "w": 24,
+        "x": 0,
+        "y": 27
+      },
+      "id": 63,
+      "panels": [
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "${DS_PROMETHEUS}",
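+          "description": "Rates of the individual replication commands processed by the master.",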
+          "fill": 1,
+          "gridPos": {
+            "h": 7,
+            "w": 12,
+            "x": 0,
+            "y": 19
+          },
+          "id": 2,
+          "legend": {
+            "avg": false,
+            "current": false,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "rate(synapse_replication_tcp_resource_user_sync{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+              "format": "time_series",
+              "intervalFactor": 2,
+              "legendFormat": "user started/stopped syncing",
+              "refId": "A",
+              "step": 20
+            },
+            {
+              "expr": "rate(synapse_replication_tcp_resource_federation_ack{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+              "format": "time_series",
+              "intervalFactor": 2,
+              "legendFormat": "federation ack",
+              "refId": "B",
+              "step": 20
+            },
+            {
+              "expr": "rate(synapse_replication_tcp_resource_remove_pusher{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+              "format": "time_series",
+              "intervalFactor": 2,
+              "legendFormat": "remove pusher",
+              "refId": "C",
+              "step": 20
+            },
+            {
+              "expr": "rate(synapse_replication_tcp_resource_invalidate_cache{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+              "format": "time_series",
+              "intervalFactor": 2,
+              "legendFormat": "invalidate cache",
+              "refId": "D",
+              "step": 20
+            },
+            {
+              "expr": "rate(synapse_replication_tcp_resource_user_ip_cache{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+              "format": "time_series",
+              "intervalFactor": 2,
+              "legendFormat": "user ip cache",
+              "refId": "E",
+              "step": 20
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Rate of events on replication master",
+          "tooltip": {
+            "shared": true,
+            "sort": 0,
+            "value_type": "individual"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "hertz",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ]
+        },
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "${DS_PROMETHEUS}",
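+          "description": "Rate of row updates sent out on each replication stream.",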
+          "fill": 1,
+          "gridPos": {
+            "h": 7,
+            "w": 12,
+            "x": 12,
+            "y": 19
+          },
+          "id": 41,
+          "legend": {
+            "avg": false,
+            "current": false,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "rate(synapse_replication_tcp_resource_stream_updates{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+              "format": "time_series",
+              "interval": "",
+              "intervalFactor": 2,
+              "legendFormat": "{{stream_name}}",
+              "refId": "A",
+              "step": 20
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Outgoing stream updates",
+          "tooltip": {
+            "shared": true,
+            "sort": 0,
+            "value_type": "individual"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "hertz",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ]
+        },
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "${DS_PROMETHEUS}",
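+          "description": "Rate of replication commands received, by worker and command.",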
+          "fill": 1,
+          "gridPos": {
+            "h": 7,
+            "w": 12,
+            "x": 0,
+            "y": 26
+          },
+          "id": 42,
+          "legend": {
+            "avg": false,
+            "current": false,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "sum (rate(synapse_replication_tcp_protocol_inbound_commands{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (name, conn_id)",
+              "format": "time_series",
+              "intervalFactor": 2,
+              "legendFormat": "{{job}}-{{index}} {{command}}",
+              "refId": "A",
+              "step": 20
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Rate of incoming commands",
+          "tooltip": {
+            "shared": true,
+            "sort": 0,
+            "value_type": "individual"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "hertz",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ]
+        },
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "${DS_PROMETHEUS}",
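+          "description": "Rate of replication commands sent, by worker and command.",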
+          "fill": 1,
+          "gridPos": {
+            "h": 7,
+            "w": 12,
+            "x": 12,
+            "y": 26
+          },
+          "id": 43,
+          "legend": {
+            "avg": false,
+            "current": false,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "sum (rate(synapse_replication_tcp_protocol_outbound_commands{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (name, conn_id)",
+              "format": "time_series",
+              "intervalFactor": 2,
+              "legendFormat": "{{job}}-{{index}} {{command}}",
+              "refId": "A",
+              "step": 20
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Rate of outgoing commands",
+          "tooltip": {
+            "shared": true,
+            "sort": 0,
+            "value_type": "individual"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "hertz",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ]
+        }
+      ],
+      "repeat": null,
+      "title": "Replication",
+      "type": "row"
+    },
+    {
+      "collapsed": true,
+      "gridPos": {
+        "h": 1,
+        "w": 24,
+        "x": 0,
+        "y": 28
+      },
+      "id": 69,
+      "panels": [
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "${DS_PROMETHEUS}",
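+          "description": "Number of events by which each worker's processing position lags the master's persisted position.",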
+          "fill": 1,
+          "gridPos": {
+            "h": 9,
+            "w": 12,
+            "x": 0,
+            "y": 29
+          },
+          "id": 67,
+          "legend": {
+            "avg": false,
+            "current": false,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "synapse_event_persisted_position{instance=\"$instance\",job=\"synapse\"} - ignoring(index, job, name) group_right() synapse_event_processing_positions{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
+              "format": "time_series",
+              "interval": "",
+              "intervalFactor": 1,
+              "legendFormat": "{{job}}-{{index}} ",
+              "refId": "A"
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Event processing lag",
+          "tooltip": {
+            "shared": true,
+            "sort": 0,
+            "value_type": "individual"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "short",
+              "label": "events",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        },
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "${DS_PROMETHEUS}",
+          "fill": 1,
+          "gridPos": {
+            "h": 9,
+            "w": 12,
+            "x": 12,
+            "y": 29
+          },
+          "id": 71,
+          "legend": {
+            "avg": false,
+            "current": false,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "expr": "time()*1000-synapse_event_processing_last_ts{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
+              "format": "time_series",
+              "hide": false,
+              "intervalFactor": 1,
+              "legendFormat": "{{job}}-{{index}} {{name}}",
+              "refId": "B"
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Age of last processed event",
+          "tooltip": {
+            "shared": true,
+            "sort": 0,
+            "value_type": "individual"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "ms",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ],
+          "yaxis": {
+            "align": false,
+            "alignLevel": null
+          }
+        }
+      ],
+      "title": "Event processing loop positions",
+      "type": "row"
+    }
+  ],
+  "refresh": "1m",
+  "schemaVersion": 16,
+  "style": "dark",
+  "tags": [
+    "matrix"
+  ],
+  "templating": {
+    "list": [
+      {
+        "current": {
+          "text": "Prometheus",
+          "value": "Prometheus"
+        },
+        "hide": 0,
+        "label": null,
+        "name": "datasource",
+        "options": [],
+        "query": "prometheus",
+        "refresh": 1,
+        "regex": "",
+        "type": "datasource"
+      },
+      {
+        "allFormat": "glob",
+        "auto": true,
+        "auto_count": 100,
+        "auto_min": "30s",
+        "current": {
+          "text": "auto",
+          "value": "$__auto_interval_bucket_size"
+        },
+        "datasource": null,
+        "hide": 0,
+        "includeAll": false,
+        "label": "Bucket Size",
+        "multi": false,
+        "multiFormat": "glob",
+        "name": "bucket_size",
+        "options": [
+          {
+            "selected": true,
+            "text": "auto",
+            "value": "$__auto_interval_bucket_size"
+          },
+          {
+            "selected": false,
+            "text": "30s",
+            "value": "30s"
+          },
+          {
+            "selected": false,
+            "text": "1m",
+            "value": "1m"
+          },
+          {
+            "selected": false,
+            "text": "2m",
+            "value": "2m"
+          },
+          {
+            "selected": false,
+            "text": "5m",
+            "value": "5m"
+          }
+        ],
+        "query": "30s,1m,2m,5m",
+        "refresh": 2,
+        "type": "interval"
+      },
+      {
+        "allValue": null,
+        "current": {},
+        "datasource": "$datasource",
+        "hide": 0,
+        "includeAll": false,
+        "label": null,
+        "multi": false,
+        "name": "instance",
+        "options": [],
+        "query": "label_values(process_cpu_user_seconds_total{job=~\"synapse.*\"}, instance)",
+        "refresh": 2,
+        "regex": "",
+        "sort": 0,
+        "tagValuesQuery": "",
+        "tags": [],
+        "tagsQuery": "",
+        "type": "query",
+        "useTags": false
+      },
+      {
+        "allFormat": "regex wildcard",
+        "allValue": "",
+        "current": {},
+        "datasource": "$datasource",
+        "hide": 0,
+        "hideLabel": false,
+        "includeAll": true,
+        "label": "Job",
+        "multi": true,
+        "multiFormat": "regex values",
+        "name": "job",
+        "options": [],
+        "query": "label_values(process_cpu_user_seconds_total{job=~\"synapse.*\"}, job)",
+        "refresh": 2,
+        "refresh_on_load": false,
+        "regex": "",
+        "sort": 1,
+        "tagValuesQuery": "",
+        "tags": [],
+        "tagsQuery": "",
+        "type": "query",
+        "useTags": false
+      },
+      {
+        "allFormat": "regex wildcard",
+        "allValue": ".*",
+        "current": {},
+        "datasource": "$datasource",
+        "hide": 0,
+        "hideLabel": false,
+        "includeAll": true,
+        "label": "",
+        "multi": true,
+        "multiFormat": "regex values",
+        "name": "index",
+        "options": [],
+        "query": "label_values(process_cpu_user_seconds_total{job=~\"synapse.*\"}, index)",
+        "refresh": 2,
+        "refresh_on_load": false,
+        "regex": "",
+        "sort": 3,
+        "tagValuesQuery": "",
+        "tags": [],
+        "tagsQuery": "",
+        "type": "query",
+        "useTags": false
+      }
+    ]
+  },
+  "time": {
+    "from": "now-1h",
+    "to": "now"
+  },
+  "timepicker": {
+    "now": true,
+    "refresh_intervals": [
+      "5s",
+      "10s",
+      "30s",
+      "1m",
+      "5m",
+      "15m",
+      "30m",
+      "1h",
+      "2h",
+      "1d"
+    ],
+    "time_options": [
+      "5m",
+      "15m",
+      "1h",
+      "6h",
+      "12h",
+      "24h",
+      "2d",
+      "7d",
+      "30d"
+    ]
+  },
+  "timezone": "",
+  "title": "Synapse",
+  "uid": "000000012",
+  "version": 127
+}
\ No newline at end of file
diff --git a/docker/Dockerfile b/docker/Dockerfile
new file mode 100644
index 0000000000..777976217d
--- /dev/null
+++ b/docker/Dockerfile
@@ -0,0 +1,35 @@
+FROM docker.io/python:2-alpine3.8
+
+RUN apk add --no-cache --virtual .nacl_deps \
+        build-base \
+        libffi-dev \
+        libjpeg-turbo-dev \
+        libressl-dev \
+        libxslt-dev \
+        linux-headers \
+        postgresql-dev \
+        su-exec \
+        zlib-dev
+
+COPY . /synapse
+
+# A wheel cache may be provided in ./cache for faster builds
+RUN cd /synapse \
+ && pip install --upgrade \
+        lxml \
+        pip \
+        psycopg2 \
+        setuptools \
+ && mkdir -p /synapse/cache \
+ && pip install -f /synapse/cache --upgrade --process-dependency-links . \
+ && mv /synapse/docker/start.py /synapse/docker/conf / \
+ && rm -rf \
+        setup.cfg \
+        setup.py \
+        synapse
+
+VOLUME ["/data"]
+
+EXPOSE 8008/tcp 8448/tcp
+
+ENTRYPOINT ["/start.py"]
diff --git a/docker/README.md b/docker/README.md
new file mode 100644
index 0000000000..038c78f7c0
--- /dev/null
+++ b/docker/README.md
@@ -0,0 +1,124 @@
+# Synapse Docker
+
+This Docker image will run Synapse as a single process. It does not provide a
+database server or a TURN server; you should run these separately.
+
+## Run
+
+We do not currently offer a `latest` image, as this has somewhat undefined semantics.
+We instead release only tagged versions so upgrading between releases is entirely
+within your control.
+
+### Using docker-compose (easier)
+
+This image is designed to run either with an automatically generated configuration
+file or with a custom configuration that requires manual editing.
+
+An easy way to make use of this image is via docker-compose. See the
+[contrib/docker](../contrib/docker)
+section of the synapse project for examples.
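+
+A minimal sketch of such a compose file might look like the following; the
+service name, volume path, and environment values here are illustrative
+assumptions, not requirements:
+
+```
+version: "3"
+services:
+  synapse:
+    image: docker.io/matrixdotorg/synapse:<version>
+    volumes:
+      - ./data:/data
+    environment:
+      - SYNAPSE_SERVER_NAME=my.matrix.host
+      - SYNAPSE_REPORT_STATS=yes
+    ports:
+      - "8008:8008"
+```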
+
+### Without Compose (harder)
+
+If you do not wish to use Compose, you may still run this image using plain
+Docker commands. Note that the following is just a guideline, and you may need
+to add parameters to the `docker run` command to match the network configuration
+of your postgres database.
+
+```
+docker run \
+    -d \
+    --name synapse \
+    -v ${DATA_PATH}:/data \
+    -e SYNAPSE_SERVER_NAME=my.matrix.host \
+    -e SYNAPSE_REPORT_STATS=yes \
+    docker.io/matrixdotorg/synapse:<version>
+```
+
+## Volumes
+
+The image expects a single volume, located at ``/data``, that will hold:
+
+* temporary files during uploads;
+* uploaded media and thumbnails;
+* the SQLite database if you do not configure postgres;
+* the appservices configuration.
+
+You are free to use separate volumes depending on the storage endpoints at
+your disposal. For instance, ``/data/media`` could be stored on large but
+low-performance HDD storage, while other files could be stored on
+high-performance endpoints.
+
+In order to set up an application service, simply create an ``appservices``
+directory in the data volume and write the application service YAML
+configuration file there. Multiple application services are supported.
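+
+For example, assuming the data volume is mounted from `${DATA_PATH}` on the
+host (as in the run example above) and a hypothetical registration file named
+`my-bridge-registration.yaml`:
+
+```
+mkdir -p ${DATA_PATH}/appservices
+cp my-bridge-registration.yaml ${DATA_PATH}/appservices/
+```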
+
+## Environment
+
+Unless you specify a custom path for the configuration file, a very generic
+file will be generated, based on the following environment settings.
+These are a good starting point for setting up your own deployment.
+
+Global settings:
+
+* ``UID``, the user id Synapse will run as [default 991]
+* ``GID``, the group id Synapse will run as [default 991]
+* ``SYNAPSE_CONFIG_PATH``, path to a custom config file
+
+If ``SYNAPSE_CONFIG_PATH`` is set, you should generate a configuration file
+and then customize it manually. No other environment variable is required.
+
+Otherwise, a dynamic configuration file will be used. The following environment
+variables are available for configuration:
+
+* ``SYNAPSE_SERVER_NAME`` (mandatory), the public hostname of this server.
+* ``SYNAPSE_REPORT_STATS`` (mandatory, ``yes`` or ``no``), enables anonymous
+  statistics reporting back to the Matrix project, which helps us to get funding.
+* ``SYNAPSE_NO_TLS``, set this variable to disable TLS in Synapse (use this if
+  you run your own TLS-capable reverse proxy).
+* ``SYNAPSE_ENABLE_REGISTRATION``, set this variable to enable registration on
+  the Synapse instance.
+* ``SYNAPSE_ALLOW_GUEST``, set this variable to allow guests to join this server.
+* ``SYNAPSE_EVENT_CACHE_SIZE``, the event cache size [default `10K`].
+* ``SYNAPSE_CACHE_FACTOR``, the cache factor [default `0.5`].
+* ``SYNAPSE_RECAPTCHA_PUBLIC_KEY``, set this variable to the recaptcha public
+  key in order to enable recaptcha upon registration.
+* ``SYNAPSE_RECAPTCHA_PRIVATE_KEY``, set this variable to the recaptcha private
+  key in order to enable recaptcha upon registration.
+* ``SYNAPSE_TURN_URIS``, set this variable to a comma-separated list of TURN
+  URIs to enable TURN for this homeserver.
+* ``SYNAPSE_TURN_SECRET``, set this to the TURN shared secret if required.
+
+Shared secrets, which will be initialized to random values if not set:
+
+* ``SYNAPSE_REGISTRATION_SHARED_SECRET``, secret for registering users if
+  registration is disabled.
+* ``SYNAPSE_MACAROON_SECRET_KEY``, secret for signing access tokens
+  to the server.
+
+Database specific values (will use SQLite if not set):
+
+* `POSTGRES_DB` - The database name for the synapse postgres database. [default: `synapse`]
+* `POSTGRES_HOST` - The host of the postgres database if you wish to use postgresql instead of sqlite3. [default: `db` which is useful when using a container on the same docker network in a compose file where the postgres service is called `db`]
+* `POSTGRES_PASSWORD` - The password for the synapse postgres database. **If this is set then postgres will be used instead of sqlite3.** [default: none] **NOTE**: You are highly encouraged to use postgresql! Please use the compose file to make it easier to deploy.
+* `POSTGRES_USER` - The user for the synapse postgres database. [default: `matrix`]
+
+Mail server specific values (will not send emails if not set):
+
+* ``SYNAPSE_SMTP_HOST``, hostname of the mail server.
+* ``SYNAPSE_SMTP_PORT``, TCP port for accessing the mail server [default ``25``].
+* ``SYNAPSE_SMTP_USER``, username for authenticating against the mail server, if required.
+* ``SYNAPSE_SMTP_PASSWORD``, password for authenticating against the mail server, if required.
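+
+Putting these together, a postgres-backed deployment could be started with a
+command like the following sketch, where every value shown is illustrative:
+
+```
+docker run \
+    -d \
+    --name synapse \
+    -v ${DATA_PATH}:/data \
+    -e SYNAPSE_SERVER_NAME=my.matrix.host \
+    -e SYNAPSE_REPORT_STATS=yes \
+    -e POSTGRES_HOST=db \
+    -e POSTGRES_USER=matrix \
+    -e POSTGRES_PASSWORD=a_secret \
+    docker.io/matrixdotorg/synapse:<version>
+```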
+
+## Build
+
+Build the docker image with the `docker build` command from the root of the synapse repository.
+
+```
+docker build -t docker.io/matrixdotorg/synapse . -f docker/Dockerfile
+```
+
+The `-t` option sets the image tag. Official images are tagged `matrixdotorg/synapse:<version>` where `<version>` is the same as the release tag in the synapse git repository.
+
+You may have a local Python wheel cache available, in which case copy the relevant
+packages into the ``cache/`` directory at the root of the project.
diff --git a/contrib/docker/conf/homeserver.yaml b/docker/conf/homeserver.yaml
index 6bc25bb45f..6bc25bb45f 100644
--- a/contrib/docker/conf/homeserver.yaml
+++ b/docker/conf/homeserver.yaml
diff --git a/contrib/docker/conf/log.config b/docker/conf/log.config
index 1851995802..1851995802 100644
--- a/contrib/docker/conf/log.config
+++ b/docker/conf/log.config
diff --git a/contrib/docker/start.py b/docker/start.py
index 90e8b9c51a..90e8b9c51a 100755
--- a/contrib/docker/start.py
+++ b/docker/start.py
diff --git a/docs/admin_api/register_api.rst b/docs/admin_api/register_api.rst
new file mode 100644
index 0000000000..16d65c86b3
--- /dev/null
+++ b/docs/admin_api/register_api.rst
@@ -0,0 +1,63 @@
+Shared-Secret Registration
+==========================
+
+This API allows for the creation of users in an administrative and
+non-interactive way. This is generally used for bootstrapping a Synapse
+instance with administrator accounts.
+
+To authenticate yourself to the server, you will need both the shared secret
+(``registration_shared_secret`` in the homeserver configuration), and a
+one-time nonce. If the registration shared secret is not configured, this API
+is not enabled.
+
+To fetch the nonce, you need to request one from the API::
+
+  > GET /_matrix/client/r0/admin/register
+
+  < {"nonce": "thisisanonce"}
+
+Once you have the nonce, you can make a ``POST`` to the same URL with a JSON
+body containing the nonce, username, password, whether they are an admin
+(optional, False by default), and an HMAC digest of the content.
+
+As an example::
+
+  > POST /_matrix/client/r0/admin/register
+  > {
+     "nonce": "thisisanonce",
+     "username": "pepper_roni",
+     "password": "pizza",
+     "admin": true,
+     "mac": "mac_digest_here"
+    }
+
+  < {
+     "access_token": "token_here",
+     "user_id": "@pepper_roni:localhost",
+     "home_server": "test",
+     "device_id": "device_id_here"
+    }
+
+The MAC is the hex digest output of the HMAC-SHA1 algorithm, with the key being
+the shared secret and the content being the nonce, user, password, and either
+the string "admin" or "notadmin", each separated by NULs. For an example of
+generation in Python::
+
+  import hmac, hashlib
+
+  def generate_mac(nonce, user, password, admin=False):
+
+      # shared_secret is the registration_shared_secret from the
+      # homeserver configuration
+      mac = hmac.new(
+        key=shared_secret,
+        digestmod=hashlib.sha1,
+      )
+
+      mac.update(nonce.encode('utf8'))
+      mac.update(b"\x00")
+      mac.update(user.encode('utf8'))
+      mac.update(b"\x00")
+      mac.update(password.encode('utf8'))
+      mac.update(b"\x00")
+      mac.update(b"admin" if admin else b"notadmin")
+
+      return mac.hexdigest()
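+
+For illustration, the whole exchange can be scripted. This is a sketch only:
+it assumes the ``requests`` library, a homeserver reachable at
+``https://localhost:8448``, and the ``generate_mac`` helper above::
+
+  import json
+
+  import requests
+
+  url = "https://localhost:8448/_matrix/client/r0/admin/register"
+
+  # fetch a one-time nonce (verify=False only because this sketch
+  # targets a self-signed local certificate)
+  nonce = requests.get(url, verify=False).json()["nonce"]
+
+  # build and send the signed registration request
+  body = {
+      "nonce": nonce,
+      "username": "pepper_roni",
+      "password": "pizza",
+      "admin": True,
+      "mac": generate_mac(nonce, "pepper_roni", "pizza", admin=True),
+  }
+  print(requests.post(url, data=json.dumps(body), verify=False).json())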
diff --git a/docs/admin_api/user_admin_api.rst b/docs/admin_api/user_admin_api.rst
index 1c9c5a6bde..d17121a188 100644
--- a/docs/admin_api/user_admin_api.rst
+++ b/docs/admin_api/user_admin_api.rst
@@ -44,13 +44,26 @@ Deactivate Account
 
 This API deactivates an account. It removes active access tokens, resets the
 password, and deletes third-party IDs (to prevent the user requesting a
-password reset).
+password reset). It can also mark the user as GDPR-erased (stopping their data
+from being distributed further, and deleting it entirely if there are no other
+references to it).
 
 The api is::
 
     POST /_matrix/client/r0/admin/deactivate/<user_id>
 
-including an ``access_token`` of a server admin, and an empty request body.
+with a body of:
+
+.. code:: json
+
+    {
+        "erase": true
+    }
+
+including an ``access_token`` of a server admin.
+
+The ``erase`` parameter is optional and defaults to ``false``.
+An empty body may be passed for backwards compatibility.
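+
+For example, with ``curl`` and an admin access token held in ``$TOKEN``
+(both assumptions made purely for this sketch)::
+
+    curl -X POST \
+        "https://localhost:8448/_matrix/client/r0/admin/deactivate/@user:example.com?access_token=$TOKEN" \
+        -H 'Content-Type: application/json' \
+        -d '{"erase": true}'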
 
 
 Reset password
diff --git a/docs/workers.rst b/docs/workers.rst
index 1d521b9ec5..101e950020 100644
--- a/docs/workers.rst
+++ b/docs/workers.rst
@@ -74,7 +74,7 @@ replication endpoints that it's talking to on the main synapse process.
 ``worker_replication_port`` should point to the TCP replication listener port and
 ``worker_replication_http_port`` should point to the HTTP replication port.
 
-Currently, only the ``event_creator`` worker requires specifying
+Currently, the ``event_creator`` and ``federation_reader`` workers require specifying
 ``worker_replication_http_port``.
 
 For instance::
@@ -173,10 +173,23 @@ endpoints matching the following regular expressions::
     ^/_matrix/federation/v1/backfill/
     ^/_matrix/federation/v1/get_missing_events/
     ^/_matrix/federation/v1/publicRooms
+    ^/_matrix/federation/v1/query/
+    ^/_matrix/federation/v1/make_join/
+    ^/_matrix/federation/v1/make_leave/
+    ^/_matrix/federation/v1/send_join/
+    ^/_matrix/federation/v1/send_leave/
+    ^/_matrix/federation/v1/invite/
+    ^/_matrix/federation/v1/query_auth/
+    ^/_matrix/federation/v1/event_auth/
+    ^/_matrix/federation/v1/exchange_third_party_invite/
+    ^/_matrix/federation/v1/send/
 
 The above endpoints should all be routed to the federation_reader worker by the
 reverse-proxy configuration.
 
+The ``^/_matrix/federation/v1/send/`` endpoint must only be handled by a single
+instance.
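+
+As an illustration, with an nginx reverse proxy in front of the deployment
+and a federation_reader worker listening on port 8011 (both the proxy choice
+and the port are assumptions), the routing for this endpoint could look
+like::
+
+    location ~ ^/_matrix/federation/v1/send/ {
+        proxy_pass http://localhost:8011;
+    }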
+
 ``synapse.app.federation_sender``
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
@@ -206,6 +219,10 @@ Handles client API endpoints. It can handle REST endpoints matching the
 following regular expressions::
 
     ^/_matrix/client/(api/v1|r0|unstable)/publicRooms$
+    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/joined_members$
+    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/context/.*$
+    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/members$
+    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state$
 
 ``synapse.app.user_dir``
 ~~~~~~~~~~~~~~~~~~~~~~~~
@@ -224,6 +241,14 @@ regular expressions::
 
     ^/_matrix/client/(api/v1|r0|unstable)/keys/upload
 
+If ``use_presence`` is False in the homeserver config, it can also handle REST
+endpoints matching the following regular expressions::
+
+    ^/_matrix/client/(api/v1|r0|unstable)/presence/[^/]+/status
+
+This "stub" presence handler will pass through ``GET`` request but make the
+``PUT`` effectively a no-op.
+
 It will proxy any requests it cannot handle to the main synapse instance. It
 must therefore be configured with the location of the main instance, via
 the ``worker_main_http_uri`` setting in the frontend_proxy worker configuration
@@ -240,6 +265,7 @@ Handles some event creation. It can handle REST endpoints matching::
     ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send
     ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$
     ^/_matrix/client/(api/v1|r0|unstable)/join/
+    ^/_matrix/client/(api/v1|r0|unstable)/profile/
 
 It will create events locally and then send them on to the main synapse
 instance to be persisted and handled.
diff --git a/jenkins/prepare_synapse.sh b/jenkins/prepare_synapse.sh
index a30179f2aa..d95ca846c4 100755
--- a/jenkins/prepare_synapse.sh
+++ b/jenkins/prepare_synapse.sh
@@ -31,5 +31,5 @@ $TOX_BIN/pip install 'setuptools>=18.5'
 $TOX_BIN/pip install 'pip>=10'
 
 { python synapse/python_dependencies.py
-  echo lxml psycopg2
+  echo lxml
 } | xargs $TOX_BIN/pip install
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000000..dd099dc9c8
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,30 @@
+[tool.towncrier]
+    package = "synapse"
+    filename = "CHANGES.md"
+    directory = "changelog.d"
+    issue_format = "[\\#{issue}](https://github.com/matrix-org/synapse/issues/{issue})"
+
+    [[tool.towncrier.type]]
+        directory = "feature"
+        name = "Features"
+        showcontent = true
+
+    [[tool.towncrier.type]]
+        directory = "bugfix"
+        name = "Bugfixes"
+        showcontent = true
+
+    [[tool.towncrier.type]]
+        directory = "doc"
+        name = "Improved Documentation"
+        showcontent = true
+
+    [[tool.towncrier.type]]
+        directory = "removal"
+        name = "Deprecations and Removals"
+        showcontent = true
+
+    [[tool.towncrier.type]]
+        directory = "misc"
+        name = "Internal Changes"
+        showcontent = true
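+
+    # With the configuration above, towncrier assembles the fragments under
+    # changelog.d/ into CHANGES.md at release time. As an aside (not part of
+    # this file's original content): a draft of the pending changelog can be
+    # previewed with `towncrier --draft`, assuming towncrier is installed.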
diff --git a/scripts-dev/federation_client.py b/scripts-dev/federation_client.py
index 3b28417376..d2acc7654d 100755
--- a/scripts-dev/federation_client.py
+++ b/scripts-dev/federation_client.py
@@ -18,14 +18,22 @@
 from __future__ import print_function
 
 import argparse
+from urlparse import urlparse, urlunparse
+
 import nacl.signing
 import json
 import base64
 import requests
 import sys
+
+from requests.adapters import HTTPAdapter
 import srvlookup
 import yaml
 
+# uncomment the following to enable debug logging of http requests
+#from httplib import HTTPConnection
+#HTTPConnection.debuglevel = 1
+
 def encode_base64(input_bytes):
     """Encode bytes as a base64 string without any padding."""
 
@@ -113,17 +121,6 @@ def read_signing_keys(stream):
     return keys
 
 
-def lookup(destination, path):
-    if ":" in destination:
-        return "https://%s%s" % (destination, path)
-    else:
-        try:
-            srv = srvlookup.lookup("matrix", "tcp", destination)[0]
-            return "https://%s:%d%s" % (srv.host, srv.port, path)
-        except:
-            return "https://%s:%d%s" % (destination, 8448, path)
-
-
 def request_json(method, origin_name, origin_key, destination, path, content):
     if method is None:
         if content is None:
@@ -152,13 +149,19 @@ def request_json(method, origin_name, origin_key, destination, path, content):
         authorization_headers.append(bytes(header))
         print ("Authorization: %s" % header, file=sys.stderr)
 
-    dest = lookup(destination, path)
+    dest = "matrix://%s%s" % (destination, path)
     print ("Requesting %s" % dest, file=sys.stderr)
 
-    result = requests.request(
+    s = requests.Session()
+    s.mount("matrix://", MatrixConnectionAdapter())
+
+    result = s.request(
         method=method,
         url=dest,
-        headers={"Authorization": authorization_headers[0]},
+        headers={
+            "Host": destination,
+            "Authorization": authorization_headers[0]
+        },
         verify=False,
         data=content,
     )
@@ -242,5 +245,39 @@ def read_args_from_config(args):
             args.signing_key_path = config['signing_key_path']
 
 
+class MatrixConnectionAdapter(HTTPAdapter):
+    @staticmethod
+    def lookup(s):
+        if s[-1] == ']':
+            # ipv6 literal (with no port)
+            return s, 8448
+
+        if ":" in s:
+            out = s.rsplit(":", 1)
+            try:
+                port = int(out[1])
+            except ValueError:
+                raise ValueError("Invalid host:port '%s'" % s)
+            return out[0], port
+
+        try:
+            srv = srvlookup.lookup("matrix", "tcp", s)[0]
+            return srv.host, srv.port
+        except Exception:
+            # no SRV record found: fall back to the default federation port
+            return s, 8448
+
+    def get_connection(self, url, proxies=None):
+        parsed = urlparse(url)
+
+        (host, port) = self.lookup(parsed.netloc)
+        netloc = "%s:%d" % (host, port)
+        print("Connecting to %s" % (netloc,), file=sys.stderr)
+        url = urlunparse((
+            "https", netloc, parsed.path, parsed.params, parsed.query,
+            parsed.fragment,
+        ))
+        return super(MatrixConnectionAdapter, self).get_connection(url, proxies)
+
+
 if __name__ == "__main__":
     main()
diff --git a/scripts/register_new_matrix_user b/scripts/register_new_matrix_user
index 12ed20d623..8c3d429351 100755
--- a/scripts/register_new_matrix_user
+++ b/scripts/register_new_matrix_user
@@ -26,11 +26,37 @@ import yaml
 
 
 def request_registration(user, password, server_location, shared_secret, admin=False):
+    req = urllib2.Request(
+        "%s/_matrix/client/r0/admin/register" % (server_location,),
+        headers={'Content-Type': 'application/json'}
+    )
+
+    try:
+        if sys.version_info[:3] >= (2, 7, 9):
+            # As of version 2.7.9, urllib2 now checks SSL certs
+            import ssl
+            f = urllib2.urlopen(req, context=ssl.SSLContext(ssl.PROTOCOL_SSLv23))
+        else:
+            f = urllib2.urlopen(req)
+        body = f.read()
+        f.close()
+        nonce = json.loads(body)["nonce"]
+    except urllib2.HTTPError as e:
+        print "ERROR! Received %d %s" % (e.code, e.reason,)
+        if 400 <= e.code < 500:
+            if e.info().type == "application/json":
+                resp = json.load(e)
+                if "error" in resp:
+                    print resp["error"]
+        sys.exit(1)
+
     mac = hmac.new(
         key=shared_secret,
         digestmod=hashlib.sha1,
     )
 
+    mac.update(nonce)
+    mac.update("\x00")
     mac.update(user)
     mac.update("\x00")
     mac.update(password)
@@ -40,10 +66,10 @@ def request_registration(user, password, server_location, shared_secret, admin=F
     mac = mac.hexdigest()
 
     data = {
-        "user": user,
+        "nonce": nonce,
+        "username": user,
         "password": password,
         "mac": mac,
-        "type": "org.matrix.login.shared_secret",
         "admin": admin,
     }
 
@@ -52,7 +78,7 @@ def request_registration(user, password, server_location, shared_secret, admin=F
     print "Sending registration request..."
 
     req = urllib2.Request(
-        "%s/_matrix/client/api/v1/register" % (server_location,),
+        "%s/_matrix/client/r0/admin/register" % (server_location,),
         data=json.dumps(data),
         headers={'Content-Type': 'application/json'}
     )
diff --git a/setup.cfg b/setup.cfg
index fa6f2d1ce4..c2620be6c5 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -14,8 +14,26 @@ ignore =
     pylint.cfg
     tox.ini
 
-[flake8]
+[pep8]
 max-line-length = 90
-#  W503 requires that binary operators be at the end, not start, of lines. Erik doesn't like it.
-#  E203 is contrary to PEP8.
+#  W503 requires that binary operators be at the end, not start, of lines. Erik
+#  doesn't like it.  E203 is contrary to PEP8.
 ignore = W503,E203
+
+[flake8]
+# note that flake8 inherits the "ignore" settings from "pep8" (because it uses
+# pep8 to do those checks), but not the "max-line-length" setting
+max-line-length = 90
+
+[isort]
+line_length = 89
+not_skip = __init__.py
+sections=FUTURE,STDLIB,COMPAT,THIRDPARTY,TWISTED,FIRSTPARTY,TESTS,LOCALFOLDER
+default_section=THIRDPARTY
+known_first_party = synapse
+known_tests=tests
+known_compat = mock,six
+known_twisted=twisted,OpenSSL
+multi_line_output=3
+include_trailing_comma=true
+combine_as_imports=true
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 78fc63aa49..e62901b761 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,4 +17,4 @@
 """ This is a reference implementation of a Matrix home server.
 """
 
-__version__ = "0.31.1"
+__version__ = "0.33.3"
diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index 06fa38366d..34382e4e3c 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -18,14 +18,17 @@ import logging
 from six import itervalues
 
 import pymacaroons
+from netaddr import IPAddress
+
 from twisted.internet import defer
 
 import synapse.types
 from synapse import event_auth
-from synapse.api.constants import EventTypes, Membership, JoinRules
-from synapse.api.errors import AuthError, Codes
+from synapse.api.constants import EventTypes, JoinRules, Membership
+from synapse.api.errors import AuthError, Codes, ResourceLimitError
+from synapse.config.server import is_threepid_reserved
 from synapse.types import UserID
-from synapse.util.caches import register_cache, CACHE_SIZE_FACTOR
+from synapse.util.caches import CACHE_SIZE_FACTOR, register_cache
 from synapse.util.caches.lrucache import LruCache
 from synapse.util.metrics import Measure
 
@@ -63,8 +66,9 @@ class Auth(object):
 
     @defer.inlineCallbacks
     def check_from_context(self, event, context, do_sig_check=True):
+        prev_state_ids = yield context.get_prev_state_ids(self.store)
         auth_events_ids = yield self.compute_auth_events(
-            event, context.prev_state_ids, for_verification=True,
+            event, prev_state_ids, for_verification=True,
         )
         auth_events = yield self.store.get_events(auth_events_ids)
         auth_events = {
@@ -191,7 +195,7 @@ class Auth(object):
                     synapse.types.create_requester(user_id, app_service=app_service)
                 )
 
-            access_token = get_access_token_from_request(
+            access_token = self.get_access_token_from_request(
                 request, self.TOKEN_NOT_FOUND_HTTP_STATUS
             )
 
@@ -208,9 +212,9 @@ class Auth(object):
             user_agent = request.requestHeaders.getRawHeaders(
                 b"User-Agent",
                 default=[b""]
-            )[0]
+            )[0].decode('ascii', 'surrogateescape')
             if user and access_token and ip_addr:
-                self.store.insert_client_ip(
+                yield self.store.insert_client_ip(
                     user_id=user.to_string(),
                     access_token=access_token,
                     ip=ip_addr,
@@ -237,17 +241,22 @@ class Auth(object):
     @defer.inlineCallbacks
     def _get_appservice_user_id(self, request):
         app_service = self.store.get_app_service_by_token(
-            get_access_token_from_request(
+            self.get_access_token_from_request(
                 request, self.TOKEN_NOT_FOUND_HTTP_STATUS
             )
         )
         if app_service is None:
             defer.returnValue((None, None))
 
-        if "user_id" not in request.args:
+        if app_service.ip_range_whitelist:
+            ip_address = IPAddress(self.hs.get_ip_from_request(request))
+            if ip_address not in app_service.ip_range_whitelist:
+                defer.returnValue((None, None))
+
+        if b"user_id" not in request.args:
             defer.returnValue((app_service.sender, app_service))
 
-        user_id = request.args["user_id"][0]
+        user_id = request.args[b"user_id"][0].decode('utf8')
         if app_service.sender == user_id:
             defer.returnValue((app_service.sender, app_service))
 
@@ -488,7 +497,7 @@ class Auth(object):
     def _look_up_user_by_access_token(self, token):
         ret = yield self.store.get_user_by_access_token(token)
         if not ret:
-            logger.warn("Unrecognised access token - not in store: %s" % (token,))
+            logger.warn("Unrecognised access token - not in store.")
             raise AuthError(
                 self.TOKEN_NOT_FOUND_HTTP_STATUS, "Unrecognised access token.",
                 errcode=Codes.UNKNOWN_TOKEN
@@ -506,12 +515,12 @@ class Auth(object):
 
     def get_appservice_by_req(self, request):
         try:
-            token = get_access_token_from_request(
+            token = self.get_access_token_from_request(
                 request, self.TOKEN_NOT_FOUND_HTTP_STATUS
             )
             service = self.store.get_app_service_by_token(token)
             if not service:
-                logger.warn("Unrecognised appservice access token: %s" % (token,))
+                logger.warn("Unrecognised appservice access token.")
                 raise AuthError(
                     self.TOKEN_NOT_FOUND_HTTP_STATUS,
                     "Unrecognised access token.",
@@ -537,7 +546,8 @@ class Auth(object):
 
     @defer.inlineCallbacks
     def add_auth_events(self, builder, context):
-        auth_ids = yield self.compute_auth_events(builder, context.prev_state_ids)
+        prev_state_ids = yield context.get_prev_state_ids(self.store)
+        auth_ids = yield self.compute_auth_events(builder, prev_state_ids)
 
         auth_events_entries = yield self.store.add_event_hashes(
             auth_ids
@@ -655,7 +665,7 @@ class Auth(object):
             auth_events[(EventTypes.PowerLevels, "")] = power_level_event
 
         send_level = event_auth.get_send_level(
-            EventTypes.Aliases, "", auth_events
+            EventTypes.Aliases, "", power_level_event,
         )
         user_level = event_auth.get_user_power_level(user_id, auth_events)
 
@@ -666,67 +676,156 @@ class Auth(object):
                 " edit its room list entry"
             )
 
+    @staticmethod
+    def has_access_token(request):
+        """Checks if the request has an access_token.
 
-def has_access_token(request):
-    """Checks if the request has an access_token.
+        Returns:
+            bool: False if no access_token was given, True otherwise.
+        """
+        query_params = request.args.get(b"access_token")
+        auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
+        return bool(query_params) or bool(auth_headers)
 
-    Returns:
-        bool: False if no access_token was given, True otherwise.
-    """
-    query_params = request.args.get("access_token")
-    auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
-    return bool(query_params) or bool(auth_headers)
-
-
-def get_access_token_from_request(request, token_not_found_http_status=401):
-    """Extracts the access_token from the request.
-
-    Args:
-        request: The http request.
-        token_not_found_http_status(int): The HTTP status code to set in the
-            AuthError if the token isn't found. This is used in some of the
-            legacy APIs to change the status code to 403 from the default of
-            401 since some of the old clients depended on auth errors returning
-            403.
-    Returns:
-        str: The access_token
-    Raises:
-        AuthError: If there isn't an access_token in the request.
-    """
+    @staticmethod
+    def get_access_token_from_request(request, token_not_found_http_status=401):
+        """Extracts the access_token from the request.
 
-    auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
-    query_params = request.args.get(b"access_token")
-    if auth_headers:
-        # Try the get the access_token from a "Authorization: Bearer"
-        # header
-        if query_params is not None:
-            raise AuthError(
-                token_not_found_http_status,
-                "Mixing Authorization headers and access_token query parameters.",
-                errcode=Codes.MISSING_TOKEN,
-            )
-        if len(auth_headers) > 1:
-            raise AuthError(
-                token_not_found_http_status,
-                "Too many Authorization headers.",
-                errcode=Codes.MISSING_TOKEN,
-            )
-        parts = auth_headers[0].split(" ")
-        if parts[0] == "Bearer" and len(parts) == 2:
-            return parts[1]
+        Args:
+            request: The http request.
+            token_not_found_http_status(int): The HTTP status code to set in the
+                AuthError if the token isn't found. This is used in some of the
+                legacy APIs to change the status code to 403 from the default of
+                401 since some of the old clients depended on auth errors returning
+                403.
+        Returns:
+            unicode: The access_token
+        Raises:
+            AuthError: If there isn't an access_token in the request.
+        """
+
+        auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
+        query_params = request.args.get(b"access_token")
+        if auth_headers:
+            # Try to get the access_token from an "Authorization: Bearer"
+            # header
+            if query_params is not None:
+                raise AuthError(
+                    token_not_found_http_status,
+                    "Mixing Authorization headers and access_token query parameters.",
+                    errcode=Codes.MISSING_TOKEN,
+                )
+            if len(auth_headers) > 1:
+                raise AuthError(
+                    token_not_found_http_status,
+                    "Too many Authorization headers.",
+                    errcode=Codes.MISSING_TOKEN,
+                )
+            parts = auth_headers[0].split(b" ")
+            if parts[0] == b"Bearer" and len(parts) == 2:
+                return parts[1].decode('ascii')
+            else:
+                raise AuthError(
+                    token_not_found_http_status,
+                    "Invalid Authorization header.",
+                    errcode=Codes.MISSING_TOKEN,
+                )
         else:
-            raise AuthError(
-                token_not_found_http_status,
-                "Invalid Authorization header.",
-                errcode=Codes.MISSING_TOKEN,
+            # Try to get the access_token from the query params.
+            if not query_params:
+                raise AuthError(
+                    token_not_found_http_status,
+                    "Missing access token.",
+                    errcode=Codes.MISSING_TOKEN
+                )
+
+            return query_params[0].decode('ascii')
+
+    @defer.inlineCallbacks
+    def check_in_room_or_world_readable(self, room_id, user_id):
+        """Checks that the user is or was in the room or the room is world
+        readable. If it isn't then an exception is raised.
+
+        Returns:
+            Deferred[tuple[str, str|None]]: Resolves to the current membership of
+            the user in the room and the membership event ID of the user. If
+            the user is not in the room and never has been, then
+            `(Membership.JOIN, None)` is returned.
+        """
+
+        try:
+            # check_user_was_in_room will return the most recent membership
+            # event for the user if:
+            #  * The user is a non-guest user, and was ever in the room
+            #  * The user is a guest user, and has joined the room
+            # else it will throw.
+            member_event = yield self.check_user_was_in_room(room_id, user_id)
+            defer.returnValue((member_event.membership, member_event.event_id))
+        except AuthError:
+            visibility = yield self.state.get_current_state(
+                room_id, EventTypes.RoomHistoryVisibility, ""
             )
-    else:
-        # Try to get the access_token from the query params.
-        if not query_params:
+            if (
+                visibility and
+                visibility.content["history_visibility"] == "world_readable"
+            ):
+                defer.returnValue((Membership.JOIN, None))
+                return
             raise AuthError(
-                token_not_found_http_status,
-                "Missing access token.",
-                errcode=Codes.MISSING_TOKEN
+                403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN
+            )
+
+    @defer.inlineCallbacks
+    def check_auth_blocking(self, user_id=None, threepid=None):
+        """Checks if the user should be rejected for some external reason,
+        such as monthly active user limiting or global disable flag
+
+        Args:
+            user_id(str|None): If present, checks for presence against existing
+            MAU cohort
+
+            threepid(dict|None): If present, checks for presence against configured
+            reserved threepid. Used in cases where the user is trying to register
+            with a MAU-blocked server; normally they would be rejected, but their
+            threepid is on the reserved list. user_id and
+            threepid should never be set at the same time.
+        """
+
+        # Never fail an auth check for the server notices user; otherwise,
+        # when event creation is prohibited due to blocking, the server
+        # notices themselves could not be created.
+        if user_id == self.hs.config.server_notices_mxid:
+            return
+
+        if self.hs.config.hs_disabled:
+            raise ResourceLimitError(
+                403, self.hs.config.hs_disabled_message,
+                errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
+                admin_contact=self.hs.config.admin_contact,
+                limit_type=self.hs.config.hs_disabled_limit_type
             )
+        if self.hs.config.limit_usage_by_mau is True:
+            assert not (user_id and threepid)
 
-        return query_params[0]
+            # If the user is already part of the MAU cohort or a trial user
+            if user_id:
+                timestamp = yield self.store.user_last_seen_monthly_active(user_id)
+                if timestamp:
+                    return
+
+                is_trial = yield self.store.is_trial_user(user_id)
+                if is_trial:
+                    return
+            elif threepid:
+                # If the user does not exist yet, but is signing up with a
+                # reserved threepid then pass auth check
+                if is_threepid_reserved(self.hs.config, threepid):
+                    return
+            # Else if there is no room in the MAU bucket, bail
+            current_mau = yield self.store.get_monthly_active_count()
+            if current_mau >= self.hs.config.max_mau_value:
+                raise ResourceLimitError(
+                    403, "Monthly Active User Limit Exceeded",
+                    admin_contact=self.hs.config.admin_contact,
+                    errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
+                    limit_type="monthly_active_user"
+                )
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index 5baba43966..c2630c4c64 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -1,6 +1,7 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 # Copyright 2017 Vector Creations Ltd
+# Copyright 2018 New Vector Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -76,6 +77,9 @@ class EventTypes(object):
     Topic = "m.room.topic"
     Name = "m.room.name"
 
+    ServerACL = "m.room.server_acl"
+    Pinned = "m.room.pinned_events"
+
 
 class RejectedReason(object):
     AUTH_ERROR = "auth_error"
@@ -92,3 +96,19 @@ class RoomCreationPreset(object):
 class ThirdPartyEntityKind(object):
     USER = "user"
     LOCATION = "location"
+
+
+class RoomVersions(object):
+    V1 = "1"
+    VDH_TEST = "vdh-test-version"
+
+
+# the version we will give rooms which are created on this server
+DEFAULT_ROOM_VERSION = RoomVersions.V1
+
+# vdh-test-version is a placeholder to get room versioning support working and tested
+# until we have a working v2.
+KNOWN_ROOM_VERSIONS = {RoomVersions.V1, RoomVersions.VDH_TEST}
+
+ServerNoticeMsgType = "m.server_notice"
+ServerNoticeLimitReached = "m.server_notice.usage_limit_reached"
diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index e6ad3768f0..2e7f98404d 100644
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,10 +18,11 @@
 
 import logging
 
-import simplejson as json
 from six import iteritems
 from six.moves import http_client
 
+from canonicaljson import json
+
 logger = logging.getLogger(__name__)
 
 
@@ -54,6 +56,9 @@ class Codes(object):
     SERVER_NOT_TRUSTED = "M_SERVER_NOT_TRUSTED"
     CONSENT_NOT_GIVEN = "M_CONSENT_NOT_GIVEN"
     CANNOT_LEAVE_SERVER_NOTICE_ROOM = "M_CANNOT_LEAVE_SERVER_NOTICE_ROOM"
+    RESOURCE_LIMIT_EXCEEDED = "M_RESOURCE_LIMIT_EXCEEDED"
+    UNSUPPORTED_ROOM_VERSION = "M_UNSUPPORTED_ROOM_VERSION"
+    INCOMPATIBLE_ROOM_VERSION = "M_INCOMPATIBLE_ROOM_VERSION"
 
 
 class CodeMessageException(RuntimeError):
@@ -68,20 +73,6 @@ class CodeMessageException(RuntimeError):
         self.code = code
         self.msg = msg
 
-    def error_dict(self):
-        return cs_error(self.msg)
-
-
-class MatrixCodeMessageException(CodeMessageException):
-    """An error from a general matrix endpoint, eg. from a proxied Matrix API call.
-
-    Attributes:
-        errcode (str): Matrix error code e.g 'M_FORBIDDEN'
-    """
-    def __init__(self, code, msg, errcode=Codes.UNKNOWN):
-        super(MatrixCodeMessageException, self).__init__(code, msg)
-        self.errcode = errcode
-
 
 class SynapseError(CodeMessageException):
     """A base exception type for matrix errors which have an errcode and error
@@ -107,38 +98,28 @@ class SynapseError(CodeMessageException):
             self.errcode,
         )
 
-    @classmethod
-    def from_http_response_exception(cls, err):
-        """Make a SynapseError based on an HTTPResponseException
-
-        This is useful when a proxied request has failed, and we need to
-        decide how to map the failure onto a matrix error to send back to the
-        client.
 
-        An attempt is made to parse the body of the http response as a matrix
-        error. If that succeeds, the errcode and error message from the body
-        are used as the errcode and error message in the new synapse error.
-
-        Otherwise, the errcode is set to M_UNKNOWN, and the error message is
-        set to the reason code from the HTTP response.
-
-        Args:
-            err (HttpResponseException):
+class ProxiedRequestError(SynapseError):
+    """An error from a general matrix endpoint, eg. from a proxied Matrix API call.
 
-        Returns:
-            SynapseError:
-        """
-        # try to parse the body as json, to get better errcode/msg, but
-        # default to M_UNKNOWN with the HTTP status as the error text
-        try:
-            j = json.loads(err.response)
-        except ValueError:
-            j = {}
-        errcode = j.get('errcode', Codes.UNKNOWN)
-        errmsg = j.get('error', err.msg)
+    Attributes:
+        errcode (str): Matrix error code e.g 'M_FORBIDDEN'
+    """
+    def __init__(self, code, msg, errcode=Codes.UNKNOWN, additional_fields=None):
+        super(ProxiedRequestError, self).__init__(
+            code, msg, errcode
+        )
+        if additional_fields is None:
+            self._additional_fields = {}
+        else:
+            self._additional_fields = dict(additional_fields)
 
-        res = SynapseError(err.code, errmsg, errcode)
-        return res
+    def error_dict(self):
+        return cs_error(
+            self.msg,
+            self.errcode,
+            **self._additional_fields
+        )
 
 
 class ConsentNotGivenError(SynapseError):
@@ -250,6 +231,30 @@ class AuthError(SynapseError):
         super(AuthError, self).__init__(*args, **kwargs)
 
 
+class ResourceLimitError(SynapseError):
+    """
+    Any error raised when there is a problem with resource usage.
+    For instance, the monthly active user limit for the server has been exceeded.
+    """
+    def __init__(
+        self, code, msg,
+        errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
+        admin_contact=None,
+        limit_type=None,
+    ):
+        self.admin_contact = admin_contact
+        self.limit_type = limit_type
+        super(ResourceLimitError, self).__init__(code, msg, errcode=errcode)
+
+    def error_dict(self):
+        return cs_error(
+            self.msg,
+            self.errcode,
+            admin_contact=self.admin_contact,
+            limit_type=self.limit_type
+        )
+
+
 class EventSizeError(SynapseError):
     """An error raised when an event is too big."""
 
@@ -307,12 +312,25 @@ class LimitExceededError(SynapseError):
         )
 
 
-def cs_exception(exception):
-    if isinstance(exception, CodeMessageException):
-        return exception.error_dict()
-    else:
-        logger.error("Unknown exception type: %s", type(exception))
-        return {}
+class IncompatibleRoomVersionError(SynapseError):
+    """A server is trying to join a room whose version it does not support."""
+
+    def __init__(self, room_version):
+        super(IncompatibleRoomVersionError, self).__init__(
+            code=400,
+            msg="Your homeserver does not support the features required to "
+                "join this room",
+            errcode=Codes.INCOMPATIBLE_ROOM_VERSION,
+        )
+
+        self._room_version = room_version
+
+    def error_dict(self):
+        return cs_error(
+            self.msg,
+            self.errcode,
+            room_version=self._room_version,
+        )
 
 
 def cs_error(msg, code=Codes.UNKNOWN, **kwargs):
@@ -371,7 +389,7 @@ class HttpResponseException(CodeMessageException):
     Represents an HTTP-level failure of an outbound request
 
     Attributes:
-        response (str): body of response
+        response (bytes): body of response
     """
     def __init__(self, code, msg, response):
         """
@@ -379,7 +397,39 @@ class HttpResponseException(CodeMessageException):
         Args:
             code (int): HTTP status code
             msg (str): reason phrase from HTTP response status line
-            response (str): body of response
+            response (bytes): body of response
         """
         super(HttpResponseException, self).__init__(code, msg)
         self.response = response
+
+    def to_synapse_error(self):
+        """Make a SynapseError based on an HTTPResponseException
+
+        This is useful when a proxied request has failed, and we need to
+        decide how to map the failure onto a matrix error to send back to the
+        client.
+
+        An attempt is made to parse the body of the http response as a matrix
+        error. If that succeeds, the errcode and error message from the body
+        are used as the errcode and error message in the new synapse error.
+
+        Otherwise, the errcode is set to M_UNKNOWN, and the error message is
+        set to the reason code from the HTTP response.
+
+        Returns:
+            SynapseError:
+        """
+        # try to parse the body as json, to get better errcode/msg, but
+        # default to M_UNKNOWN with the HTTP status as the error text
+        try:
+            j = json.loads(self.response)
+        except ValueError:
+            j = {}
+
+        if not isinstance(j, dict):
+            j = {}
+
+        errcode = j.pop('errcode', Codes.UNKNOWN)
+        errmsg = j.pop('error', self.msg)
+
+        return ProxiedRequestError(self.code, errmsg, errcode, j)
diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py
index dbc0e7e445..186831e118 100644
--- a/synapse/api/filtering.py
+++ b/synapse/api/filtering.py
@@ -12,15 +12,16 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from synapse.api.errors import SynapseError
-from synapse.storage.presence import UserPresenceState
-from synapse.types import UserID, RoomID
-from twisted.internet import defer
-
-import simplejson as json
 import jsonschema
+from canonicaljson import json
 from jsonschema import FormatChecker
 
+from twisted.internet import defer
+
+from synapse.api.errors import SynapseError
+from synapse.storage.presence import UserPresenceState
+from synapse.types import RoomID, UserID
+
 FILTER_SCHEMA = {
     "additionalProperties": False,
     "type": "object",
@@ -112,7 +113,13 @@ ROOM_EVENT_FILTER_SCHEMA = {
         },
         "contains_url": {
             "type": "boolean"
-        }
+        },
+        "lazy_load_members": {
+            "type": "boolean"
+        },
+        "include_redundant_members": {
+            "type": "boolean"
+        },
     }
 }
 
@@ -260,6 +267,12 @@ class FilterCollection(object):
     def ephemeral_limit(self):
         return self._room_ephemeral_filter.limit()
 
+    def lazy_load_members(self):
+        return self._room_state_filter.lazy_load_members()
+
+    def include_redundant_members(self):
+        return self._room_state_filter.include_redundant_members()
+
     def filter_presence(self, events):
         return self._presence_filter.filter(events)
 
@@ -416,6 +429,12 @@ class Filter(object):
     def limit(self):
         return self.filter_json.get("limit", 10)
 
+    def lazy_load_members(self):
+        return self.filter_json.get("lazy_load_members", False)
+
+    def include_redundant_members(self):
+        return self.filter_json.get("include_redundant_members", False)
+
 
 def _matches_wildcard(actual_value, filter_value):
     if filter_value.endswith("*"):
diff --git a/synapse/api/ratelimiting.py b/synapse/api/ratelimiting.py
index 06cc8d90b8..3bb5b3da37 100644
--- a/synapse/api/ratelimiting.py
+++ b/synapse/api/ratelimiting.py
@@ -72,7 +72,7 @@ class Ratelimiter(object):
         return allowed, time_allowed
 
     def prune_message_counts(self, time_now_s):
-        for user_id in self.message_counts.keys():
+        for user_id in list(self.message_counts.keys()):
             message_count, time_start, msg_rate_hz = (
                 self.message_counts[user_id]
             )
diff --git a/synapse/api/urls.py b/synapse/api/urls.py
index bb46b5da8a..71347912f1 100644
--- a/synapse/api/urls.py
+++ b/synapse/api/urls.py
@@ -15,8 +15,8 @@
 # limitations under the License.
 
 """Contains the URL paths to prefix various aspects of the server with. """
-from hashlib import sha256
 import hmac
+from hashlib import sha256
 
 from six.moves.urllib.parse import urlencode
 
diff --git a/synapse/app/__init__.py b/synapse/app/__init__.py
index 9c2b627590..3b6b9368b8 100644
--- a/synapse/app/__init__.py
+++ b/synapse/app/__init__.py
@@ -14,9 +14,11 @@
 # limitations under the License.
 
 import sys
+
+from synapse import python_dependencies  # noqa: E402
+
 sys.dont_write_bytecode = True
 
-from synapse import python_dependencies   # noqa: E402
 
 try:
     python_dependencies.check_requirements()
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index a6925ab139..7c866e246a 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -17,15 +17,18 @@ import gc
 import logging
 import sys
 
+from daemonize import Daemonize
+
+from twisted.internet import error, reactor
+
+from synapse.util import PreserveLoggingContext
+from synapse.util.rlimit import change_resource_limit
+
 try:
     import affinity
 except Exception:
     affinity = None
 
-from daemonize import Daemonize
-from synapse.util import PreserveLoggingContext
-from synapse.util.rlimit import change_resource_limit
-from twisted.internet import error, reactor
 
 logger = logging.getLogger(__name__)
 
@@ -137,7 +140,7 @@ def listen_metrics(bind_addresses, port):
         logger.info("Metrics now reporting on %s:%d", host, port)
 
 
-def listen_tcp(bind_addresses, port, factory, backlog=50):
+def listen_tcp(bind_addresses, port, factory, reactor=reactor, backlog=50):
     """
     Create a TCP socket for a port and several addresses
     """
@@ -153,7 +156,9 @@ def listen_tcp(bind_addresses, port, factory, backlog=50):
             check_bind_error(e, address, bind_addresses)
 
 
-def listen_ssl(bind_addresses, port, factory, context_factory, backlog=50):
+def listen_ssl(
+    bind_addresses, port, factory, context_factory, reactor=reactor, backlog=50
+):
     """
     Create an SSL socket for a port and several addresses
     """
diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py
index dd114dee07..86b5067400 100644
--- a/synapse/app/appservice.py
+++ b/synapse/app/appservice.py
@@ -16,6 +16,9 @@
 import logging
 import sys
 
+from twisted.internet import defer, reactor
+from twisted.web.resource import NoResource
+
 import synapse
 from synapse import events
 from synapse.app import _base
@@ -23,6 +26,7 @@ from synapse.config._base import ConfigError
 from synapse.config.homeserver import HomeServerConfig
 from synapse.config.logger import setup_logging
 from synapse.http.site import SynapseSite
+from synapse.metrics import RegistryProxy
 from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
 from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
 from synapse.replication.slave.storage.directory import DirectoryStore
@@ -35,8 +39,6 @@ from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.logcontext import LoggingContext, run_in_background
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
-from twisted.internet import reactor, defer
-from twisted.web.resource import NoResource
 
 logger = logging.getLogger("synapse.app.appservice")
 
@@ -49,10 +51,7 @@ class AppserviceSlaveStore(
 
 
 class AppserviceServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = AppserviceSlaveStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = AppserviceSlaveStore
 
     def _listen_http(self, listener_config):
         port = listener_config["port"]
@@ -62,7 +61,7 @@ class AppserviceServer(HomeServer):
         for res in listener_config["resources"]:
             for name in res["names"]:
                 if name == "metrics":
-                    resources[METRICS_PREFIX] = MetricsResource(self)
+                    resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
 
         root_resource = create_resource_tree(resources, NoResource())
 
@@ -97,7 +96,7 @@ class AppserviceServer(HomeServer):
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
                     logger.warn(("Metrics listener configured, but "
-                                 "collect_metrics is not enabled!"))
+                                 "enable_metrics is not True!"))
                 else:
                     _base.listen_metrics(listener["bind_addresses"],
                                          listener["port"])
@@ -115,8 +114,9 @@ class ASReplicationHandler(ReplicationClientHandler):
         super(ASReplicationHandler, self).__init__(hs.get_datastore())
         self.appservice_handler = hs.get_application_service_handler()
 
+    @defer.inlineCallbacks
     def on_rdata(self, stream_name, token, rows):
-        super(ASReplicationHandler, self).on_rdata(stream_name, token, rows)
+        yield super(ASReplicationHandler, self).on_rdata(stream_name, token, rows)
 
         if stream_name == "events":
             max_stream_id = self.store.get_room_max_stream_ordering()
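
The on_rdata rework here repeats across every worker in this diff: the base ReplicationClientHandler.on_rdata may now return a Deferred, so subclasses decorate with @defer.inlineCallbacks and yield the super() call before reacting to the rows. The schematic shape (class name hypothetical):

    from twisted.internet import defer

    class ExampleReplicationHandler(ReplicationClientHandler):
        @defer.inlineCallbacks
        def on_rdata(self, stream_name, token, rows):
            # Let the shared bookkeeping (token advancement etc.) finish...
            yield super(ExampleReplicationHandler, self).on_rdata(
                stream_name, token, rows
            )
            # ...then apply the worker-specific reaction to the new rows.
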
diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py
index 85dada7f9f..ce2b113dbb 100644
--- a/synapse/app/client_reader.py
+++ b/synapse/app/client_reader.py
@@ -16,6 +16,9 @@
 import logging
 import sys
 
+from twisted.internet import reactor
+from twisted.web.resource import NoResource
+
 import synapse
 from synapse import events
 from synapse.app import _base
@@ -28,6 +31,7 @@ from synapse.http.site import SynapseSite
 from synapse.metrics import RegistryProxy
 from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
 from synapse.replication.slave.storage._base import BaseSlavedStore
+from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
 from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
 from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
 from synapse.replication.slave.storage.directory import DirectoryStore
@@ -35,29 +39,34 @@ from synapse.replication.slave.storage.events import SlavedEventStore
 from synapse.replication.slave.storage.keys import SlavedKeyStore
 from synapse.replication.slave.storage.registration import SlavedRegistrationStore
 from synapse.replication.slave.storage.room import RoomStore
-from synapse.replication.slave.storage.transactions import TransactionStore
+from synapse.replication.slave.storage.transactions import SlavedTransactionStore
 from synapse.replication.tcp.client import ReplicationClientHandler
-from synapse.rest.client.v1.room import PublicRoomListRestServlet
+from synapse.rest.client.v1.room import (
+    JoinedRoomMemberListRestServlet,
+    PublicRoomListRestServlet,
+    RoomEventContextServlet,
+    RoomMemberListRestServlet,
+    RoomStateRestServlet,
+)
 from synapse.server import HomeServer
 from synapse.storage.engines import create_engine
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.logcontext import LoggingContext
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
-from twisted.internet import reactor
-from twisted.web.resource import NoResource
 
 logger = logging.getLogger("synapse.app.client_reader")
 
 
 class ClientReaderSlavedStore(
+    SlavedAccountDataStore,
     SlavedEventStore,
     SlavedKeyStore,
     RoomStore,
     DirectoryStore,
     SlavedApplicationServiceStore,
     SlavedRegistrationStore,
-    TransactionStore,
+    SlavedTransactionStore,
     SlavedClientIpStore,
     BaseSlavedStore,
 ):
@@ -65,10 +74,7 @@ class ClientReaderSlavedStore(
 
 
 class ClientReaderServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = ClientReaderSlavedStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = ClientReaderSlavedStore
 
     def _listen_http(self, listener_config):
         port = listener_config["port"]
@@ -81,7 +87,13 @@ class ClientReaderServer(HomeServer):
                     resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
                 elif name == "client":
                     resource = JsonResource(self, canonical_json=False)
+
                     PublicRoomListRestServlet(self).register(resource)
+                    RoomMemberListRestServlet(self).register(resource)
+                    JoinedRoomMemberListRestServlet(self).register(resource)
+                    RoomStateRestServlet(self).register(resource)
+                    RoomEventContextServlet(self).register(resource)
+
                     resources.update({
                         "/_matrix/client/r0": resource,
                         "/_matrix/client/unstable": resource,
@@ -122,7 +134,7 @@ class ClientReaderServer(HomeServer):
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
                     logger.warn(("Metrics listener configured, but "
-                                 "collect_metrics is not enabled!"))
+                                 "enable_metrics is not True!"))
                 else:
                     _base.listen_metrics(listener["bind_addresses"],
                                          listener["port"])
@@ -153,11 +165,13 @@ def start(config_options):
     database_engine = create_engine(config.database_config)
 
     tls_server_context_factory = context_factory.ServerContextFactory(config)
+    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)
 
     ss = ClientReaderServer(
         config.server_name,
         db_config=config.database_config,
         tls_server_context_factory=tls_server_context_factory,
+        tls_client_options_factory=tls_client_options_factory,
         config=config,
         version_string="Synapse/" + get_version_string(synapse),
         database_engine=database_engine,
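
Each worker in this diff swaps its setup() override for a DATASTORE_CLASS attribute. The shared HomeServer.setup() is not shown in this diff, but presumably it now instantiates that attribute along these lines (a sketch, not the actual base-class code):

    class HomeServer(object):
        DATASTORE_CLASS = None  # each app subclass must set this

        def setup(self):
            logger.info("Setting up.")
            self.datastore = self.DATASTORE_CLASS(self.get_db_conn(), self)
            logger.info("Finished setting up.")
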
diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py
index 5ca77c0f1a..f98e456ea0 100644
--- a/synapse/app/event_creator.py
+++ b/synapse/app/event_creator.py
@@ -16,6 +16,9 @@
 import logging
 import sys
 
+from twisted.internet import reactor
+from twisted.web.resource import NoResource
+
 import synapse
 from synapse import events
 from synapse.app import _base
@@ -40,27 +43,36 @@ from synapse.replication.slave.storage.pushers import SlavedPusherStore
 from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
 from synapse.replication.slave.storage.registration import SlavedRegistrationStore
 from synapse.replication.slave.storage.room import RoomStore
-from synapse.replication.slave.storage.transactions import TransactionStore
+from synapse.replication.slave.storage.transactions import SlavedTransactionStore
 from synapse.replication.tcp.client import ReplicationClientHandler
+from synapse.rest.client.v1.profile import (
+    ProfileAvatarURLRestServlet,
+    ProfileDisplaynameRestServlet,
+    ProfileRestServlet,
+)
 from synapse.rest.client.v1.room import (
-    RoomSendEventRestServlet, RoomMembershipRestServlet, RoomStateEventRestServlet,
     JoinRoomAliasServlet,
+    RoomMembershipRestServlet,
+    RoomSendEventRestServlet,
+    RoomStateEventRestServlet,
 )
 from synapse.server import HomeServer
 from synapse.storage.engines import create_engine
+from synapse.storage.user_directory import UserDirectoryStore
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.logcontext import LoggingContext
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
-from twisted.internet import reactor
-from twisted.web.resource import NoResource
 
 logger = logging.getLogger("synapse.app.event_creator")
 
 
 class EventCreatorSlavedStore(
+    # FIXME(#3714): We need to add UserDirectoryStore as we write directly
+    # rather than going via the correct worker.
+    UserDirectoryStore,
     DirectoryStore,
-    TransactionStore,
+    SlavedTransactionStore,
     SlavedProfileStore,
     SlavedAccountDataStore,
     SlavedPusherStore,
@@ -78,10 +90,7 @@ class EventCreatorSlavedStore(
 
 
 class EventCreatorServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = EventCreatorSlavedStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = EventCreatorSlavedStore
 
     def _listen_http(self, listener_config):
         port = listener_config["port"]
@@ -98,6 +107,9 @@ class EventCreatorServer(HomeServer):
                     RoomMembershipRestServlet(self).register(resource)
                     RoomStateEventRestServlet(self).register(resource)
                     JoinRoomAliasServlet(self).register(resource)
+                    ProfileAvatarURLRestServlet(self).register(resource)
+                    ProfileDisplaynameRestServlet(self).register(resource)
+                    ProfileRestServlet(self).register(resource)
                     resources.update({
                         "/_matrix/client/r0": resource,
                         "/_matrix/client/unstable": resource,
@@ -138,7 +150,7 @@ class EventCreatorServer(HomeServer):
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
                     logger.warn(("Metrics listener configured, but "
-                                 "collect_metrics is not enabled!"))
+                                 "enable_metrics is not True!"))
                 else:
                     _base.listen_metrics(listener["bind_addresses"],
                                          listener["port"])
@@ -171,11 +183,13 @@ def start(config_options):
     database_engine = create_engine(config.database_config)
 
     tls_server_context_factory = context_factory.ServerContextFactory(config)
+    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)
 
     ss = EventCreatorServer(
         config.server_name,
         db_config=config.database_config,
         tls_server_context_factory=tls_server_context_factory,
+        tls_client_options_factory=tls_client_options_factory,
         config=config,
         version_string="Synapse/" + get_version_string(synapse),
         database_engine=database_engine,
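
The slaved stores are composed by multiple inheritance, and order matters: Python's MRO resolves attributes left to right, which is why the FIXME prepends UserDirectoryStore so its directly-writing methods win over any slaved equivalents. A toy illustration of the mechanism (names invented):

    class Base(object):                  # cf. BaseSlavedStore
        def get_thing(self):
            return "slaved/read-only"

    class DirectWrites(Base):            # cf. UserDirectoryStore
        def get_thing(self):
            return "direct"

    class WorkerStore(DirectWrites, Base):
        pass

    print(WorkerStore().get_thing())     # "direct": left-most class wins
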
diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py
index 2a1995d0cd..60f5973505 100644
--- a/synapse/app/federation_reader.py
+++ b/synapse/app/federation_reader.py
@@ -16,6 +16,9 @@
 import logging
 import sys
 
+from twisted.internet import reactor
+from twisted.web.resource import NoResource
+
 import synapse
 from synapse import events
 from synapse.api.urls import FEDERATION_PREFIX
@@ -29,11 +32,17 @@ from synapse.http.site import SynapseSite
 from synapse.metrics import RegistryProxy
 from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
 from synapse.replication.slave.storage._base import BaseSlavedStore
+from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
+from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
 from synapse.replication.slave.storage.directory import DirectoryStore
 from synapse.replication.slave.storage.events import SlavedEventStore
 from synapse.replication.slave.storage.keys import SlavedKeyStore
+from synapse.replication.slave.storage.profile import SlavedProfileStore
+from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
+from synapse.replication.slave.storage.pushers import SlavedPusherStore
+from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
 from synapse.replication.slave.storage.room import RoomStore
-from synapse.replication.slave.storage.transactions import TransactionStore
+from synapse.replication.slave.storage.transactions import SlavedTransactionStore
 from synapse.replication.tcp.client import ReplicationClientHandler
 from synapse.server import HomeServer
 from synapse.storage.engines import create_engine
@@ -41,28 +50,29 @@ from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.logcontext import LoggingContext
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
-from twisted.internet import reactor
-from twisted.web.resource import NoResource
 
 logger = logging.getLogger("synapse.app.federation_reader")
 
 
 class FederationReaderSlavedStore(
+    SlavedAccountDataStore,
+    SlavedProfileStore,
+    SlavedApplicationServiceStore,
+    SlavedPusherStore,
+    SlavedPushRuleStore,
+    SlavedReceiptsStore,
     SlavedEventStore,
     SlavedKeyStore,
     RoomStore,
     DirectoryStore,
-    TransactionStore,
+    SlavedTransactionStore,
     BaseSlavedStore,
 ):
     pass
 
 
 class FederationReaderServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = FederationReaderSlavedStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = FederationReaderSlavedStore
 
     def _listen_http(self, listener_config):
         port = listener_config["port"]
@@ -111,7 +121,7 @@ class FederationReaderServer(HomeServer):
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
                     logger.warn(("Metrics listener configured, but "
-                                 "collect_metrics is not enabled!"))
+                                 "enable_metrics is not True!"))
                 else:
                     _base.listen_metrics(listener["bind_addresses"],
                                          listener["port"])
@@ -142,11 +152,13 @@ def start(config_options):
     database_engine = create_engine(config.database_config)
 
     tls_server_context_factory = context_factory.ServerContextFactory(config)
+    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)
 
     ss = FederationReaderServer(
         config.server_name,
         db_config=config.database_config,
         tls_server_context_factory=tls_server_context_factory,
+        tls_client_options_factory=tls_client_options_factory,
         config=config,
         version_string="Synapse/" + get_version_string(synapse),
         database_engine=database_engine,
diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py
index 81ad574043..60dd09aac3 100644
--- a/synapse/app/federation_sender.py
+++ b/synapse/app/federation_sender.py
@@ -16,6 +16,9 @@
 import logging
 import sys
 
+from twisted.internet import defer, reactor
+from twisted.web.resource import NoResource
+
 import synapse
 from synapse import events
 from synapse.app import _base
@@ -33,23 +36,21 @@ from synapse.replication.slave.storage.events import SlavedEventStore
 from synapse.replication.slave.storage.presence import SlavedPresenceStore
 from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
 from synapse.replication.slave.storage.registration import SlavedRegistrationStore
-from synapse.replication.slave.storage.transactions import TransactionStore
+from synapse.replication.slave.storage.transactions import SlavedTransactionStore
 from synapse.replication.tcp.client import ReplicationClientHandler
 from synapse.server import HomeServer
 from synapse.storage.engines import create_engine
-from synapse.util.async import Linearizer
+from synapse.util.async_helpers import Linearizer
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.logcontext import LoggingContext, run_in_background
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
-from twisted.internet import defer, reactor
-from twisted.web.resource import NoResource
 
 logger = logging.getLogger("synapse.app.federation_sender")
 
 
 class FederationSenderSlaveStore(
-    SlavedDeviceInboxStore, TransactionStore, SlavedReceiptsStore, SlavedEventStore,
+    SlavedDeviceInboxStore, SlavedTransactionStore, SlavedReceiptsStore, SlavedEventStore,
     SlavedRegistrationStore, SlavedDeviceStore, SlavedPresenceStore,
 ):
     def __init__(self, db_conn, hs):
@@ -77,10 +78,7 @@ class FederationSenderSlaveStore(
 
 
 class FederationSenderServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = FederationSenderSlaveStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = FederationSenderSlaveStore
 
     def _listen_http(self, listener_config):
         port = listener_config["port"]
@@ -125,7 +123,7 @@ class FederationSenderServer(HomeServer):
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
                     logger.warn(("Metrics listener configured, but "
-                                 "collect_metrics is not enabled!"))
+                                 "enable_metrics is not True!"))
                 else:
                     _base.listen_metrics(listener["bind_addresses"],
                                          listener["port"])
@@ -143,8 +141,9 @@ class FederationSenderReplicationHandler(ReplicationClientHandler):
         super(FederationSenderReplicationHandler, self).__init__(hs.get_datastore())
         self.send_handler = FederationSenderHandler(hs, self)
 
+    @defer.inlineCallbacks
     def on_rdata(self, stream_name, token, rows):
-        super(FederationSenderReplicationHandler, self).on_rdata(
+        yield super(FederationSenderReplicationHandler, self).on_rdata(
             stream_name, token, rows
         )
         self.send_handler.process_replication_rows(stream_name, token, rows)
@@ -185,11 +184,13 @@ def start(config_options):
     config.send_federation = True
 
     tls_server_context_factory = context_factory.ServerContextFactory(config)
+    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)
 
     ps = FederationSenderServer(
         config.server_name,
         db_config=config.database_config,
         tls_server_context_factory=tls_server_context_factory,
+        tls_client_options_factory=tls_client_options_factory,
         config=config,
         version_string="Synapse/" + get_version_string(synapse),
         database_engine=database_engine,
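
The synapse.util.async to synapse.util.async_helpers rename seen above is not cosmetic: async becomes a reserved keyword in Python 3.7, so a module literally named async is unimportable there (the import below assumes a synapse checkout on the path):

    # import synapse.util.async                         # SyntaxError on 3.7+
    from synapse.util.async_helpers import Linearizer   # fine on 2.7 and 3.x
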
diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py
index 5a164a7a95..8c0b9c67b0 100644
--- a/synapse/app/frontend_proxy.py
+++ b/synapse/app/frontend_proxy.py
@@ -16,6 +16,9 @@
 import logging
 import sys
 
+from twisted.internet import defer, reactor
+from twisted.web.resource import NoResource
+
 import synapse
 from synapse import events
 from synapse.api.errors import SynapseError
@@ -25,9 +28,7 @@ from synapse.config.homeserver import HomeServerConfig
 from synapse.config.logger import setup_logging
 from synapse.crypto import context_factory
 from synapse.http.server import JsonResource
-from synapse.http.servlet import (
-    RestServlet, parse_json_object_from_request,
-)
+from synapse.http.servlet import RestServlet, parse_json_object_from_request
 from synapse.http.site import SynapseSite
 from synapse.metrics import RegistryProxy
 from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
@@ -37,6 +38,7 @@ from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
 from synapse.replication.slave.storage.devices import SlavedDeviceStore
 from synapse.replication.slave.storage.registration import SlavedRegistrationStore
 from synapse.replication.tcp.client import ReplicationClientHandler
+from synapse.rest.client.v1.base import ClientV1RestServlet, client_path_patterns
 from synapse.rest.client.v2_alpha._base import client_v2_patterns
 from synapse.server import HomeServer
 from synapse.storage.engines import create_engine
@@ -44,12 +46,39 @@ from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.logcontext import LoggingContext
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
-from twisted.internet import defer, reactor
-from twisted.web.resource import NoResource
 
 logger = logging.getLogger("synapse.app.frontend_proxy")
 
 
+class PresenceStatusStubServlet(ClientV1RestServlet):
+    PATTERNS = client_path_patterns("/presence/(?P<user_id>[^/]*)/status")
+
+    def __init__(self, hs):
+        super(PresenceStatusStubServlet, self).__init__(hs)
+        self.http_client = hs.get_simple_http_client()
+        self.auth = hs.get_auth()
+        self.main_uri = hs.config.worker_main_http_uri
+
+    @defer.inlineCallbacks
+    def on_GET(self, request, user_id):
+        # Pass through the auth headers, if any, in case the access token
+        # is there.
+        auth_headers = request.requestHeaders.getRawHeaders("Authorization", [])
+        headers = {
+            "Authorization": auth_headers,
+        }
+        result = yield self.http_client.get_json(
+            self.main_uri + request.uri,
+            headers=headers,
+        )
+        defer.returnValue((200, result))
+
+    @defer.inlineCallbacks
+    def on_PUT(self, request, user_id):
+        yield self.auth.get_user_by_req(request)
+        defer.returnValue((200, {}))
+
+
 class KeyUploadServlet(RestServlet):
     PATTERNS = client_v2_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")
 
@@ -119,10 +148,7 @@ class FrontendProxySlavedStore(
 
 
 class FrontendProxyServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = FrontendProxySlavedStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = FrontendProxySlavedStore
 
     def _listen_http(self, listener_config):
         port = listener_config["port"]
@@ -136,6 +162,12 @@ class FrontendProxyServer(HomeServer):
                 elif name == "client":
                     resource = JsonResource(self, canonical_json=False)
                     KeyUploadServlet(self).register(resource)
+
+                    # If presence is disabled, use the stub servlet that does
+                    # not allow sending presence
+                    if not self.config.use_presence:
+                        PresenceStatusStubServlet(self).register(resource)
+
                     resources.update({
                         "/_matrix/client/r0": resource,
                         "/_matrix/client/unstable": resource,
@@ -154,7 +186,8 @@ class FrontendProxyServer(HomeServer):
                 listener_config,
                 root_resource,
                 self.version_string,
-            )
+            ),
+            reactor=self.get_reactor()
         )
 
         logger.info("Synapse client reader now listening on port %d", port)
@@ -176,7 +209,7 @@ class FrontendProxyServer(HomeServer):
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
                     logger.warn(("Metrics listener configured, but "
-                                 "collect_metrics is not enabled!"))
+                                 "enable_metrics is not True!"))
                 else:
                     _base.listen_metrics(listener["bind_addresses"],
                                          listener["port"])
@@ -209,11 +242,13 @@ def start(config_options):
     database_engine = create_engine(config.database_config)
 
     tls_server_context_factory = context_factory.ServerContextFactory(config)
+    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)
 
     ss = FrontendProxyServer(
         config.server_name,
         db_config=config.database_config,
         tls_server_context_factory=tls_server_context_factory,
+        tls_client_options_factory=tls_client_options_factory,
         config=config,
         version_string="Synapse/" + get_version_string(synapse),
         database_engine=database_engine,
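
The new PresenceStatusStubServlet keeps presence-disabled deployments compatible with clients that unconditionally publish presence. Its observable behaviour on the Matrix client-server presence endpoint, summarised:

    # GET /_matrix/client/r0/presence/{userId}/status
    #   -> proxied to the main process (worker_main_http_uri), forwarding the
    #      caller's Authorization header so the access token still applies.
    # PUT /_matrix/client/r0/presence/{userId}/status
    #   -> authenticated, then answered 200 {} without being forwarded, so
    #      the presence update is silently dropped.
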
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 714f98a3e0..3eb5b663de 100755
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -18,34 +18,51 @@ import logging
 import os
 import sys
 
+from six import iteritems
+
+from prometheus_client import Gauge
+
+from twisted.application import service
+from twisted.internet import defer, reactor
+from twisted.web.resource import EncodingResourceWrapper, NoResource
+from twisted.web.server import GzipEncoderFactory
+from twisted.web.static import File
+
 import synapse
 import synapse.config.logger
 from synapse import events
-from synapse.api.urls import CONTENT_REPO_PREFIX, FEDERATION_PREFIX, \
-    LEGACY_MEDIA_PREFIX, MEDIA_PREFIX, SERVER_KEY_PREFIX, SERVER_KEY_V2_PREFIX, \
-    STATIC_PREFIX, WEB_CLIENT_PREFIX
+from synapse.api.urls import (
+    CONTENT_REPO_PREFIX,
+    FEDERATION_PREFIX,
+    LEGACY_MEDIA_PREFIX,
+    MEDIA_PREFIX,
+    SERVER_KEY_PREFIX,
+    SERVER_KEY_V2_PREFIX,
+    STATIC_PREFIX,
+    WEB_CLIENT_PREFIX,
+)
 from synapse.app import _base
-from synapse.app._base import quit_with_error, listen_ssl, listen_tcp
+from synapse.app._base import listen_ssl, listen_tcp, quit_with_error
 from synapse.config._base import ConfigError
 from synapse.config.homeserver import HomeServerConfig
 from synapse.crypto import context_factory
 from synapse.federation.transport.server import TransportLayerServer
-from synapse.module_api import ModuleApi
 from synapse.http.additional_resource import AdditionalResource
 from synapse.http.server import RootRedirect
 from synapse.http.site import SynapseSite
 from synapse.metrics import RegistryProxy
+from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
-from synapse.python_dependencies import CONDITIONAL_REQUIREMENTS, \
-    check_requirements
-from synapse.replication.http import ReplicationRestResource, REPLICATION_PREFIX
+from synapse.module_api import ModuleApi
+from synapse.python_dependencies import CONDITIONAL_REQUIREMENTS, check_requirements
+from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
 from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
 from synapse.rest import ClientRestResource
 from synapse.rest.key.v1.server_key_resource import LocalKey
 from synapse.rest.key.v2 import KeyApiV2Resource
 from synapse.rest.media.v0.content_repository import ContentRepoResource
 from synapse.server import HomeServer
-from synapse.storage import are_all_users_on_domain
+from synapse.storage import DataStore, are_all_users_on_domain
 from synapse.storage.engines import IncorrectDatabaseSetup, create_engine
 from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database
 from synapse.util.caches import CACHE_SIZE_FACTOR
@@ -55,11 +72,6 @@ from synapse.util.manhole import manhole
 from synapse.util.module_loader import load_module
 from synapse.util.rlimit import change_resource_limit
 from synapse.util.versionstring import get_version_string
-from twisted.application import service
-from twisted.internet import defer, reactor
-from twisted.web.resource import EncodingResourceWrapper, NoResource
-from twisted.web.server import GzipEncoderFactory
-from twisted.web.static import File
 
 logger = logging.getLogger("synapse.app.homeserver")
 
@@ -99,6 +111,8 @@ def build_resource_for_web_client(hs):
 
 
 class SynapseHomeServer(HomeServer):
+    DATASTORE_CLASS = DataStore
+
     def _listener_http(self, config, listener_config):
         port = listener_config["port"]
         bind_addresses = listener_config["bind_addresses"]
@@ -266,7 +280,7 @@ class SynapseHomeServer(HomeServer):
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
                     logger.warn(("Metrics listener configured, but "
-                                 "collect_metrics is not enabled!"))
+                                 "enable_metrics is not True!"))
                 else:
                     _base.listen_metrics(listener["bind_addresses"],
                                          listener["port"])
@@ -290,6 +304,11 @@ class SynapseHomeServer(HomeServer):
             quit_with_error(e.message)
 
 
+# Gauges to expose monthly active user control metrics
+current_mau_gauge = Gauge("synapse_admin_mau:current", "Current MAU")
+max_mau_gauge = Gauge("synapse_admin_mau:max", "MAU Limit")
+
+
 def setup(config_options):
     """
     Args:
@@ -318,14 +337,10 @@ def setup(config_options):
     # check any extra requirements we have now we have a config
     check_requirements(config)
 
-    version_string = "Synapse/" + get_version_string(synapse)
-
-    logger.info("Server hostname: %s", config.server_name)
-    logger.info("Server version: %s", version_string)
-
     events.USE_FROZEN_DICTS = config.use_frozen_dicts
 
     tls_server_context_factory = context_factory.ServerContextFactory(config)
+    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)
 
     database_engine = create_engine(config.database_config)
     config.database_config["args"]["cp_openfun"] = database_engine.on_new_connection
@@ -334,21 +349,22 @@ def setup(config_options):
         config.server_name,
         db_config=config.database_config,
         tls_server_context_factory=tls_server_context_factory,
+        tls_client_options_factory=tls_client_options_factory,
         config=config,
-        version_string=version_string,
+        version_string="Synapse/" + get_version_string(synapse),
         database_engine=database_engine,
     )
 
     logger.info("Preparing database: %s...", config.database_config['name'])
 
     try:
-        db_conn = hs.get_db_conn(run_new_connection=False)
-        prepare_database(db_conn, database_engine, config=config)
-        database_engine.on_new_connection(db_conn)
+        with hs.get_db_conn(run_new_connection=False) as db_conn:
+            prepare_database(db_conn, database_engine, config=config)
+            database_engine.on_new_connection(db_conn)
 
-        hs.run_startup_checks(db_conn, database_engine)
+            hs.run_startup_checks(db_conn, database_engine)
 
-        db_conn.commit()
+            db_conn.commit()
     except UpgradeDatabaseException:
         sys.stderr.write(
             "\nFailed to upgrade database.\n"
@@ -423,6 +439,9 @@ def run(hs):
     # currently either 0 or 1
     stats_process = []
 
+    def start_phone_stats_home():
+        return run_as_background_process("phone_stats_home", phone_stats_home)
+
     @defer.inlineCallbacks
     def phone_stats_home():
         logger.info("Gathering stats for reporting")
@@ -440,7 +459,7 @@ def run(hs):
         stats["total_nonbridged_users"] = total_nonbridged_users
 
         daily_user_type_results = yield hs.get_datastore().count_daily_user_type()
-        for name, count in daily_user_type_results.iteritems():
+        for name, count in iteritems(daily_user_type_results):
             stats["daily_user_type_" + name] = count
 
         room_count = yield hs.get_datastore().get_room_count()
@@ -451,7 +470,7 @@ def run(hs):
         stats["daily_messages"] = yield hs.get_datastore().count_daily_messages()
 
         r30_results = yield hs.get_datastore().count_r30_users()
-        for name, count in r30_results.iteritems():
+        for name, count in iteritems(r30_results):
             stats["r30_users_" + name] = count
 
         daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
@@ -494,16 +513,41 @@ def run(hs):
             )
 
     def generate_user_daily_visit_stats():
-        hs.get_datastore().generate_user_daily_visits()
+        return run_as_background_process(
+            "generate_user_daily_visits",
+            hs.get_datastore().generate_user_daily_visits,
+        )
 
     # Rather than update on per session basis, batch up the requests.
     # If you increase the loop period, the accuracy of user_daily_visits
     # table will decrease
     clock.looping_call(generate_user_daily_visit_stats, 5 * 60 * 1000)
 
+    # monthly active user limiting functionality
+    clock.looping_call(
+        hs.get_datastore().reap_monthly_active_users, 1000 * 60 * 60
+    )
+    hs.get_datastore().reap_monthly_active_users()
+
+    @defer.inlineCallbacks
+    def generate_monthly_active_users():
+        count = 0
+        if hs.config.limit_usage_by_mau:
+            count = yield hs.get_datastore().get_monthly_active_count()
+        current_mau_gauge.set(float(count))
+        max_mau_gauge.set(float(hs.config.max_mau_value))
+
+    hs.get_datastore().initialise_reserved_users(
+        hs.config.mau_limits_reserved_threepids
+    )
+    generate_monthly_active_users()
+    if hs.config.limit_usage_by_mau:
+        clock.looping_call(generate_monthly_active_users, 5 * 60 * 1000)
+    # End of monthly active user settings
+
     if hs.config.report_stats:
         logger.info("Scheduling stats reporting for 3 hour intervals")
-        clock.looping_call(phone_stats_home, 3 * 60 * 60 * 1000)
+        clock.looping_call(start_phone_stats_home, 3 * 60 * 60 * 1000)
 
         # We need to defer this init for the cases that we daemonize
         # otherwise the process ID we get is that of the non-daemon process
@@ -511,7 +555,7 @@ def run(hs):
 
         # We wait 5 minutes to send the first set of stats as the server can
         # be quite busy the first few minutes
-        clock.call_later(5 * 60, phone_stats_home)
+        clock.call_later(5 * 60, start_phone_stats_home)
 
     if hs.config.daemonize and hs.config.print_pidfile:
         print (hs.config.pid_file)
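
Two things are worth pulling out of this hunk: periodic jobs are now launched through run_as_background_process, which gives each run its own logcontext and resource metrics instead of firing a bare Deferred from looping_call; and the MAU figures are exported as Prometheus gauges. A standalone illustration of the gauge half (the set() values here are invented):

    from prometheus_client import Gauge

    current_mau_gauge = Gauge("synapse_admin_mau:current", "Current MAU")
    max_mau_gauge = Gauge("synapse_admin_mau:max", "MAU Limit")

    current_mau_gauge.set(float(42))   # scraped as synapse_admin_mau:current 42.0
    max_mau_gauge.set(float(100))      # scraped as synapse_admin_mau:max 100.0
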
diff --git a/synapse/app/media_repository.py b/synapse/app/media_repository.py
index 006bba80a8..e3dbb3b4e6 100644
--- a/synapse/app/media_repository.py
+++ b/synapse/app/media_repository.py
@@ -16,11 +16,12 @@
 import logging
 import sys
 
+from twisted.internet import reactor
+from twisted.web.resource import NoResource
+
 import synapse
 from synapse import events
-from synapse.api.urls import (
-    CONTENT_REPO_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX
-)
+from synapse.api.urls import CONTENT_REPO_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX
 from synapse.app import _base
 from synapse.config._base import ConfigError
 from synapse.config.homeserver import HomeServerConfig
@@ -33,7 +34,7 @@ from synapse.replication.slave.storage._base import BaseSlavedStore
 from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
 from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
 from synapse.replication.slave.storage.registration import SlavedRegistrationStore
-from synapse.replication.slave.storage.transactions import TransactionStore
+from synapse.replication.slave.storage.transactions import SlavedTransactionStore
 from synapse.replication.tcp.client import ReplicationClientHandler
 from synapse.rest.media.v0.content_repository import ContentRepoResource
 from synapse.server import HomeServer
@@ -43,8 +44,6 @@ from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.logcontext import LoggingContext
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
-from twisted.internet import reactor
-from twisted.web.resource import NoResource
 
 logger = logging.getLogger("synapse.app.media_repository")
 
@@ -53,7 +52,7 @@ class MediaRepositorySlavedStore(
     SlavedApplicationServiceStore,
     SlavedRegistrationStore,
     SlavedClientIpStore,
-    TransactionStore,
+    SlavedTransactionStore,
     BaseSlavedStore,
     MediaRepositoryStore,
 ):
@@ -61,10 +60,7 @@ class MediaRepositorySlavedStore(
 
 
 class MediaRepositoryServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = MediaRepositorySlavedStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = MediaRepositorySlavedStore
 
     def _listen_http(self, listener_config):
         port = listener_config["port"]
@@ -118,7 +114,7 @@ class MediaRepositoryServer(HomeServer):
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
                     logger.warn(("Metrics listener configured, but "
-                                 "collect_metrics is not enabled!"))
+                                 "enable_metrics is not True!"))
                 else:
                     _base.listen_metrics(listener["bind_addresses"],
                                          listener["port"])
@@ -156,11 +152,13 @@ def start(config_options):
     database_engine = create_engine(config.database_config)
 
     tls_server_context_factory = context_factory.ServerContextFactory(config)
+    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)
 
     ss = MediaRepositoryServer(
         config.server_name,
         db_config=config.database_config,
         tls_server_context_factory=tls_server_context_factory,
+        tls_client_options_factory=tls_client_options_factory,
         config=config,
         version_string="Synapse/" + get_version_string(synapse),
         database_engine=database_engine,
diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py
index 64df47f9cc..244c604de9 100644
--- a/synapse/app/pusher.py
+++ b/synapse/app/pusher.py
@@ -16,6 +16,9 @@
 import logging
 import sys
 
+from twisted.internet import defer, reactor
+from twisted.web.resource import NoResource
+
 import synapse
 from synapse import events
 from synapse.app import _base
@@ -37,8 +40,6 @@ from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.logcontext import LoggingContext, run_in_background
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
-from twisted.internet import defer, reactor
-from twisted.web.resource import NoResource
 
 logger = logging.getLogger("synapse.app.pusher")
 
@@ -77,10 +78,7 @@ class PusherSlaveStore(
 
 
 class PusherServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = PusherSlaveStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = PusherSlaveStore
 
     def remove_pusher(self, app_id, push_key, user_id):
         self.get_tcp_replication().send_remove_pusher(app_id, push_key, user_id)
@@ -128,7 +126,7 @@ class PusherServer(HomeServer):
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
                     logger.warn(("Metrics listener configured, but "
-                                 "collect_metrics is not enabled!"))
+                                 "enable_metrics is not True!"))
                 else:
                     _base.listen_metrics(listener["bind_addresses"],
                                          listener["port"])
@@ -147,8 +145,9 @@ class PusherReplicationHandler(ReplicationClientHandler):
 
         self.pusher_pool = hs.get_pusherpool()
 
+    @defer.inlineCallbacks
     def on_rdata(self, stream_name, token, rows):
-        super(PusherReplicationHandler, self).on_rdata(stream_name, token, rows)
+        yield super(PusherReplicationHandler, self).on_rdata(stream_name, token, rows)
         run_in_background(self.poke_pushers, stream_name, token, rows)
 
     @defer.inlineCallbacks
@@ -161,11 +160,11 @@ class PusherReplicationHandler(ReplicationClientHandler):
                     else:
                         yield self.start_pusher(row.user_id, row.app_id, row.pushkey)
             elif stream_name == "events":
-                yield self.pusher_pool.on_new_notifications(
+                self.pusher_pool.on_new_notifications(
                     token, token,
                 )
             elif stream_name == "receipts":
-                yield self.pusher_pool.on_new_receipts(
+                self.pusher_pool.on_new_receipts(
                     token, token, set(row.room_id for row in rows)
                 )
         except Exception:
diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
index 6808d6d3e0..6662340797 100644
--- a/synapse/app/synchrotron.py
+++ b/synapse/app/synchrotron.py
@@ -17,6 +17,11 @@ import contextlib
 import logging
 import sys
 
+from six import iteritems
+
+from twisted.internet import defer, reactor
+from twisted.web.resource import NoResource
+
 import synapse
 from synapse.api.constants import EventTypes
 from synapse.app import _base
@@ -36,12 +41,12 @@ from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
 from synapse.replication.slave.storage.devices import SlavedDeviceStore
 from synapse.replication.slave.storage.events import SlavedEventStore
 from synapse.replication.slave.storage.filtering import SlavedFilteringStore
+from synapse.replication.slave.storage.groups import SlavedGroupServerStore
 from synapse.replication.slave.storage.presence import SlavedPresenceStore
 from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
 from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
 from synapse.replication.slave.storage.registration import SlavedRegistrationStore
 from synapse.replication.slave.storage.room import RoomStore
-from synapse.replication.slave.storage.groups import SlavedGroupServerStore
 from synapse.replication.tcp.client import ReplicationClientHandler
 from synapse.rest.client.v1 import events
 from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet
@@ -50,16 +55,11 @@ from synapse.rest.client.v2_alpha import sync
 from synapse.server import HomeServer
 from synapse.storage.engines import create_engine
 from synapse.storage.presence import UserPresenceState
-from synapse.storage.roommember import RoomMemberStore
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.logcontext import LoggingContext, run_in_background
 from synapse.util.manhole import manhole
 from synapse.util.stringutils import random_string
 from synapse.util.versionstring import get_version_string
-from twisted.internet import defer, reactor
-from twisted.web.resource import NoResource
-
-from six import iteritems
 
 logger = logging.getLogger("synapse.app.synchrotron")
 
@@ -80,9 +80,7 @@ class SynchrotronSlavedStore(
     RoomStore,
     BaseSlavedStore,
 ):
-    did_forget = (
-        RoomMemberStore.__dict__["did_forget"]
-    )
+    pass
 
 
 UPDATE_SYNCING_USERS_MS = 10 * 1000
@@ -116,7 +114,10 @@ class SynchrotronPresence(object):
         logger.info("Presence process_id is %r", self.process_id)
 
     def send_user_sync(self, user_id, is_syncing, last_sync_ms):
-        self.hs.get_tcp_replication().send_user_sync(user_id, is_syncing, last_sync_ms)
+        if self.hs.config.use_presence:
+            self.hs.get_tcp_replication().send_user_sync(
+                user_id, is_syncing, last_sync_ms
+            )
 
     def mark_as_coming_online(self, user_id):
         """A user has started syncing. Send a UserSync to the master, unless they
@@ -213,10 +214,13 @@ class SynchrotronPresence(object):
         yield self.notify_from_replication(states, stream_id)
 
     def get_currently_syncing_users(self):
-        return [
-            user_id for user_id, count in iteritems(self.user_to_num_current_syncs)
-            if count > 0
-        ]
+        if self.hs.config.use_presence:
+            return [
+                user_id for user_id, count in iteritems(self.user_to_num_current_syncs)
+                if count > 0
+            ]
+        else:
+            return set()
 
 
 class SynchrotronTyping(object):
@@ -245,10 +249,7 @@ class SynchrotronApplicationService(object):
 
 
 class SynchrotronServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = SynchrotronSlavedStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = SynchrotronSlavedStore
 
     def _listen_http(self, listener_config):
         port = listener_config["port"]
@@ -305,7 +306,7 @@ class SynchrotronServer(HomeServer):
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
                     logger.warn(("Metrics listener configured, but "
-                                 "collect_metrics is not enabled!"))
+                                 "enable_metrics is not True!"))
                 else:
                     _base.listen_metrics(listener["bind_addresses"],
                                          listener["port"])
@@ -334,8 +335,9 @@ class SyncReplicationHandler(ReplicationClientHandler):
         self.presence_handler = hs.get_presence_handler()
         self.notifier = hs.get_notifier()
 
+    @defer.inlineCallbacks
     def on_rdata(self, stream_name, token, rows):
-        super(SyncReplicationHandler, self).on_rdata(stream_name, token, rows)
+        yield super(SyncReplicationHandler, self).on_rdata(stream_name, token, rows)
         run_in_background(self.process_and_notify, stream_name, token, rows)
 
     def get_streams_to_replicate(self):
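
The removed did_forget assignment used a borrowing idiom: pulling the plain function out of another class's __dict__ attaches it to this class without inheriting anything else. It is presumably redundant now that the slaved event store provides did_forget itself. A toy version of the idiom for reference:

    class Donor(object):
        def greet(self):
            return "hi from " + type(self).__name__

    class Borrower(object):
        # Take the plain function, not a bound method, so it binds to
        # Borrower instances at call time.
        greet = Donor.__dict__["greet"]

    print(Borrower().greet())  # "hi from Borrower"
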
diff --git a/synapse/app/synctl.py b/synapse/app/synctl.py
index 56ae086128..d658f967ba 100755
--- a/synapse/app/synctl.py
+++ b/synapse/app/synctl.py
@@ -16,16 +16,19 @@
 
 import argparse
 import collections
+import errno
 import glob
 import os
 import os.path
 import signal
 import subprocess
 import sys
-import yaml
-import errno
 import time
 
+from six import iteritems
+
+import yaml
+
 SYNAPSE = [sys.executable, "-B", "-m", "synapse.app.homeserver"]
 
 GREEN = "\x1b[1;32m"
@@ -172,7 +175,7 @@ def main():
         os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor)
 
     cache_factors = config.get("synctl_cache_factors", {})
-    for cache_name, factor in cache_factors.iteritems():
+    for cache_name, factor in iteritems(cache_factors):
         os.environ["SYNAPSE_CACHE_FACTOR_" + cache_name.upper()] = str(factor)
 
     worker_configfiles = []
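
The iteritems swaps throughout this diff are the standard six idiom: dict.iteritems() only exists on Python 2, so six.iteritems(d) dispatches to d.iteritems() there and to d.items() on Python 3, yielding (key, value) pairs either way:

    from six import iteritems

    cache_factors = {"state_cache": 2.0, "event_cache": 0.5}
    for cache_name, factor in iteritems(cache_factors):
        print(cache_name.upper(), str(factor))
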
diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py
index ada1c13cec..96ffcaf073 100644
--- a/synapse/app/user_dir.py
+++ b/synapse/app/user_dir.py
@@ -17,6 +17,9 @@
 import logging
 import sys
 
+from twisted.internet import defer, reactor
+from twisted.web.resource import NoResource
+
 import synapse
 from synapse import events
 from synapse.app import _base
@@ -43,8 +46,6 @@ from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.logcontext import LoggingContext, run_in_background
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
-from twisted.internet import reactor, defer
-from twisted.web.resource import NoResource
 
 logger = logging.getLogger("synapse.app.user_dir")
 
@@ -93,10 +94,7 @@ class UserDirectorySlaveStore(
 
 
 class UserDirectoryServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = UserDirectorySlaveStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = UserDirectorySlaveStore
 
     def _listen_http(self, listener_config):
         port = listener_config["port"]
@@ -150,7 +148,7 @@ class UserDirectoryServer(HomeServer):
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
                     logger.warn(("Metrics listener configured, but "
-                                 "collect_metrics is not enabled!"))
+                                 "enable_metrics is not True!"))
                 else:
                     _base.listen_metrics(listener["bind_addresses"],
                                          listener["port"])
@@ -168,8 +166,9 @@ class UserDirectoryReplicationHandler(ReplicationClientHandler):
         super(UserDirectoryReplicationHandler, self).__init__(hs.get_datastore())
         self.user_directory = hs.get_user_directory_handler()
 
+    @defer.inlineCallbacks
     def on_rdata(self, stream_name, token, rows):
-        super(UserDirectoryReplicationHandler, self).on_rdata(
+        yield super(UserDirectoryReplicationHandler, self).on_rdata(
             stream_name, token, rows
         )
         if stream_name == "current_state_deltas":
@@ -213,11 +212,13 @@ def start(config_options):
     config.update_user_directory = True
 
     tls_server_context_factory = context_factory.ServerContextFactory(config)
+    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)
 
     ps = UserDirectoryServer(
         config.server_name,
         db_config=config.database_config,
         tls_server_context_factory=tls_server_context_factory,
+        tls_client_options_factory=tls_client_options_factory,
         config=config,
         version_string="Synapse/" + get_version_string(synapse),
         database_engine=database_engine,
diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py
index d1c598622a..57ed8a3ca2 100644
--- a/synapse/appservice/__init__.py
+++ b/synapse/appservice/__init__.py
@@ -12,17 +12,17 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from synapse.api.constants import EventTypes
-from synapse.util.caches.descriptors import cachedInlineCallbacks
-from synapse.types import GroupID, get_domain_from_id
-
-from twisted.internet import defer
-
 import logging
 import re
 
 from six import string_types
 
+from twisted.internet import defer
+
+from synapse.api.constants import EventTypes
+from synapse.types import GroupID, get_domain_from_id
+from synapse.util.caches.descriptors import cachedInlineCallbacks
+
 logger = logging.getLogger(__name__)
 
 
@@ -85,7 +85,8 @@ class ApplicationService(object):
     NS_LIST = [NS_USERS, NS_ALIASES, NS_ROOMS]
 
     def __init__(self, token, hostname, url=None, namespaces=None, hs_token=None,
-                 sender=None, id=None, protocols=None, rate_limited=True):
+                 sender=None, id=None, protocols=None, rate_limited=True,
+                 ip_range_whitelist=None):
         self.token = token
         self.url = url
         self.hs_token = hs_token
@@ -93,6 +94,7 @@ class ApplicationService(object):
         self.server_name = hostname
         self.namespaces = self._check_namespaces(namespaces)
         self.id = id
+        self.ip_range_whitelist = ip_range_whitelist
 
         if "|" in self.id:
             raise Exception("application service ID cannot contain '|' character")
diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py
index 47251fb6ad..6980e5890e 100644
--- a/synapse/appservice/api.py
+++ b/synapse/appservice/api.py
@@ -12,19 +12,19 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import logging
+import urllib
+
+from prometheus_client import Counter
+
 from twisted.internet import defer
 
 from synapse.api.constants import ThirdPartyEntityKind
 from synapse.api.errors import CodeMessageException
-from synapse.http.client import SimpleHttpClient
 from synapse.events.utils import serialize_event
-from synapse.util.caches.response_cache import ResponseCache
+from synapse.http.client import SimpleHttpClient
 from synapse.types import ThirdPartyInstanceID
-
-import logging
-import urllib
-
-from prometheus_client import Counter
+from synapse.util.caches.response_cache import ResponseCache
 
 logger = logging.getLogger(__name__)
 
diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py
index 6eddbc0828..2430814796 100644
--- a/synapse/appservice/scheduler.py
+++ b/synapse/appservice/scheduler.py
@@ -48,14 +48,14 @@ UP & quit           +---------- YES                       SUCCESS
 This is all tied together by the AppServiceScheduler which DIs the required
 components.
 """
+import logging
+
 from twisted.internet import defer
 
 from synapse.appservice import ApplicationServiceState
 from synapse.util.logcontext import run_in_background
 from synapse.util.metrics import Measure
 
-import logging
-
 logger = logging.getLogger(__name__)
 
 
diff --git a/synapse/config/_base.py b/synapse/config/_base.py
index b748ed2b0a..3d2e90dd5b 100644
--- a/synapse/config/_base.py
+++ b/synapse/config/_base.py
@@ -16,11 +16,12 @@
 import argparse
 import errno
 import os
-import yaml
 from textwrap import dedent
 
 from six import integer_types
 
+import yaml
+
 
 class ConfigError(Exception):
     pass
diff --git a/synapse/config/api.py b/synapse/config/api.py
index 20ba33226a..403d96ba76 100644
--- a/synapse/config/api.py
+++ b/synapse/config/api.py
@@ -12,10 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import Config
-
 from synapse.api.constants import EventTypes
 
+from ._base import Config
+
 
 class ApiConfig(Config):
 
diff --git a/synapse/config/appservice.py b/synapse/config/appservice.py
index 277305e184..3b161d708a 100644
--- a/synapse/config/appservice.py
+++ b/synapse/config/appservice.py
@@ -12,17 +12,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import Config, ConfigError
-
-from synapse.appservice import ApplicationService
-from synapse.types import UserID
-
-import yaml
 import logging
 
 from six import string_types
 from six.moves.urllib import parse as urlparse
 
+import yaml
+from netaddr import IPSet
+
+from synapse.appservice import ApplicationService
+from synapse.types import UserID
+
+from ._base import Config, ConfigError
+
 logger = logging.getLogger(__name__)
 
 
@@ -154,6 +156,13 @@ def _load_appservice(hostname, as_info, config_filename):
             " will not receive events or queries.",
             config_filename,
         )
+
+    ip_range_whitelist = None
+    if as_info.get('ip_range_whitelist'):
+        ip_range_whitelist = IPSet(
+            as_info.get('ip_range_whitelist')
+        )
+
     return ApplicationService(
         token=as_info["as_token"],
         hostname=hostname,
@@ -163,5 +172,6 @@ def _load_appservice(hostname, as_info, config_filename):
         sender=user_id,
         id=as_info["id"],
         protocols=protocols,
-        rate_limited=rate_limited
+        rate_limited=rate_limited,
+        ip_range_whitelist=ip_range_whitelist,
     )
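
A minimal sketch of how the new ip_range_whitelist behaves once parsed into a
netaddr IPSet, as in _load_appservice above; the CIDR ranges and the helper
function are illustrative assumptions, not taken from the diff:

    from netaddr import IPAddress, IPSet

    # Hypothetical whitelist, as might appear in an appservice's
    # registration file under 'ip_range_whitelist'.
    ip_range_whitelist = IPSet(["10.0.0.0/8", "192.168.1.0/24"])

    def is_source_allowed(remote_ip):
        # IPSet membership testing handles CIDR containment directly.
        return IPAddress(remote_ip) in ip_range_whitelist

    print(is_source_allowed("10.1.2.3"))  # True
    print(is_source_allowed("8.8.8.8"))   # False
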
diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py
index 1dea2ad024..2fd9c48abf 100644
--- a/synapse/config/homeserver.py
+++ b/synapse/config/homeserver.py
@@ -13,32 +13,32 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from .tls import TlsConfig
-from .server import ServerConfig
-from .logger import LoggingConfig
-from .database import DatabaseConfig
-from .ratelimiting import RatelimitConfig
-from .repository import ContentRepositoryConfig
-from .captcha import CaptchaConfig
-from .voip import VoipConfig
-from .registration import RegistrationConfig
-from .metrics import MetricsConfig
 from .api import ApiConfig
 from .appservice import AppServiceConfig
-from .key import KeyConfig
-from .saml2 import SAML2Config
+from .captcha import CaptchaConfig
 from .cas import CasConfig
-from .password import PasswordConfig
+from .consent_config import ConsentConfig
+from .database import DatabaseConfig
+from .emailconfig import EmailConfig
+from .groups import GroupsConfig
 from .jwt import JWTConfig
+from .key import KeyConfig
+from .logger import LoggingConfig
+from .metrics import MetricsConfig
+from .password import PasswordConfig
 from .password_auth_providers import PasswordAuthProviderConfig
-from .emailconfig import EmailConfig
-from .workers import WorkerConfig
 from .push import PushConfig
+from .ratelimiting import RatelimitConfig
+from .registration import RegistrationConfig
+from .repository import ContentRepositoryConfig
+from .saml2 import SAML2Config
+from .server import ServerConfig
+from .server_notices_config import ServerNoticesConfig
 from .spam_checker import SpamCheckerConfig
-from .groups import GroupsConfig
+from .tls import TlsConfig
 from .user_directory import UserDirectoryConfig
-from .consent_config import ConsentConfig
-from .server_notices_config import ServerNoticesConfig
+from .voip import VoipConfig
+from .workers import WorkerConfig
 
 
 class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,
diff --git a/synapse/config/jwt.py b/synapse/config/jwt.py
index 47f145c589..51e7f7e003 100644
--- a/synapse/config/jwt.py
+++ b/synapse/config/jwt.py
@@ -15,7 +15,6 @@
 
 from ._base import Config, ConfigError
 
-
 MISSING_JWT = (
     """Missing jwt library. This is required for jwt login.
 
diff --git a/synapse/config/key.py b/synapse/config/key.py
index d1382ad9ac..279c47bb48 100644
--- a/synapse/config/key.py
+++ b/synapse/config/key.py
@@ -13,21 +13,24 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import Config, ConfigError
+import hashlib
+import logging
+import os
 
-from synapse.util.stringutils import random_string
 from signedjson.key import (
-    generate_signing_key, is_signing_algorithm_supported,
-    decode_signing_key_base64, decode_verify_key_bytes,
-    read_signing_keys, write_signing_keys, NACL_ED25519
+    NACL_ED25519,
+    decode_signing_key_base64,
+    decode_verify_key_bytes,
+    generate_signing_key,
+    is_signing_algorithm_supported,
+    read_signing_keys,
+    write_signing_keys,
 )
 from unpaddedbase64 import decode_base64
-from synapse.util.stringutils import random_string_with_symbols
 
-import os
-import hashlib
-import logging
+from synapse.util.stringutils import random_string, random_string_with_symbols
 
+from ._base import Config, ConfigError
 
 logger = logging.getLogger(__name__)
 
diff --git a/synapse/config/logger.py b/synapse/config/logger.py
index 6a7228dc2f..3f187adfc8 100644
--- a/synapse/config/logger.py
+++ b/synapse/config/logger.py
@@ -12,17 +12,22 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
-from ._base import Config
-from synapse.util.logcontext import LoggingContextFilter
-from twisted.logger import globalLogBeginner, STDLibLogObserver
 import logging
 import logging.config
-import yaml
-from string import Template
 import os
 import signal
+import sys
+from string import Template
+
+import yaml
+
+from twisted.logger import STDLibLogObserver, globalLogBeginner
 
+import synapse
+from synapse.util.logcontext import LoggingContextFilter
+from synapse.util.versionstring import get_version_string
+
+from ._base import Config
 
 DEFAULT_LOG_CONFIG = Template("""
 version: 1
@@ -163,7 +168,8 @@ def setup_logging(config, use_worker_options=False):
         if log_file:
             # TODO: Customisable file size / backup count
             handler = logging.handlers.RotatingFileHandler(
-                log_file, maxBytes=(1000 * 1000 * 100), backupCount=3
+                log_file, maxBytes=(1000 * 1000 * 100), backupCount=3,
+                encoding='utf8'
             )
 
             def sighup(signum, stack):
@@ -188,9 +194,8 @@ def setup_logging(config, use_worker_options=False):
 
         def sighup(signum, stack):
             # it might be better to use a file watcher or something for this.
-            logging.info("Reloading log config from %s due to SIGHUP",
-                         log_config)
             load_log_config()
+            logging.info("Reloaded log config from %s due to SIGHUP", log_config)
 
         load_log_config()
 
@@ -202,6 +207,15 @@ def setup_logging(config, use_worker_options=False):
     if getattr(signal, "SIGHUP"):
         signal.signal(signal.SIGHUP, sighup)
 
+    # make sure that the first thing we log is a thing we can grep backwards
+    # for
+    logging.warn("***** STARTING SERVER *****")
+    logging.warn(
+        "Server %s version %s",
+        sys.argv[0], get_version_string(synapse),
+    )
+    logging.info("Server hostname: %s", config.server_name)
+
     # It's critical to point twisted's internal logging somewhere, otherwise it
     # stacks up and leaks up to 64K objects;
     # see: https://twistedmatrix.com/trac/ticket/8164
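
For reference, a self-contained sketch of the rotating-file logging configured
above, including the new encoding argument and the grep-friendly startup
banner; the log path is an assumption:

    import logging
    import logging.handlers

    handler = logging.handlers.RotatingFileHandler(
        "homeserver.log",  # illustrative path
        maxBytes=(1000 * 1000 * 100), backupCount=3, encoding='utf8',
    )
    logging.getLogger().addHandler(handler)
    logging.warning("***** STARTING SERVER *****")
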
diff --git a/synapse/config/password_auth_providers.py b/synapse/config/password_auth_providers.py
index 6602c5b4c7..f4066abc28 100644
--- a/synapse/config/password_auth_providers.py
+++ b/synapse/config/password_auth_providers.py
@@ -13,10 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import Config
-
 from synapse.util.module_loader import load_module
 
+from ._base import Config
+
 LDAP_PROVIDER = 'ldap_auth_provider.LdapAuthProvider'
 
 
diff --git a/synapse/config/registration.py b/synapse/config/registration.py
index c5384b3ad4..0fb964eb67 100644
--- a/synapse/config/registration.py
+++ b/synapse/config/registration.py
@@ -13,11 +13,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import Config
+from distutils.util import strtobool
 
 from synapse.util.stringutils import random_string_with_symbols
 
-from distutils.util import strtobool
+from ._base import Config
 
 
 class RegistrationConfig(Config):
diff --git a/synapse/config/repository.py b/synapse/config/repository.py
index 81ecf9778c..fc909c1fac 100644
--- a/synapse/config/repository.py
+++ b/synapse/config/repository.py
@@ -13,11 +13,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import Config, ConfigError
 from collections import namedtuple
 
 from synapse.util.module_loader import load_module
 
+from ._base import Config, ConfigError
 
 MISSING_NETADDR = (
     "Missing netaddr library. This is required for URL preview API."
diff --git a/synapse/config/server.py b/synapse/config/server.py
index 968ecd9ea0..c1c7c0105e 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -16,6 +16,8 @@
 
 import logging
 
+from synapse.http.endpoint import parse_and_validate_server_name
+
 from ._base import Config, ConfigError
 
 logger = logging.Logger(__name__)
@@ -25,6 +27,12 @@ class ServerConfig(Config):
 
     def read_config(self, config):
         self.server_name = config["server_name"]
+
+        try:
+            parse_and_validate_server_name(self.server_name)
+        except ValueError as e:
+            raise ConfigError(str(e))
+
         self.pid_file = self.abspath(config.get("pid_file"))
         self.web_client = config["web_client"]
         self.web_client_location = config.get("web_client_location", None)
@@ -41,6 +49,9 @@ class ServerConfig(Config):
         # "disable" federation
         self.send_federation = config.get("send_federation", True)
 
+        # Whether to enable user presence.
+        self.use_presence = config.get("use_presence", True)
+
         # Whether to update the user directory or not. This should be set to
         # false only if we are updating the user directory in a worker
         self.update_user_directory = config.get("update_user_directory", True)
@@ -59,6 +70,31 @@ class ServerConfig(Config):
             "block_non_admin_invites", False,
         )
 
+        # Options to control access by tracking MAU
+        self.limit_usage_by_mau = config.get("limit_usage_by_mau", False)
+        self.max_mau_value = 0
+        if self.limit_usage_by_mau:
+            self.max_mau_value = config.get(
+                "max_mau_value", 0,
+            )
+
+        self.mau_limits_reserved_threepids = config.get(
+            "mau_limit_reserved_threepids", []
+        )
+
+        self.mau_trial_days = config.get(
+            "mau_trial_days", 0,
+        )
+
+        # Options to disable HS
+        self.hs_disabled = config.get("hs_disabled", False)
+        self.hs_disabled_message = config.get("hs_disabled_message", "")
+        self.hs_disabled_limit_type = config.get("hs_disabled_limit_type", "")
+
+        # Admin uri to direct users at should their instance become blocked
+        # due to resource constraints
+        self.admin_contact = config.get("admin_contact", None)
+
         # FIXME: federation_domain_whitelist needs sytests
         self.federation_domain_whitelist = None
         federation_domain_whitelist = config.get(
@@ -162,8 +198,8 @@ class ServerConfig(Config):
             })
 
     def default_config(self, server_name, **kwargs):
-        if ":" in server_name:
-            bind_port = int(server_name.split(":")[1])
+        _, bind_port = parse_and_validate_server_name(server_name)
+        if bind_port is not None:
             unsecure_port = bind_port - 400
         else:
             bind_port = 8448
@@ -201,6 +237,8 @@ class ServerConfig(Config):
         # different cores. See
         # https://www.mirantis.com/blog/improve-performance-python-programs-restricting-single-cpu/.
         #
+        # This setting requires the affinity package to be installed!
+        #
         # cpu_affinity: 0xFFFFFFFF
 
         # Whether to serve a web client from the HTTP/HTTPS root resource.
@@ -220,6 +258,9 @@ class ServerConfig(Config):
         # hard limit.
         soft_file_limit: 0
 
+        # Set to false to disable presence tracking on this homeserver.
+        use_presence: true
+
         # The GC threshold parameters to pass to `gc.set_threshold`, if defined
         # gc_thresholds: [700, 10, 10]
 
@@ -311,6 +352,33 @@ class ServerConfig(Config):
           # - port: 9000
           #   bind_addresses: ['::1', '127.0.0.1']
           #   type: manhole
+
+
+          # Homeserver blocking
+          #
+          # How to reach the server admin, used in ResourceLimitError
+          # admin_contact: 'mailto:admin@server.com'
+          #
+          # Global block config
+          #
+          # hs_disabled: False
+          # hs_disabled_message: 'Human readable reason for why the HS is blocked'
+          # hs_disabled_limit_type: 'error code(str), to help clients decode reason'
+          #
+          # Monthly Active User Blocking
+          #
+          # Enables monthly active user checking
+          # limit_usage_by_mau: False
+          # max_mau_value: 50
+          # mau_trial_days: 2
+          #
+          # Sometimes the server admin will want to ensure certain accounts are
+          # never blocked by mau checking. These accounts are specified here.
+          #
+          # mau_limit_reserved_threepids:
+          # - medium: 'email'
+          #   address: 'reserved_user@example.com'
+
         """ % locals()
 
     def read_arguments(self, args):
@@ -336,6 +404,23 @@ class ServerConfig(Config):
                                   " service on the given port.")
 
 
+def is_threepid_reserved(config, threepid):
+    """Check the threepid against the reserved threepid config
+    Args:
+        config (ServerConfig): to access server config attributes
+        threepid (dict): The threepid to test for
+
+    Returns:
+        bool: whether the threepid under test is a reserved user
+    """
+
+    for tp in config.mau_limits_reserved_threepids:
+        if (threepid['medium'] == tp['medium']
+                and threepid['address'] == tp['address']):
+            return True
+    return False
+
+
 def read_gc_thresholds(thresholds):
     """Reads the three integer thresholds for garbage collection. Ensures that
     the thresholds are integers if thresholds are supplied.
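
A self-contained sketch of the reserved-threepid check: the function body is
copied from the hunk above, while FakeServerConfig is a hypothetical stand-in
for ServerConfig with only the attribute the check reads:

    def is_threepid_reserved(config, threepid):
        for tp in config.mau_limits_reserved_threepids:
            if (threepid['medium'] == tp['medium']
                    and threepid['address'] == tp['address']):
                return True
        return False

    class FakeServerConfig(object):
        mau_limits_reserved_threepids = [
            {'medium': 'email', 'address': 'reserved_user@example.com'},
        ]

    print(is_threepid_reserved(
        FakeServerConfig(),
        {'medium': 'email', 'address': 'reserved_user@example.com'},
    ))  # True
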
diff --git a/synapse/config/server_notices_config.py b/synapse/config/server_notices_config.py
index be1d1f762c..3c39850ac6 100644
--- a/synapse/config/server_notices_config.py
+++ b/synapse/config/server_notices_config.py
@@ -12,9 +12,10 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from ._base import Config
 from synapse.types import UserID
 
+from ._base import Config
+
 DEFAULT_CONFIG = """\
 # Server Notices room configuration
 #
diff --git a/synapse/config/tls.py b/synapse/config/tls.py
index b66154bc7c..fef1ea99cb 100644
--- a/synapse/config/tls.py
+++ b/synapse/config/tls.py
@@ -13,15 +13,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import Config
-
-from OpenSSL import crypto
-import subprocess
 import os
-
+import subprocess
 from hashlib import sha256
+
 from unpaddedbase64 import encode_base64
 
+from OpenSSL import crypto
+
+from ._base import Config
+
 GENERATE_DH_PARAMS = False
 
 
diff --git a/synapse/config/voip.py b/synapse/config/voip.py
index 3a4e16fa96..d07bd24ffd 100644
--- a/synapse/config/voip.py
+++ b/synapse/config/voip.py
@@ -30,10 +30,10 @@ class VoipConfig(Config):
         ## Turn ##
 
         # The public URIs of the TURN server to give to clients
-        turn_uris: []
+        #turn_uris: []
 
         # The shared secret used to compute passwords for the TURN server
-        turn_shared_secret: "YOUR_SHARED_SECRET"
+        #turn_shared_secret: "YOUR_SHARED_SECRET"
 
         # The Username and password if the TURN server needs them and
         # does not use a token
diff --git a/synapse/crypto/context_factory.py b/synapse/crypto/context_factory.py
index 0397f73ab4..1a391adec1 100644
--- a/synapse/crypto/context_factory.py
+++ b/synapse/crypto/context_factory.py
@@ -11,19 +11,22 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import logging
+
+from zope.interface import implementer
 
-from twisted.internet import ssl
 from OpenSSL import SSL, crypto
 from twisted.internet._sslverify import _defaultCurveName
-
-import logging
+from twisted.internet.interfaces import IOpenSSLClientConnectionCreator
+from twisted.internet.ssl import CertificateOptions, ContextFactory
+from twisted.python.failure import Failure
 
 logger = logging.getLogger(__name__)
 
 
-class ServerContextFactory(ssl.ContextFactory):
+class ServerContextFactory(ContextFactory):
     """Factory for PyOpenSSL SSL contexts that are used to handle incoming
-    connections and to make connections to remote servers."""
+    connections."""
 
     def __init__(self, config):
         self._context = SSL.Context(SSL.SSLv23_METHOD)
@@ -48,3 +51,78 @@ class ServerContextFactory(ssl.ContextFactory):
 
     def getContext(self):
         return self._context
+
+
+def _idnaBytes(text):
+    """
+    Convert some text typed by a human into some ASCII bytes. This is a
+    copy of twisted.internet._idna._idnaBytes. For documentation, see the
+    twisted documentation.
+    """
+    try:
+        import idna
+    except ImportError:
+        return text.encode("idna")
+    else:
+        return idna.encode(text)
+
+
+def _tolerateErrors(wrapped):
+    """
+    Wrap up an info_callback for pyOpenSSL so that if something goes wrong
+    the error is immediately logged and the connection is dropped if possible.
+    This is a copy of twisted.internet._sslverify._tolerateErrors. For
+    documentation, see the twisted documentation.
+    """
+
+    def infoCallback(connection, where, ret):
+        try:
+            return wrapped(connection, where, ret)
+        except:  # noqa: E722, taken from the twisted implementation
+            f = Failure()
+            logger.exception("Error during info_callback")
+            connection.get_app_data().failVerification(f)
+
+    return infoCallback
+
+
+@implementer(IOpenSSLClientConnectionCreator)
+class ClientTLSOptions(object):
+    """
+    Client creator for TLS without certificate identity verification. This is a
+    copy of twisted.internet._sslverify.ClientTLSOptions with the identity
+    verification left out. For documentation, see the twisted documentation.
+    """
+
+    def __init__(self, hostname, ctx):
+        self._ctx = ctx
+        self._hostname = hostname
+        self._hostnameBytes = _idnaBytes(hostname)
+        ctx.set_info_callback(
+            _tolerateErrors(self._identityVerifyingInfoCallback)
+        )
+
+    def clientConnectionForTLS(self, tlsProtocol):
+        context = self._ctx
+        connection = SSL.Connection(context, None)
+        connection.set_app_data(tlsProtocol)
+        return connection
+
+    def _identityVerifyingInfoCallback(self, connection, where, ret):
+        if where & SSL.SSL_CB_HANDSHAKE_START:
+            connection.set_tlsext_host_name(self._hostnameBytes)
+
+
+class ClientTLSOptionsFactory(object):
+    """Factory for Twisted ClientTLSOptions that are used to make connections
+    to remote servers for federation."""
+
+    def __init__(self, config):
+        # We don't use config options yet
+        pass
+
+    def get_options(self, host):
+        return ClientTLSOptions(
+            host.decode('utf-8'),
+            CertificateOptions(verify=False).getContext()
+        )
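
A hedged sketch of how the new ClientTLSOptionsFactory might be consumed when
opening an outbound federation connection. wrapClientTLS is standard Twisted
and accepts any IOpenSSLClientConnectionCreator, which ClientTLSOptions
implements; the endpoint wiring here is an assumption, not shown in the diff:

    from twisted.internet import endpoints, reactor

    factory = ClientTLSOptionsFactory(config=None)  # config is unused so far
    tls_options = factory.get_options(b"matrix.org")

    # ClientTLSOptions sets the SNI hostname at handshake start but skips
    # certificate verification (CertificateOptions(verify=False) above).
    tcp = endpoints.TCP4ClientEndpoint(reactor, "matrix.org", 8448)
    tls = endpoints.wrapClientTLS(tls_options, tcp)
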
diff --git a/synapse/crypto/event_signing.py b/synapse/crypto/event_signing.py
index aaa3efaca3..8774b28967 100644
--- a/synapse/crypto/event_signing.py
+++ b/synapse/crypto/event_signing.py
@@ -15,15 +15,15 @@
 # limitations under the License.
 
 
-from synapse.api.errors import SynapseError, Codes
-from synapse.events.utils import prune_event
+import hashlib
+import logging
 
 from canonicaljson import encode_canonical_json
-from unpaddedbase64 import encode_base64, decode_base64
 from signedjson.sign import sign_json
+from unpaddedbase64 import decode_base64, encode_base64
 
-import hashlib
-import logging
+from synapse.api.errors import Codes, SynapseError
+from synapse.events.utils import prune_event
 
 logger = logging.getLogger(__name__)
 
diff --git a/synapse/crypto/keyclient.py b/synapse/crypto/keyclient.py
index f1fd488b90..e94400b8e2 100644
--- a/synapse/crypto/keyclient.py
+++ b/synapse/crypto/keyclient.py
@@ -13,14 +13,18 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.util import logcontext
-from twisted.web.http import HTTPClient
-from twisted.internet.protocol import Factory
-from twisted.internet import defer, reactor
-from synapse.http.endpoint import matrix_federation_endpoint
-import simplejson as json
 import logging
 
+from canonicaljson import json
+
+from twisted.internet import defer, reactor
+from twisted.internet.error import ConnectError
+from twisted.internet.protocol import Factory
+from twisted.names.error import DomainError
+from twisted.web.http import HTTPClient
+
+from synapse.http.endpoint import matrix_federation_endpoint
+from synapse.util import logcontext
 
 logger = logging.getLogger(__name__)
 
@@ -28,14 +32,14 @@ KEY_API_V1 = b"/_matrix/key/v1/"
 
 
 @defer.inlineCallbacks
-def fetch_server_key(server_name, ssl_context_factory, path=KEY_API_V1):
+def fetch_server_key(server_name, tls_client_options_factory, path=KEY_API_V1):
     """Fetch the keys for a remote server."""
 
     factory = SynapseKeyClientFactory()
     factory.path = path
     factory.host = server_name
     endpoint = matrix_federation_endpoint(
-        reactor, server_name, ssl_context_factory, timeout=30
+        reactor, server_name, tls_client_options_factory, timeout=30
     )
 
     for i in range(5):
@@ -45,12 +49,14 @@ def fetch_server_key(server_name, ssl_context_factory, path=KEY_API_V1):
                 server_response, server_certificate = yield protocol.remote_key
                 defer.returnValue((server_response, server_certificate))
         except SynapseKeyClientError as e:
-            logger.exception("Error getting key for %r" % (server_name,))
+            logger.warn("Error getting key for %r: %s", server_name, e)
             if e.status.startswith("4"):
                 # Don't retry for 4xx responses.
                 raise IOError("Cannot get key for %r" % server_name)
+        except (ConnectError, DomainError) as e:
+            logger.warn("Error getting key for %r: %s", server_name, e)
         except Exception as e:
-            logger.exception(e)
+            logger.exception("Error getting key for %r", server_name)
     raise IOError("Cannot get key for %r" % server_name)
 
 
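
The retry policy above can be summarised with a generic sketch; the names are
hypothetical, and the 4xx-aborts-immediately branch is omitted because it
depends on SynapseKeyClientError from this module:

    from twisted.internet.error import ConnectError
    from twisted.names.error import DomainError

    def fetch_with_retries(attempt, server_name, attempts=5):
        # 'attempt' stands in for one connection attempt in
        # fetch_server_key and raises on failure.
        for _ in range(attempts):
            try:
                return attempt()
            except (ConnectError, DomainError):
                # Transient network/DNS failure: the real code logs a
                # warning and retries; here we just try again.
                continue
        raise IOError("Cannot get key for %r" % server_name)
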
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py
index 9b17ef0a08..30e2742102 100644
--- a/synapse/crypto/keyring.py
+++ b/synapse/crypto/keyring.py
@@ -14,35 +14,37 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.crypto.keyclient import fetch_server_key
-from synapse.api.errors import SynapseError, Codes
-from synapse.util import unwrapFirstError, logcontext
-from synapse.util.logcontext import (
-    PreserveLoggingContext,
-    preserve_fn,
-    run_in_background,
-)
-from synapse.util.metrics import Measure
-
-from twisted.internet import defer
+import hashlib
+import logging
+import urllib
+from collections import namedtuple
 
-from signedjson.sign import (
-    verify_signed_json, signature_ids, sign_json, encode_canonical_json,
-    SignatureVerifyException,
-)
 from signedjson.key import (
-    is_signing_algorithm_supported, decode_verify_key_bytes,
+    decode_verify_key_bytes,
     encode_verify_key_base64,
+    is_signing_algorithm_supported,
+)
+from signedjson.sign import (
+    SignatureVerifyException,
+    encode_canonical_json,
+    sign_json,
+    signature_ids,
+    verify_signed_json,
 )
 from unpaddedbase64 import decode_base64, encode_base64
 
 from OpenSSL import crypto
+from twisted.internet import defer
 
-from collections import namedtuple
-import urllib
-import hashlib
-import logging
-
+from synapse.api.errors import Codes, SynapseError
+from synapse.crypto.keyclient import fetch_server_key
+from synapse.util import logcontext, unwrapFirstError
+from synapse.util.logcontext import (
+    PreserveLoggingContext,
+    preserve_fn,
+    run_in_background,
+)
+from synapse.util.metrics import Measure
 
 logger = logging.getLogger(__name__)
 
@@ -510,7 +512,7 @@ class Keyring(object):
                 continue
 
             (response, tls_certificate) = yield fetch_server_key(
-                server_name, self.hs.tls_server_context_factory,
+                server_name, self.hs.tls_client_options_factory,
                 path=(b"/_matrix/key/v2/server/%s" % (
                     urllib.quote(requested_key_id),
                 )).encode("ascii"),
@@ -653,7 +655,7 @@ class Keyring(object):
         # Try to fetch the key from the remote server.
 
         (response, tls_certificate) = yield fetch_server_key(
-            server_name, self.hs.tls_server_context_factory
+            server_name, self.hs.tls_client_options_factory
         )
 
         # Check the response.
diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index eaf9cecde6..6baeccca38 100644
--- a/synapse/event_auth.py
+++ b/synapse/event_auth.py
@@ -17,11 +17,11 @@ import logging
 
 from canonicaljson import encode_canonical_json
 from signedjson.key import decode_verify_key_bytes
-from signedjson.sign import verify_signed_json, SignatureVerifyException
+from signedjson.sign import SignatureVerifyException, verify_signed_json
 from unpaddedbase64 import decode_base64
 
-from synapse.api.constants import EventTypes, Membership, JoinRules
-from synapse.api.errors import AuthError, SynapseError, EventSizeError
+from synapse.api.constants import KNOWN_ROOM_VERSIONS, EventTypes, JoinRules, Membership
+from synapse.api.errors import AuthError, EventSizeError, SynapseError
 from synapse.types import UserID, get_domain_from_id
 
 logger = logging.getLogger(__name__)
@@ -34,9 +34,11 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True):
         event: the event being checked.
         auth_events (dict: event-key -> event): the existing room state.
 
+    Raises:
+        AuthError if the checks fail
 
     Returns:
-        True if the auth checks pass.
+        None if the auth checks pass.
     """
     if do_size_check:
         _check_size_limits(event)
@@ -71,17 +73,27 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True):
         # Oh, we don't know what the state of the room was, so we
         # are trusting that this is allowed (at least for now)
         logger.warn("Trusting event: %s", event.event_id)
-        return True
+        return
 
     if event.type == EventTypes.Create:
+        sender_domain = get_domain_from_id(event.sender)
         room_id_domain = get_domain_from_id(event.room_id)
         if room_id_domain != sender_domain:
             raise AuthError(
                 403,
                 "Creation event's room_id domain does not match sender's"
             )
+
+        room_version = event.content.get("room_version", "1")
+        if room_version not in KNOWN_ROOM_VERSIONS:
+            raise AuthError(
+                403,
+                "room appears to have unsupported version %s" % (
+                    room_version,
+                ))
         # FIXME
-        return True
+        logger.debug("Allowing! %s", event)
+        return
 
     creation_event = auth_events.get((EventTypes.Create, ""), None)
 
@@ -118,7 +130,8 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True):
                 403,
                 "Alias event's state_key does not match sender's domain"
             )
-        return True
+        logger.debug("Allowing! %s", event)
+        return
 
     if logger.isEnabledFor(logging.DEBUG):
         logger.debug(
@@ -127,14 +140,9 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True):
         )
 
     if event.type == EventTypes.Member:
-        allowed = _is_membership_change_allowed(
-            event, auth_events
-        )
-        if allowed:
-            logger.debug("Allowing! %s", event)
-        else:
-            logger.debug("Denying! %s", event)
-        return allowed
+        _is_membership_change_allowed(event, auth_events)
+        logger.debug("Allowing! %s", event)
+        return
 
     _check_event_sender_in_room(event, auth_events)
 
@@ -153,7 +161,8 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True):
                 )
             )
         else:
-            return True
+            logger.debug("Allowing! %s", event)
+            return
 
     _can_send_event(event, auth_events)
 
@@ -200,7 +209,7 @@ def _is_membership_change_allowed(event, auth_events):
         create = auth_events.get(key)
         if create and event.prev_events[0][0] == create.event_id:
             if create.content["creator"] == event.state_key:
-                return True
+                return
 
     target_user_id = event.state_key
 
@@ -265,13 +274,13 @@ def _is_membership_change_allowed(event, auth_events):
             raise AuthError(
                 403, "%s is banned from the room" % (target_user_id,)
             )
-        return True
+        return
 
     if Membership.JOIN != membership:
         if (caller_invited
                 and Membership.LEAVE == membership
                 and target_user_id == event.user_id):
-            return True
+            return
 
         if not caller_in_room:  # caller isn't joined
             raise AuthError(
@@ -334,8 +343,6 @@ def _is_membership_change_allowed(event, auth_events):
     else:
         raise AuthError(500, "Unknown membership %s" % membership)
 
-    return True
-
 
 def _check_event_sender_in_room(event, auth_events):
     key = (EventTypes.Member, event.user_id, )
@@ -355,35 +362,46 @@ def _check_joined_room(member, user_id, room_id):
         ))
 
 
-def get_send_level(etype, state_key, auth_events):
-    key = (EventTypes.PowerLevels, "", )
-    send_level_event = auth_events.get(key)
-    send_level = None
-    if send_level_event:
-        send_level = send_level_event.content.get("events", {}).get(
-            etype
-        )
-        if send_level is None:
-            if state_key is not None:
-                send_level = send_level_event.content.get(
-                    "state_default", 50
-                )
-            else:
-                send_level = send_level_event.content.get(
-                    "events_default", 0
-                )
+def get_send_level(etype, state_key, power_levels_event):
+    """Get the power level required to send an event of a given type
+
+    The federation spec [1] refers to this as "Required Power Level".
+
+    [1] https://matrix.org/docs/spec/server_server/unstable.html#definitions
 
-    if send_level:
-        send_level = int(send_level)
+    Args:
+        etype (str): type of event
+        state_key (str|None): state_key of state event, or None if it is not
+            a state event.
+        power_levels_event (synapse.events.EventBase|None): power levels event
+            in force at this point in the room
+    Returns:
+        int: power level required to send this event.
+    """
+
+    if power_levels_event:
+        power_levels_content = power_levels_event.content
     else:
-        send_level = 0
+        power_levels_content = {}
+
+    # see if we have a custom level for this event type
+    send_level = power_levels_content.get("events", {}).get(etype)
 
-    return send_level
+    # otherwise, fall back to the state_default/events_default.
+    if send_level is None:
+        if state_key is not None:
+            send_level = power_levels_content.get("state_default", 50)
+        else:
+            send_level = power_levels_content.get("events_default", 0)
+
+    return int(send_level)
 
 
 def _can_send_event(event, auth_events):
+    power_levels_event = _get_power_level_event(auth_events)
+
     send_level = get_send_level(
-        event.type, event.get("state_key", None), auth_events
+        event.type, event.get("state_key"), power_levels_event,
     )
     user_level = get_user_power_level(event.user_id, auth_events)
 
@@ -515,7 +533,11 @@ def _check_power_levels(event, auth_events):
                     "to your own"
                 )
 
-        if old_level > user_level or new_level > user_level:
+        # Check if the old and new levels are greater than the user level
+        # (if defined)
+        old_level_too_big = old_level is not None and old_level > user_level
+        new_level_too_big = new_level is not None and new_level > user_level
+        if old_level_too_big or new_level_too_big:
             raise AuthError(
                 403,
                 "You don't have permission to add ops level greater "
@@ -524,13 +546,22 @@ def _check_power_levels(event, auth_events):
 
 
 def _get_power_level_event(auth_events):
-    key = (EventTypes.PowerLevels, "", )
-    return auth_events.get(key)
+    return auth_events.get((EventTypes.PowerLevels, ""))
 
 
 def get_user_power_level(user_id, auth_events):
-    power_level_event = _get_power_level_event(auth_events)
+    """Get a user's power level
+
+    Args:
+        user_id (str): user's id to look up in power_levels
+        auth_events (dict[(str, str), synapse.events.EventBase]):
+            state in force at this point in the room (or rather, a subset of
+            it including at least the create event and power levels event).
 
+    Returns:
+        int: the user's power level in this room.
+    """
+    power_level_event = _get_power_level_event(auth_events)
     if power_level_event:
         level = power_level_event.content.get("users", {}).get(user_id)
         if not level:
@@ -541,6 +572,11 @@ def get_user_power_level(user_id, auth_events):
         else:
             return int(level)
     else:
+        # if there is no power levels event, the creator gets 100 and everyone
+        # else gets 0.
+
+        # some things which call this don't pass the create event: hack around
+        # that.
         key = (EventTypes.Create, "", )
         create_event = auth_events.get(key)
         if (create_event is not None and
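
A worked sketch of the get_send_level fallback rules documented above; the
function body is copied from the hunk (docstring elided) and FakePowerLevels
is a hypothetical stand-in for an event with only a content attribute:

    def get_send_level(etype, state_key, power_levels_event):
        if power_levels_event:
            power_levels_content = power_levels_event.content
        else:
            power_levels_content = {}

        # see if we have a custom level for this event type
        send_level = power_levels_content.get("events", {}).get(etype)

        # otherwise, fall back to the state_default/events_default.
        if send_level is None:
            if state_key is not None:
                send_level = power_levels_content.get("state_default", 50)
            else:
                send_level = power_levels_content.get("events_default", 0)

        return int(send_level)

    class FakePowerLevels(object):
        content = {"events": {"m.room.name": 75},
                   "state_default": 50, "events_default": 0}

    ev = FakePowerLevels()
    print(get_send_level("m.room.name", "", ev))         # 75 (custom level)
    print(get_send_level("m.room.topic", "", ev))        # 50 (state_default)
    print(get_send_level("m.room.message", None, ev))    # 0 (events_default)
    print(get_send_level("m.room.message", None, None))  # 0 (no PL event)
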
diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py
index cb08da4984..51f9084b90 100644
--- a/synapse/events/__init__.py
+++ b/synapse/events/__init__.py
@@ -13,9 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.util.frozenutils import freeze
 from synapse.util.caches import intern_dict
-
+from synapse.util.frozenutils import freeze
 
 # Whether we should use frozen_dict in FrozenEvent. Using frozen_dicts prevents
 # bugs where we accidentally share e.g. signature dicts. However, converting
diff --git a/synapse/events/builder.py b/synapse/events/builder.py
index 13fbba68c0..e662eaef10 100644
--- a/synapse/events/builder.py
+++ b/synapse/events/builder.py
@@ -13,13 +13,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from . import EventBase, FrozenEvent, _event_dict_property
+import copy
 
 from synapse.types import EventID
-
 from synapse.util.stringutils import random_string
 
-import copy
+from . import EventBase, FrozenEvent, _event_dict_property
 
 
 class EventBuilder(EventBase):
diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py
index 8e684d91b5..368b5f6ae4 100644
--- a/synapse/events/snapshot.py
+++ b/synapse/events/snapshot.py
@@ -13,22 +13,18 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from twisted.internet import defer
+from six import iteritems
 
 from frozendict import frozendict
 
+from twisted.internet import defer
+
+from synapse.util.logcontext import make_deferred_yieldable, run_in_background
+
 
 class EventContext(object):
     """
     Attributes:
-        current_state_ids (dict[(str, str), str]):
-            The current state map including the current event.
-            (type, state_key) -> event_id
-
-        prev_state_ids (dict[(str, str), str]):
-            The current state map excluding the current event.
-            (type, state_key) -> event_id
-
         state_group (int|None): state group id, if the state has been stored
             as a state group. This is usually only None if e.g. the event is
             an outlier.
@@ -45,38 +41,77 @@ class EventContext(object):
 
         prev_state_events (?): XXX: is this ever set to anything other than
             the empty list?
+
+        _current_state_ids (dict[(str, str), str]|None):
+            The current state map including the current event. None if outlier
+            or we haven't fetched the state from DB yet.
+            (type, state_key) -> event_id
+
+        _prev_state_ids (dict[(str, str), str]|None):
+            The current state map excluding the current event. None if outlier
+            or we haven't fetched the state from DB yet.
+            (type, state_key) -> event_id
+
+        _fetching_state_deferred (Deferred|None): Resolves when *_state_ids have
+            been calculated. None if we haven't started calculating yet
+
+        _event_type (str): The type of the event the context is associated with.
+            Only set when state has not been fetched yet.
+
+        _event_state_key (str|None): The state_key of the event the context is
+            associated with. Only set when state has not been fetched yet.
+
+        _prev_state_id (str|None): If the event associated with the context is
+            a state event, then `_prev_state_id` is the event_id of the state
+            that was replaced.
+            Only set when state has not been fetched yet.
     """
 
     __slots__ = [
-        "current_state_ids",
-        "prev_state_ids",
         "state_group",
         "rejected",
         "prev_group",
         "delta_ids",
         "prev_state_events",
         "app_service",
+        "_current_state_ids",
+        "_prev_state_ids",
+        "_prev_state_id",
+        "_event_type",
+        "_event_state_key",
+        "_fetching_state_deferred",
     ]
 
     def __init__(self):
+        self.prev_state_events = []
+        self.rejected = False
+        self.app_service = None
+
+    @staticmethod
+    def with_state(state_group, current_state_ids, prev_state_ids,
+                   prev_group=None, delta_ids=None):
+        context = EventContext()
+
         # The current state including the current event
-        self.current_state_ids = None
+        context._current_state_ids = current_state_ids
         # The current state excluding the current event
-        self.prev_state_ids = None
-        self.state_group = None
+        context._prev_state_ids = prev_state_ids
+        context.state_group = state_group
 
-        self.rejected = False
+        context._prev_state_id = None
+        context._event_type = None
+        context._event_state_key = None
+        context._fetching_state_deferred = defer.succeed(None)
 
         # A previously persisted state group and a delta between that
         # and this state.
-        self.prev_group = None
-        self.delta_ids = None
+        context.prev_group = prev_group
+        context.delta_ids = delta_ids
 
-        self.prev_state_events = None
+        return context
 
-        self.app_service = None
-
-    def serialize(self, event):
+    @defer.inlineCallbacks
+    def serialize(self, event, store):
         """Converts self to a type that can be serialized as JSON, and then
         deserialized by `deserialize`
 
@@ -92,11 +127,12 @@ class EventContext(object):
         # the prev_state_ids, so if we're a state event we include the event
         # id that we replaced in the state.
         if event.is_state():
-            prev_state_id = self.prev_state_ids.get((event.type, event.state_key))
+            prev_state_ids = yield self.get_prev_state_ids(store)
+            prev_state_id = prev_state_ids.get((event.type, event.state_key))
         else:
             prev_state_id = None
 
-        return {
+        defer.returnValue({
             "prev_state_id": prev_state_id,
             "event_type": event.type,
             "event_state_key": event.state_key if event.is_state() else None,
@@ -106,10 +142,9 @@ class EventContext(object):
             "delta_ids": _encode_state_dict(self.delta_ids),
             "prev_state_events": self.prev_state_events,
             "app_service_id": self.app_service.id if self.app_service else None
-        }
+        })
 
     @staticmethod
-    @defer.inlineCallbacks
     def deserialize(store, input):
         """Converts a dict that was produced by `serialize` back into a
         EventContext.
@@ -122,32 +157,115 @@ class EventContext(object):
             EventContext
         """
         context = EventContext()
+
+        # We use the state_group and prev_state_id stuff to pull the
+        # current_state_ids out of the DB and construct prev_state_ids.
+        context._prev_state_id = input["prev_state_id"]
+        context._event_type = input["event_type"]
+        context._event_state_key = input["event_state_key"]
+
+        context._current_state_ids = None
+        context._prev_state_ids = None
+        context._fetching_state_deferred = None
+
         context.state_group = input["state_group"]
-        context.rejected = input["rejected"]
         context.prev_group = input["prev_group"]
         context.delta_ids = _decode_state_dict(input["delta_ids"])
+
+        context.rejected = input["rejected"]
         context.prev_state_events = input["prev_state_events"]
 
-        # We use the state_group and prev_state_id stuff to pull the
-        # current_state_ids out of the DB and construct prev_state_ids.
-        prev_state_id = input["prev_state_id"]
-        event_type = input["event_type"]
-        event_state_key = input["event_state_key"]
+        app_service_id = input["app_service_id"]
+        if app_service_id:
+            context.app_service = store.get_app_service_by_id(app_service_id)
+
+        return context
+
+    @defer.inlineCallbacks
+    def get_current_state_ids(self, store):
+        """Gets the current state IDs
+
+        Returns:
+            Deferred[dict[(str, str), str]|None]: Returns None if state_group
+            is None, which happens when the associated event is an outlier.
+        """
+
+        if not self._fetching_state_deferred:
+            self._fetching_state_deferred = run_in_background(
+                self._fill_out_state, store,
+            )
+
+        yield make_deferred_yieldable(self._fetching_state_deferred)
+
+        defer.returnValue(self._current_state_ids)
+
+    @defer.inlineCallbacks
+    def get_prev_state_ids(self, store):
+        """Gets the prev state IDs
+
+        Returns:
+            Deferred[dict[(str, str), str]|None]: Returns None if state_group
+            is None, which happens when the associated event is an outlier.
+        """
+
+        if not self._fetching_state_deferred:
+            self._fetching_state_deferred = run_in_background(
+                self._fill_out_state, store,
+            )
 
-        context.current_state_ids = yield store.get_state_ids_for_group(
-            context.state_group,
+        yield make_deferred_yieldable(self._fetching_state_deferred)
+
+        defer.returnValue(self._prev_state_ids)
+
+    def get_cached_current_state_ids(self):
+        """Gets the current state IDs if we have them already cached.
+
+        Returns:
+            dict[(str, str), str]|None: Returns None if we haven't cached the
+            state or if state_group is None, which happens when the associated
+            event is an outlier.
+        """
+
+        return self._current_state_ids
+
+    @defer.inlineCallbacks
+    def _fill_out_state(self, store):
+        """Called to populate the _current_state_ids and _prev_state_ids
+        attributes by loading from the database.
+        """
+        if self.state_group is None:
+            return
+
+        self._current_state_ids = yield store.get_state_ids_for_group(
+            self.state_group,
         )
-        if prev_state_id and event_state_key:
-            context.prev_state_ids = dict(context.current_state_ids)
-            context.prev_state_ids[(event_type, event_state_key)] = prev_state_id
+        if self._prev_state_id and self._event_state_key is not None:
+            self._prev_state_ids = dict(self._current_state_ids)
+
+            key = (self._event_type, self._event_state_key)
+            self._prev_state_ids[key] = self._prev_state_id
         else:
-            context.prev_state_ids = context.current_state_ids
+            self._prev_state_ids = self._current_state_ids
 
-        app_service_id = input["app_service_id"]
-        if app_service_id:
-            context.app_service = store.get_app_service_by_id(app_service_id)
+    @defer.inlineCallbacks
+    def update_state(self, state_group, prev_state_ids, current_state_ids,
+                     prev_group, delta_ids):
+        """Replace the state in the context
+        """
+
+        # We need to make sure we wait for any ongoing fetching of state
+        # to complete so that the updated state doesn't get clobbered
+        if self._fetching_state_deferred:
+            yield make_deferred_yieldable(self._fetching_state_deferred)
+
+        self.state_group = state_group
+        self._prev_state_ids = prev_state_ids
+        self.prev_group = prev_group
+        self._current_state_ids = current_state_ids
+        self.delta_ids = delta_ids
 
-        defer.returnValue(context)
+        # We need to ensure that we've marked the state as having been fetched
+        self._fetching_state_deferred = defer.succeed(None)
 
 
 def _encode_state_dict(state_dict):
@@ -159,7 +277,7 @@ def _encode_state_dict(state_dict):
 
     return [
         (etype, state_key, v)
-        for (etype, state_key), v in state_dict.iteritems()
+        for (etype, state_key), v in iteritems(state_dict)
     ]
 
 
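
A hedged sketch of the new lazy-state API on EventContext: both accessors
share one database fetch, memoised via _fetching_state_deferred, so state is
only loaded when something actually asks for it. 'store' and 'context' are
assumed to be a datastore and an EventContext as defined above:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def inspect_context(store, context):
        # The first accessor kicks off _fill_out_state; the second reuses
        # the already-resolved deferred instead of hitting the DB again.
        current = yield context.get_current_state_ids(store)
        prev = yield context.get_prev_state_ids(store)
        defer.returnValue((current, prev))
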
diff --git a/synapse/events/utils.py b/synapse/events/utils.py
index 29ae086786..652941ca0d 100644
--- a/synapse/events/utils.py
+++ b/synapse/events/utils.py
@@ -13,14 +13,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.api.constants import EventTypes
-from . import EventBase
+import re
+
+from six import string_types
 
 from frozendict import frozendict
 
-import re
+from synapse.api.constants import EventTypes
 
-from six import string_types
+from . import EventBase
 
 # Split strings on "." but not "\." This uses a negative lookbehind assertion for '\'
 # (?<!stuff) matches if the current position in the string is not preceded
diff --git a/synapse/events/validator.py b/synapse/events/validator.py
index e0e5bf818c..cf184748a1 100644
--- a/synapse/events/validator.py
+++ b/synapse/events/validator.py
@@ -13,12 +13,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.types import EventID, RoomID, UserID
-from synapse.api.errors import SynapseError
-from synapse.api.constants import EventTypes, Membership
-
 from six import string_types
 
+from synapse.api.constants import EventTypes, Membership
+from synapse.api.errors import SynapseError
+from synapse.types import EventID, RoomID, UserID
+
 
 class EventValidator(object):
 
diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py
index 4cc98a3fe8..c11798093d 100644
--- a/synapse/federation/federation_base.py
+++ b/synapse/federation/federation_base.py
@@ -16,14 +16,15 @@ import logging
 
 import six
 
+from twisted.internet import defer
+
 from synapse.api.constants import MAX_DEPTH
-from synapse.api.errors import SynapseError, Codes
+from synapse.api.errors import Codes, SynapseError
 from synapse.crypto.event_signing import check_event_content_hash
 from synapse.events import FrozenEvent
 from synapse.events.utils import prune_event
-from synapse.http.servlet import assert_params_in_request
-from synapse.util import unwrapFirstError, logcontext
-from twisted.internet import defer
+from synapse.http.servlet import assert_params_in_dict
+from synapse.util import logcontext, unwrapFirstError
 
 logger = logging.getLogger(__name__)
 
@@ -198,7 +199,7 @@ def event_from_pdu_json(pdu_json, outlier=False):
     """
     # we could probably enforce a bunch of other fields here (room_id, sender,
     # origin, etc etc)
-    assert_params_in_request(pdu_json, ('event_id', 'type', 'depth'))
+    assert_params_in_dict(pdu_json, ('event_id', 'type', 'depth'))
 
     depth = pdu_json['depth']
     if not isinstance(depth, six.integer_types):
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 87a92f6ea9..c9f3c2d352 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -21,25 +21,25 @@ import random
 
 from six.moves import range
 
+from prometheus_client import Counter
+
 from twisted.internet import defer
 
-from synapse.api.constants import Membership
+from synapse.api.constants import KNOWN_ROOM_VERSIONS, EventTypes, Membership
 from synapse.api.errors import (
-    CodeMessageException, HttpResponseException, SynapseError, FederationDeniedError
+    CodeMessageException,
+    FederationDeniedError,
+    HttpResponseException,
+    SynapseError,
 )
 from synapse.events import builder
-from synapse.federation.federation_base import (
-    FederationBase,
-    event_from_pdu_json,
-)
+from synapse.federation.federation_base import FederationBase, event_from_pdu_json
 from synapse.util import logcontext, unwrapFirstError
 from synapse.util.caches.expiringcache import ExpiringCache
 from synapse.util.logcontext import make_deferred_yieldable, run_in_background
 from synapse.util.logutils import log_function
 from synapse.util.retryutils import NotRetryingDestination
 
-from prometheus_client import Counter
-
 logger = logging.getLogger(__name__)
 
 sent_queries_counter = Counter("synapse_federation_client_sent_queries", "", ["type"])
@@ -48,6 +48,13 @@ sent_queries_counter = Counter("synapse_federation_client_sent_queries", "", ["t
 PDU_RETRY_TIME_MS = 1 * 60 * 1000
 
 
+class InvalidResponseError(RuntimeError):
+    """Helper for _try_destination_list: indicates that the server returned a response
+    we couldn't parse
+    """
+    pass
+
+
 class FederationClient(FederationBase):
     def __init__(self, hs):
         super(FederationClient, self).__init__(hs)
@@ -458,8 +465,63 @@ class FederationClient(FederationBase):
         defer.returnValue(signed_auth)
 
     @defer.inlineCallbacks
+    def _try_destination_list(self, description, destinations, callback):
+        """Try an operation on a series of servers, until it succeeds
+
+        Args:
+            description (unicode): description of the operation we're doing, for logging
+
+            destinations (Iterable[unicode]): list of server_names to try
+
+            callback (callable):  Function to run for each server. Passed a single
+                argument: the server_name to try. May return a deferred.
+
+                If the callback raises a CodeMessageException with a 300/400 code,
+                attempts to perform the operation stop immediately and the exception is
+                reraised.
+
+                Otherwise, if the callback raises an Exception the error is logged and the
+                next server is tried. Normally the stacktrace is logged but this is
+                suppressed if the exception is an InvalidResponseError.
+
+        Returns:
+            The [Deferred] result of callback, if it succeeds
+
+        Raises:
+            SynapseError if the chosen remote server returns a 300/400 code.
+
+            RuntimeError if no servers were reachable.
+        """
+        for destination in destinations:
+            if destination == self.server_name:
+                continue
+
+            try:
+                res = yield callback(destination)
+                defer.returnValue(res)
+            except InvalidResponseError as e:
+                logger.warn(
+                    "Failed to %s via %s: %s",
+                    description, destination, e,
+                )
+            except HttpResponseException as e:
+                if not 500 <= e.code < 600:
+                    raise e.to_synapse_error()
+                else:
+                    logger.warn(
+                        "Failed to %s via %s: %i %s",
+                        description, destination, e.code, e.message,
+                    )
+            except Exception:
+                logger.warn(
+                    "Failed to %s via %s",
+                    description, destination, exc_info=1,
+                )
+
+        raise RuntimeError("Failed to %s via any server" % (description, ))
+
     def make_membership_event(self, destinations, room_id, user_id, membership,
-                              content={},):
+                              content, params):
         """
         Creates an m.room.member event, with context, without participating in the room.
 
@@ -475,13 +537,15 @@ class FederationClient(FederationBase):
             user_id (str): The user whose membership is being evented.
             membership (str): The "membership" property of the event. Must be
                 one of "join" or "leave".
-            content (object): Any additional data to put into the content field
+            content (dict): Any additional data to put into the content field
                 of the event.
+            params (dict[str, str|Iterable[str]]): Query parameters to include in the
+                request.
         Return:
             Deferred: resolves to a tuple of (origin (str), event (object))
             where origin is the remote homeserver which generated the event.
 
-            Fails with a ``CodeMessageException`` if the chosen remote server
+            Fails with a ``SynapseError`` if the chosen remote server
             returns a 300/400 code.
 
             Fails with a ``RuntimeError`` if no servers were reachable.
@@ -492,50 +556,37 @@ class FederationClient(FederationBase):
                 "make_membership_event called with membership='%s', must be one of %s" %
                 (membership, ",".join(valid_memberships))
             )
-        for destination in destinations:
-            if destination == self.server_name:
-                continue
 
-            try:
-                ret = yield self.transport_layer.make_membership_event(
-                    destination, room_id, user_id, membership
-                )
+        @defer.inlineCallbacks
+        def send_request(destination):
+            ret = yield self.transport_layer.make_membership_event(
+                destination, room_id, user_id, membership, params,
+            )
 
-                pdu_dict = ret["event"]
+            pdu_dict = ret.get("event", None)
+            if not isinstance(pdu_dict, dict):
+                raise InvalidResponseError("Bad 'event' field in response")
 
-                logger.debug("Got response to make_%s: %s", membership, pdu_dict)
+            logger.debug("Got response to make_%s: %s", membership, pdu_dict)
 
-                pdu_dict["content"].update(content)
+            pdu_dict["content"].update(content)
 
-                # The protoevent received over the JSON wire may not have all
-                # the required fields. Lets just gloss over that because
-                # there's some we never care about
-                if "prev_state" not in pdu_dict:
-                    pdu_dict["prev_state"] = []
+            # The protoevent received over the JSON wire may not have all
+            # the required fields. Let's just gloss over that because
+            # there are some we never care about
+            if "prev_state" not in pdu_dict:
+                pdu_dict["prev_state"] = []
 
-                ev = builder.EventBuilder(pdu_dict)
+            ev = builder.EventBuilder(pdu_dict)
 
-                defer.returnValue(
-                    (destination, ev)
-                )
-                break
-            except CodeMessageException as e:
-                if not 500 <= e.code < 600:
-                    raise
-                else:
-                    logger.warn(
-                        "Failed to make_%s via %s: %s",
-                        membership, destination, e.message
-                    )
-            except Exception as e:
-                logger.warn(
-                    "Failed to make_%s via %s: %s",
-                    membership, destination, e.message
-                )
+            defer.returnValue(
+                (destination, ev)
+            )
 
-        raise RuntimeError("Failed to send to any server.")
+        return self._try_destination_list(
+            "make_" + membership, destinations, send_request,
+        )
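With the new signature, `content` and `params` are mandatory. A hypothetical caller sketch (names and values illustrative) showing how `params` carries the `ver` query string that `FederationMakeJoinServlet` reads back further down in this patch:

from twisted.internet import defer


@defer.inlineCallbacks
def join_via_any(client, destinations, room_id, user_id):
    # Hypothetical wrapper: advertise the room versions we support via the
    # `ver` query parameter when asking remote servers for a protoevent.
    origin, event = yield client.make_membership_event(
        destinations, room_id, user_id, "join",
        content={},
        params={"ver": ["1"]},
    )
    defer.returnValue((origin, event))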
 
-    @defer.inlineCallbacks
     def send_join(self, destinations, pdu):
         """Sends a join event to one of a list of homeservers.
 
@@ -552,103 +603,111 @@ class FederationClient(FederationBase):
             giving the server the event was sent to, ``state`` (?) and
             ``auth_chain``.
 
-            Fails with a ``CodeMessageException`` if the chosen remote server
+            Fails with a ``SynapseError`` if the chosen remote server
             returns a 300/400 code.
 
             Fails with a ``RuntimeError`` if no servers were reachable.
         """
 
-        for destination in destinations:
-            if destination == self.server_name:
-                continue
-
-            try:
-                time_now = self._clock.time_msec()
-                _, content = yield self.transport_layer.send_join(
-                    destination=destination,
-                    room_id=pdu.room_id,
-                    event_id=pdu.event_id,
-                    content=pdu.get_pdu_json(time_now),
+        def check_authchain_validity(signed_auth_chain):
+            for e in signed_auth_chain:
+                if e.type == EventTypes.Create:
+                    create_event = e
+                    break
+            else:
+                raise InvalidResponseError(
+                    "no %s in auth chain" % (EventTypes.Create,),
                 )
 
-                logger.debug("Got content: %s", content)
+            # the room version should be sane.
+            room_version = create_event.content.get("room_version", "1")
+            if room_version not in KNOWN_ROOM_VERSIONS:
+                # This shouldn't be possible, because the remote server should have
+                # rejected the join attempt during make_join.
+                raise InvalidResponseError(
+                    "room appears to have unsupported version %s" % (
+                        room_version,
+                    ))
+
+        @defer.inlineCallbacks
+        def send_request(destination):
+            time_now = self._clock.time_msec()
+            _, content = yield self.transport_layer.send_join(
+                destination=destination,
+                room_id=pdu.room_id,
+                event_id=pdu.event_id,
+                content=pdu.get_pdu_json(time_now),
+            )
 
-                state = [
-                    event_from_pdu_json(p, outlier=True)
-                    for p in content.get("state", [])
-                ]
+            logger.debug("Got content: %s", content)
 
-                auth_chain = [
-                    event_from_pdu_json(p, outlier=True)
-                    for p in content.get("auth_chain", [])
-                ]
+            state = [
+                event_from_pdu_json(p, outlier=True)
+                for p in content.get("state", [])
+            ]
 
-                pdus = {
-                    p.event_id: p
-                    for p in itertools.chain(state, auth_chain)
-                }
+            auth_chain = [
+                event_from_pdu_json(p, outlier=True)
+                for p in content.get("auth_chain", [])
+            ]
 
-                valid_pdus = yield self._check_sigs_and_hash_and_fetch(
-                    destination, list(pdus.values()),
-                    outlier=True,
-                )
+            pdus = {
+                p.event_id: p
+                for p in itertools.chain(state, auth_chain)
+            }
 
-                valid_pdus_map = {
-                    p.event_id: p
-                    for p in valid_pdus
-                }
-
-                # NB: We *need* to copy to ensure that we don't have multiple
-                # references being passed on, as that causes... issues.
-                signed_state = [
-                    copy.copy(valid_pdus_map[p.event_id])
-                    for p in state
-                    if p.event_id in valid_pdus_map
-                ]
+            valid_pdus = yield self._check_sigs_and_hash_and_fetch(
+                destination, list(pdus.values()),
+                outlier=True,
+            )
 
-                signed_auth = [
-                    valid_pdus_map[p.event_id]
-                    for p in auth_chain
-                    if p.event_id in valid_pdus_map
-                ]
+            valid_pdus_map = {
+                p.event_id: p
+                for p in valid_pdus
+            }
 
-                # NB: We *need* to copy to ensure that we don't have multiple
-                # references being passed on, as that causes... issues.
-                for s in signed_state:
-                    s.internal_metadata = copy.deepcopy(s.internal_metadata)
+            # NB: We *need* to copy to ensure that we don't have multiple
+            # references being passed on, as that causes... issues.
+            signed_state = [
+                copy.copy(valid_pdus_map[p.event_id])
+                for p in state
+                if p.event_id in valid_pdus_map
+            ]
 
-                auth_chain.sort(key=lambda e: e.depth)
+            signed_auth = [
+                valid_pdus_map[p.event_id]
+                for p in auth_chain
+                if p.event_id in valid_pdus_map
+            ]
 
-                defer.returnValue({
-                    "state": signed_state,
-                    "auth_chain": signed_auth,
-                    "origin": destination,
-                })
-            except CodeMessageException as e:
-                if not 500 <= e.code < 600:
-                    raise
-                else:
-                    logger.exception(
-                        "Failed to send_join via %s: %s",
-                        destination, e.message
-                    )
-            except Exception as e:
-                logger.exception(
-                    "Failed to send_join via %s: %s",
-                    destination, e.message
-                )
+            # NB: We *need* to copy to ensure that we don't have multiple
+            # references being passed on, as that causes... issues.
+            for s in signed_state:
+                s.internal_metadata = copy.deepcopy(s.internal_metadata)
 
-        raise RuntimeError("Failed to send to any server.")
+            check_authchain_validity(signed_auth)
+
+            defer.returnValue({
+                "state": signed_state,
+                "auth_chain": signed_auth,
+                "origin": destination,
+            })
+        return self._try_destination_list("send_join", destinations, send_request)
 
     @defer.inlineCallbacks
     def send_invite(self, destination, room_id, event_id, pdu):
         time_now = self._clock.time_msec()
-        code, content = yield self.transport_layer.send_invite(
-            destination=destination,
-            room_id=room_id,
-            event_id=event_id,
-            content=pdu.get_pdu_json(time_now),
-        )
+        try:
+            code, content = yield self.transport_layer.send_invite(
+                destination=destination,
+                room_id=room_id,
+                event_id=event_id,
+                content=pdu.get_pdu_json(time_now),
+            )
+        except HttpResponseException as e:
+            if e.code == 403:
+                raise e.to_synapse_error()
+            raise
 
         pdu_dict = content["event"]
 
@@ -663,7 +722,6 @@ class FederationClient(FederationBase):
 
         defer.returnValue(pdu)
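The effect of the new 403 branch above is that a remote rejection now surfaces as a structured Matrix error rather than a bare HTTP failure. A rough sketch of the conversion `to_synapse_error()` performs, assuming the remote error body is standard Matrix JSON with `errcode`/`error` fields (the real method lives on `HttpResponseException`; the names below are stand-ins):

import json


class SketchSynapseError(Exception):
    # Stand-in for synapse.api.errors.SynapseError, for illustration only.
    def __init__(self, code, msg, errcode="M_UNKNOWN"):
        super(SketchSynapseError, self).__init__(msg)
        self.code = code
        self.errcode = errcode


def to_synapse_error_sketch(http_code, response_body):
    # Pull the Matrix errcode/error out of the remote JSON error body so
    # callers can see why the invite was rejected.
    try:
        body = json.loads(response_body)
        return SketchSynapseError(
            http_code, body.get("error", ""), body.get("errcode", "M_UNKNOWN"),
        )
    except ValueError:
        return SketchSynapseError(http_code, "Unknown error")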
 
-    @defer.inlineCallbacks
     def send_leave(self, destinations, pdu):
         """Sends a leave event to one of a list of homeservers.
 
@@ -680,35 +738,25 @@ class FederationClient(FederationBase):
         Return:
             Deferred: resolves to None.
 
-            Fails with a ``CodeMessageException`` if the chosen remote server
-            returns a non-200 code.
+            Fails with a ``SynapseError`` if the chosen remote server
+            returns a 300/400 code.
 
             Fails with a ``RuntimeError`` if no servers were reachable.
         """
-        for destination in destinations:
-            if destination == self.server_name:
-                continue
-
-            try:
-                time_now = self._clock.time_msec()
-                _, content = yield self.transport_layer.send_leave(
-                    destination=destination,
-                    room_id=pdu.room_id,
-                    event_id=pdu.event_id,
-                    content=pdu.get_pdu_json(time_now),
-                )
+        @defer.inlineCallbacks
+        def send_request(destination):
+            time_now = self._clock.time_msec()
+            _, content = yield self.transport_layer.send_leave(
+                destination=destination,
+                room_id=pdu.room_id,
+                event_id=pdu.event_id,
+                content=pdu.get_pdu_json(time_now),
+            )
 
-                logger.debug("Got content: %s", content)
-                defer.returnValue(None)
-            except CodeMessageException:
-                raise
-            except Exception as e:
-                logger.exception(
-                    "Failed to send_leave via %s: %s",
-                    destination, e.message
-                )
+            logger.debug("Got content: %s", content)
+            defer.returnValue(None)
 
-        raise RuntimeError("Failed to send to any server.")
+        return self._try_destination_list("send_leave", destinations, send_request)
 
     def get_public_rooms(self, destination, limit=None, since_token=None,
                          search_filter=None, include_all_networks=False,
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 2d420a58a2..3e0cd294a1 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -14,28 +14,40 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
+import re
 
-import simplejson as json
-from twisted.internet import defer
+import six
+from six import iteritems
 
-from synapse.api.errors import AuthError, FederationError, SynapseError, NotFoundError
-from synapse.crypto.event_signing import compute_event_signature
-from synapse.federation.federation_base import (
-    FederationBase,
-    event_from_pdu_json,
-)
+from canonicaljson import json
+from prometheus_client import Counter
 
+from twisted.internet import defer
+from twisted.internet.abstract import isIPAddress
+from twisted.python import failure
+
+from synapse.api.constants import EventTypes
+from synapse.api.errors import (
+    AuthError,
+    FederationError,
+    IncompatibleRoomVersionError,
+    NotFoundError,
+    SynapseError,
+)
+from synapse.crypto.event_signing import compute_event_signature
+from synapse.federation.federation_base import FederationBase, event_from_pdu_json
 from synapse.federation.persistence import TransactionActions
 from synapse.federation.units import Edu, Transaction
+from synapse.http.endpoint import parse_server_name
+from synapse.replication.http.federation import (
+    ReplicationFederationSendEduRestServlet,
+    ReplicationGetQueryRestServlet,
+)
 from synapse.types import get_domain_from_id
-from synapse.util import async
+from synapse.util.async_helpers import Linearizer, concurrently_execute
 from synapse.util.caches.response_cache import ResponseCache
 from synapse.util.logutils import log_function
 
-from prometheus_client import Counter
-
-from six import iteritems
-
 # when processing incoming transactions, we try to handle multiple rooms in
 # parallel, up to this limit.
 TRANSACTION_CONCURRENCY_LIMIT = 10
@@ -59,8 +71,8 @@ class FederationServer(FederationBase):
         self.auth = hs.get_auth()
         self.handler = hs.get_handlers().federation_handler
 
-        self._server_linearizer = async.Linearizer("fed_server")
-        self._transaction_linearizer = async.Linearizer("fed_txn_handler")
+        self._server_linearizer = Linearizer("fed_server")
+        self._transaction_linearizer = Linearizer("fed_txn_handler")
 
         self.transaction_actions = TransactionActions(self.store)
 
@@ -74,6 +86,9 @@ class FederationServer(FederationBase):
     @log_function
     def on_backfill_request(self, origin, room_id, versions, limit):
         with (yield self._server_linearizer.queue((origin, room_id))):
+            origin_host, _ = parse_server_name(origin)
+            yield self.check_server_matches_acl(origin_host, room_id)
+
             pdus = yield self.handler.on_backfill_request(
                 origin, room_id, versions, limit
             )
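`parse_server_name` splits an origin into host and optional port; server ACLs match on the host alone, hence the `origin_host, _ = parse_server_name(origin)` idiom repeated throughout this file. Assuming the usual (host, port-or-None) contract:

from synapse.http.endpoint import parse_server_name

assert parse_server_name("s1.example.com:8448") == ("s1.example.com", 8448)
assert parse_server_name("s1.example.com") == ("s1.example.com", None)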
@@ -134,6 +149,8 @@ class FederationServer(FederationBase):
 
         received_pdus_counter.inc(len(transaction.pdus))
 
+        origin_host, _ = parse_server_name(transaction.origin)
+
         pdus_by_room = {}
 
         for p in transaction.pdus:
@@ -154,9 +171,21 @@ class FederationServer(FederationBase):
         # we can process different rooms in parallel (which is useful if they
         # require callouts to other servers to fetch missing events), but
         # impose a limit to avoid going too crazy with ram/cpu.
+
         @defer.inlineCallbacks
         def process_pdus_for_room(room_id):
             logger.debug("Processing PDUs for %s", room_id)
+            try:
+                yield self.check_server_matches_acl(origin_host, room_id)
+            except AuthError as e:
+                logger.warn(
+                    "Ignoring PDUs for room %s from banned server", room_id,
+                )
+                for pdu in pdus_by_room[room_id]:
+                    event_id = pdu.event_id
+                    pdu_results[event_id] = e.error_dict()
+                return
+
             for pdu in pdus_by_room[room_id]:
                 event_id = pdu.event_id
                 try:
@@ -168,10 +197,14 @@ class FederationServer(FederationBase):
                     logger.warn("Error handling PDU %s: %s", event_id, e)
                     pdu_results[event_id] = {"error": str(e)}
                 except Exception as e:
+                    f = failure.Failure()
                     pdu_results[event_id] = {"error": str(e)}
-                    logger.exception("Failed to handle PDU %s", event_id)
+                    logger.error(
+                        "Failed to handle PDU %s: %s",
+                        event_id, f.getTraceback().rstrip(),
+                    )
 
-        yield async.concurrently_execute(
+        yield concurrently_execute(
             process_pdus_for_room, pdus_by_room.keys(),
             TRANSACTION_CONCURRENCY_LIMIT,
         )
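`concurrently_execute` (now imported from `synapse.util.async_helpers`) caps how many rooms are processed at once. A thread-based analogue of the idea, for illustration only; the real helper runs `limit` Twisted coroutines that drain a shared iterator:

import threading
from queue import Empty, Queue


def concurrently_execute_sketch(func, args, limit):
    work = Queue()
    for item in args:
        work.put(item)

    def worker():
        # Each worker keeps pulling items until the queue is exhausted.
        while True:
            try:
                item = work.get_nowait()
            except Empty:
                return
            func(item)

    threads = [threading.Thread(target=worker) for _ in range(limit)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()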
@@ -184,10 +217,6 @@ class FederationServer(FederationBase):
                     edu.content
                 )
 
-        pdu_failures = getattr(transaction, "pdu_failures", [])
-        for failure in pdu_failures:
-            logger.info("Got failure %r", failure)
-
         response = {
             "pdus": pdu_results,
         }
@@ -211,6 +240,9 @@ class FederationServer(FederationBase):
         if not event_id:
             raise NotImplementedError("Specify an event")
 
+        origin_host, _ = parse_server_name(origin)
+        yield self.check_server_matches_acl(origin_host, room_id)
+
         in_room = yield self.auth.check_host_in_room(room_id, origin)
         if not in_room:
             raise AuthError(403, "Host not in room.")
@@ -234,6 +266,9 @@ class FederationServer(FederationBase):
         if not event_id:
             raise NotImplementedError("Specify an event")
 
+        origin_host, _ = parse_server_name(origin)
+        yield self.check_server_matches_acl(origin_host, room_id)
+
         in_room = yield self.auth.check_host_in_room(room_id, origin)
         if not in_room:
             raise AuthError(403, "Host not in room.")
@@ -277,7 +312,7 @@ class FederationServer(FederationBase):
     @defer.inlineCallbacks
     @log_function
     def on_pdu_request(self, origin, event_id):
-        pdu = yield self._get_persisted_pdu(origin, event_id)
+        pdu = yield self.handler.get_persisted_pdu(origin, event_id)
 
         if pdu:
             defer.returnValue(
@@ -298,14 +333,27 @@ class FederationServer(FederationBase):
         defer.returnValue((200, resp))
 
     @defer.inlineCallbacks
-    def on_make_join_request(self, room_id, user_id):
+    def on_make_join_request(self, origin, room_id, user_id, supported_versions):
+        origin_host, _ = parse_server_name(origin)
+        yield self.check_server_matches_acl(origin_host, room_id)
+
+        room_version = yield self.store.get_room_version(room_id)
+        if room_version not in supported_versions:
+            logger.warn("Room version %s not in %s", room_version, supported_versions)
+            raise IncompatibleRoomVersionError(room_version=room_version)
+
         pdu = yield self.handler.on_make_join_request(room_id, user_id)
         time_now = self._clock.time_msec()
-        defer.returnValue({"event": pdu.get_pdu_json(time_now)})
+        defer.returnValue({
+            "event": pdu.get_pdu_json(time_now),
+            "room_version": room_version,
+        })
 
     @defer.inlineCallbacks
     def on_invite_request(self, origin, content):
         pdu = event_from_pdu_json(content)
+        origin_host, _ = parse_server_name(origin)
+        yield self.check_server_matches_acl(origin_host, pdu.room_id)
         ret_pdu = yield self.handler.on_invite_request(origin, pdu)
         time_now = self._clock.time_msec()
         defer.returnValue((200, {"event": ret_pdu.get_pdu_json(time_now)}))
@@ -314,6 +362,10 @@ class FederationServer(FederationBase):
     def on_send_join_request(self, origin, content):
         logger.debug("on_send_join_request: content: %s", content)
         pdu = event_from_pdu_json(content)
+
+        origin_host, _ = parse_server_name(origin)
+        yield self.check_server_matches_acl(origin_host, pdu.room_id)
+
         logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures)
         res_pdus = yield self.handler.on_send_join_request(origin, pdu)
         time_now = self._clock.time_msec()
@@ -325,7 +377,9 @@ class FederationServer(FederationBase):
         }))
 
     @defer.inlineCallbacks
-    def on_make_leave_request(self, room_id, user_id):
+    def on_make_leave_request(self, origin, room_id, user_id):
+        origin_host, _ = parse_server_name(origin)
+        yield self.check_server_matches_acl(origin_host, room_id)
         pdu = yield self.handler.on_make_leave_request(room_id, user_id)
         time_now = self._clock.time_msec()
         defer.returnValue({"event": pdu.get_pdu_json(time_now)})
@@ -334,6 +388,10 @@ class FederationServer(FederationBase):
     def on_send_leave_request(self, origin, content):
         logger.debug("on_send_leave_request: content: %s", content)
         pdu = event_from_pdu_json(content)
+
+        origin_host, _ = parse_server_name(origin)
+        yield self.check_server_matches_acl(origin_host, pdu.room_id)
+
         logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures)
         yield self.handler.on_send_leave_request(origin, pdu)
         defer.returnValue((200, {}))
@@ -341,6 +399,9 @@ class FederationServer(FederationBase):
     @defer.inlineCallbacks
     def on_event_auth(self, origin, room_id, event_id):
         with (yield self._server_linearizer.queue((origin, room_id))):
+            origin_host, _ = parse_server_name(origin)
+            yield self.check_server_matches_acl(origin_host, room_id)
+
             time_now = self._clock.time_msec()
             auth_pdus = yield self.handler.on_event_auth(event_id)
             res = {
@@ -369,6 +430,9 @@ class FederationServer(FederationBase):
             Deferred: Results in `dict` with the same format as `content`
         """
         with (yield self._server_linearizer.queue((origin, room_id))):
+            origin_host, _ = parse_server_name(origin)
+            yield self.check_server_matches_acl(origin_host, room_id)
+
             auth_chain = [
                 event_from_pdu_json(e)
                 for e in content["auth_chain"]
@@ -381,6 +445,7 @@ class FederationServer(FederationBase):
             ret = yield self.handler.on_query_auth(
                 origin,
                 event_id,
+                room_id,
                 signed_auth,
                 content.get("rejects", []),
                 content.get("missing", []),
@@ -442,6 +507,9 @@ class FederationServer(FederationBase):
     def on_get_missing_events(self, origin, room_id, earliest_events,
                               latest_events, limit, min_depth):
         with (yield self._server_linearizer.queue((origin, room_id))):
+            origin_host, _ = parse_server_name(origin)
+            yield self.check_server_matches_acl(origin_host, room_id)
+
             logger.info(
                 "on_get_missing_events: earliest_events: %r, latest_events: %r,"
                 " limit: %d, min_depth: %d",
@@ -470,17 +538,6 @@ class FederationServer(FederationBase):
         ts_now_ms = self._clock.time_msec()
         return self.store.get_user_id_for_open_id_token(token, ts_now_ms)
 
-    @log_function
-    def _get_persisted_pdu(self, origin, event_id, do_auth=True):
-        """ Get a PDU from the database with given origin and id.
-
-        Returns:
-            Deferred: Results in a `Pdu`.
-        """
-        return self.handler.get_persisted_pdu(
-            origin, event_id, do_auth=do_auth
-        )
-
     def _transaction_from_pdus(self, pdu_list):
         """Returns a new Transaction containing the given PDUs suitable for
         transmission.
@@ -560,7 +617,9 @@ class FederationServer(FederationBase):
                 affected=pdu.event_id,
             )
 
-        yield self.handler.on_receive_pdu(origin, pdu, get_missing=True)
+        yield self.handler.on_receive_pdu(
+            origin, pdu, get_missing=True, sent_to_us_directly=True,
+        )
 
     def __str__(self):
         return "<ReplicationLayer(%s)>" % self.server_name
@@ -588,6 +647,101 @@ class FederationServer(FederationBase):
         )
         defer.returnValue(ret)
 
+    @defer.inlineCallbacks
+    def check_server_matches_acl(self, server_name, room_id):
+        """Check if the given server is allowed by the server ACLs in the room
+
+        Args:
+            server_name (str): name of server, *without any port part*
+            room_id (str): ID of the room to check
+
+        Raises:
+            AuthError if the server does not match the ACL
+        """
+        state_ids = yield self.store.get_current_state_ids(room_id)
+        acl_event_id = state_ids.get((EventTypes.ServerACL, ""))
+
+        if not acl_event_id:
+            return
+
+        acl_event = yield self.store.get_event(acl_event_id)
+        if server_matches_acl_event(server_name, acl_event):
+            return
+
+        raise AuthError(code=403, msg="Server is banned from room")
+
+
+def server_matches_acl_event(server_name, acl_event):
+    """Check if the given server is allowed by the ACL event
+
+    Args:
+        server_name (str): name of server, without any port part
+        acl_event (EventBase): m.room.server_acl event
+
+    Returns:
+        bool: True if this server is allowed by the ACLs
+    """
+    logger.debug("Checking %s against acl %s", server_name, acl_event.content)
+
+    # first of all, check if literal IPs are blocked, and if so, whether the
+    # server name is a literal IP
+    allow_ip_literals = acl_event.content.get("allow_ip_literals", True)
+    if not isinstance(allow_ip_literals, bool):
+        logger.warn("Ignorning non-bool allow_ip_literals flag")
+        allow_ip_literals = True
+    if not allow_ip_literals:
+        # check for ipv6 literals. These start with '['.
+        if server_name[0] == '[':
+            return False
+
+        # check for ipv4 literals. We can just lift the routine from twisted.
+        if isIPAddress(server_name):
+            return False
+
+    # next,  check the deny list
+    deny = acl_event.content.get("deny", [])
+    if not isinstance(deny, (list, tuple)):
+        logger.warn("Ignorning non-list deny ACL %s", deny)
+        deny = []
+    for e in deny:
+        if _acl_entry_matches(server_name, e):
+            # logger.info("%s matched deny rule %s", server_name, e)
+            return False
+
+    # then the allow list.
+    allow = acl_event.content.get("allow", [])
+    if not isinstance(allow, (list, tuple)):
+        logger.warn("Ignorning non-list allow ACL %s", allow)
+        allow = []
+    for e in allow:
+        if _acl_entry_matches(server_name, e):
+            # logger.info("%s matched allow rule %s", server_name, e)
+            return True
+
+    # everything else should be rejected.
+    # logger.info("%s fell through", server_name)
+    return False
+
+
+def _acl_entry_matches(server_name, acl_entry):
+    if not isinstance(acl_entry, six.string_types):
+        logger.warn("Ignoring non-str ACL entry '%s' (is %s)", acl_entry, type(acl_entry))
+        return False
+    regex = _glob_to_regex(acl_entry)
+    return regex.match(server_name)
+
+
+def _glob_to_regex(glob):
+    res = ''
+    for c in glob:
+        if c == '*':
+            res = res + '.*'
+        elif c == '?':
+            res = res + '.'
+        else:
+            res = res + re.escape(c)
+    return re.compile(res + "\\Z", re.IGNORECASE)
+
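A quick illustration of the matching rules above: the deny list is consulted before the allow list, IP literals can be rejected outright, and anything not explicitly allowed is rejected. This assumes `server_matches_acl_event` is importable and uses a stub in place of a real event object:

from types import SimpleNamespace

acl_event = SimpleNamespace(content={
    "allow_ip_literals": False,
    "deny": ["*.evil.example.com"],
    "allow": ["*"],
})

assert not server_matches_acl_event("spam.evil.example.com", acl_event)  # deny rule
assert not server_matches_acl_event("1.2.3.4", acl_event)                # IP literal
assert server_matches_acl_event("good.example.com", acl_event)           # allowed by "*"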
 
 class FederationHandlerRegistry(object):
     """Allows classes to register themselves as handlers for a given EDU or
@@ -610,6 +764,8 @@ class FederationHandlerRegistry(object):
         if edu_type in self.edu_handlers:
             raise KeyError("Already have an EDU handler for %s" % (edu_type,))
 
+        logger.info("Registering federation EDU handler for %r", edu_type)
+
         self.edu_handlers[edu_type] = handler
 
     def register_query_handler(self, query_type, handler):
@@ -628,6 +784,8 @@ class FederationHandlerRegistry(object):
                 "Already have a Query handler for %s" % (query_type,)
             )
 
+        logger.info("Registering federation query handler for %r", query_type)
+
         self.query_handlers[query_type] = handler
 
     @defer.inlineCallbacks
@@ -650,3 +808,49 @@ class FederationHandlerRegistry(object):
             raise NotFoundError("No handler for Query type '%s'" % (query_type,))
 
         return handler(args)
+
+
+class ReplicationFederationHandlerRegistry(FederationHandlerRegistry):
+    """A FederationHandlerRegistry for worker processes.
+
+    When receiving an EDU or query, it first checks whether an appropriate
+    handler has been registered on the worker; if there isn't one, it calls
+    out to the master process.
+    """
+
+    def __init__(self, hs):
+        self.config = hs.config
+        self.http_client = hs.get_simple_http_client()
+        self.clock = hs.get_clock()
+
+        self._get_query_client = ReplicationGetQueryRestServlet.make_client(hs)
+        self._send_edu = ReplicationFederationSendEduRestServlet.make_client(hs)
+
+        super(ReplicationFederationHandlerRegistry, self).__init__()
+
+    def on_edu(self, edu_type, origin, content):
+        """Overrides FederationHandlerRegistry
+        """
+        handler = self.edu_handlers.get(edu_type)
+        if handler:
+            return super(ReplicationFederationHandlerRegistry, self).on_edu(
+                edu_type, origin, content,
+            )
+
+        return self._send_edu(
+            edu_type=edu_type,
+            origin=origin,
+            content=content,
+        )
+
+    def on_query(self, query_type, args):
+        """Overrides FederationHandlerRegistry
+        """
+        handler = self.query_handlers.get(query_type)
+        if handler:
+            return handler(args)
+
+        return self._get_query_client(
+            query_type=query_type,
+            args=args,
+        )
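Both overrides share the same dispatch shape: prefer a locally registered handler, otherwise hand the work to the master over the replication API. A minimal, generic sketch of that shape (illustrative names, not Synapse API):

class DispatchWithFallback(object):
    """Route by key to a local handler, else to a fallback callable."""

    def __init__(self, fallback):
        self.handlers = {}
        self.fallback = fallback  # e.g. a replication HTTP client

    def register(self, key, handler):
        self.handlers[key] = handler

    def dispatch(self, key, *args):
        handler = self.handlers.get(key)
        if handler is not None:
            return handler(*args)
        return self.fallback(key, *args)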
diff --git a/synapse/federation/persistence.py b/synapse/federation/persistence.py
index 84dc606673..9146215c21 100644
--- a/synapse/federation/persistence.py
+++ b/synapse/federation/persistence.py
@@ -19,13 +19,12 @@ package.
 These actions are mostly only used by the :py:mod:`.replication` module.
 """
 
+import logging
+
 from twisted.internet import defer
 
 from synapse.util.logutils import log_function
 
-import logging
-
-
 logger = logging.getLogger(__name__)
 
 
diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py
index 1d5c0f3797..6f5995735a 100644
--- a/synapse/federation/send_queue.py
+++ b/synapse/federation/send_queue.py
@@ -29,18 +29,18 @@ dead worker doesn't cause the queues to grow limitlessly.
 Events are replicated via a separate events stream.
 """
 
-from .units import Edu
+import logging
+from collections import namedtuple
 
-from synapse.storage.presence import UserPresenceState
-from synapse.util.metrics import Measure
-from synapse.metrics import LaterGauge
+from six import iteritems
 
 from sortedcontainers import SortedDict
-from collections import namedtuple
 
-import logging
+from synapse.metrics import LaterGauge
+from synapse.storage.presence import UserPresenceState
+from synapse.util.metrics import Measure
 
-from six import itervalues, iteritems
+from .units import Edu
 
 logger = logging.getLogger(__name__)
 
@@ -62,8 +62,6 @@ class FederationRemoteSendQueue(object):
 
         self.edus = SortedDict()  # stream position -> Edu
 
-        self.failures = SortedDict()  # stream position -> (destination, Failure)
-
         self.device_messages = SortedDict()  # stream position -> destination
 
         self.pos = 1
@@ -79,7 +77,7 @@ class FederationRemoteSendQueue(object):
 
         for queue_name in [
             "presence_map", "presence_changed", "keyed_edu", "keyed_edu_changed",
-            "edus", "failures", "device_messages", "pos_time",
+            "edus", "device_messages", "pos_time",
         ]:
             register(queue_name, getattr(self, queue_name))
 
@@ -119,7 +117,7 @@ class FederationRemoteSendQueue(object):
 
             user_ids = set(
                 user_id
-                for uids in itervalues(self.presence_changed)
+                for uids in self.presence_changed.values()
                 for user_id in uids
             )
 
@@ -149,12 +147,6 @@ class FederationRemoteSendQueue(object):
             for key in keys[:i]:
                 del self.edus[key]
 
-            # Delete things out of failure map
-            keys = self.failures.keys()
-            i = self.failures.bisect_left(position_to_delete)
-            for key in keys[:i]:
-                del self.failures[key]
-
             # Delete things out of device map
             keys = self.device_messages.keys()
             i = self.device_messages.bisect_left(position_to_delete)
@@ -204,13 +196,6 @@ class FederationRemoteSendQueue(object):
 
         self.notifier.on_new_replication_data()
 
-    def send_failure(self, failure, destination):
-        """As per TransactionQueue"""
-        pos = self._next_pos()
-
-        self.failures[pos] = (destination, str(failure))
-        self.notifier.on_new_replication_data()
-
     def send_device_messages(self, destination):
         """As per TransactionQueue"""
         pos = self._next_pos()
@@ -285,17 +270,6 @@ class FederationRemoteSendQueue(object):
         for (pos, edu) in edus:
             rows.append((pos, EduRow(edu)))
 
-        # Fetch changed failures
-        i = self.failures.bisect_right(from_token)
-        j = self.failures.bisect_right(to_token) + 1
-        failures = self.failures.items()[i:j]
-
-        for (pos, (destination, failure)) in failures:
-            rows.append((pos, FailureRow(
-                destination=destination,
-                failure=failure,
-            )))
-
         # Fetch changed device messages
         i = self.device_messages.bisect_right(from_token)
         j = self.device_messages.bisect_right(to_token) + 1
@@ -417,34 +391,6 @@ class EduRow(BaseFederationRow, namedtuple("EduRow", (
         buff.edus.setdefault(self.edu.destination, []).append(self.edu)
 
 
-class FailureRow(BaseFederationRow, namedtuple("FailureRow", (
-    "destination",  # str
-    "failure",
-))):
-    """Streams failures to a remote server. Failures are issued when there was
-    something wrong with a transaction the remote sent us, e.g. it included
-    an event that was invalid.
-    """
-
-    TypeId = "f"
-
-    @staticmethod
-    def from_data(data):
-        return FailureRow(
-            destination=data["destination"],
-            failure=data["failure"],
-        )
-
-    def to_data(self):
-        return {
-            "destination": self.destination,
-            "failure": self.failure,
-        }
-
-    def add_to_buffer(self, buff):
-        buff.failures.setdefault(self.destination, []).append(self.failure)
-
-
 class DeviceRow(BaseFederationRow, namedtuple("DeviceRow", (
     "destination",  # str
 ))):
@@ -471,7 +417,6 @@ TypeToRow = {
         PresenceRow,
         KeyedEduRow,
         EduRow,
-        FailureRow,
         DeviceRow,
     )
 }
@@ -481,7 +426,6 @@ ParsedFederationStreamData = namedtuple("ParsedFederationStreamData", (
     "presence",  # list(UserPresenceState)
     "keyed_edus",  # dict of destination -> { key -> Edu }
     "edus",  # dict of destination -> [Edu]
-    "failures",  # dict of destination -> [failures]
     "device_destinations",  # set of destinations
 ))
 
@@ -503,7 +447,6 @@ def process_rows_for_federation(transaction_queue, rows):
         presence=[],
         keyed_edus={},
         edus={},
-        failures={},
         device_destinations=set(),
     )
 
@@ -532,9 +475,5 @@ def process_rows_for_federation(transaction_queue, rows):
                 edu.destination, edu.edu_type, edu.content, key=None,
             )
 
-    for destination, failure_list in iteritems(buff.failures):
-        for failure in failure_list:
-            transaction_queue.send_failure(destination, failure)
-
     for destination in buff.device_destinations:
         transaction_queue.send_device_messages(destination)
diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py
index f0aeb5a0d3..94d7423d01 100644
--- a/synapse/federation/transaction_queue.py
+++ b/synapse/federation/transaction_queue.py
@@ -13,37 +13,40 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import datetime
+import logging
 
-from twisted.internet import defer
+from six import itervalues
 
-from .persistence import TransactionActions
-from .units import Transaction, Edu
+from prometheus_client import Counter
+
+from twisted.internet import defer
 
-from synapse.api.errors import HttpResponseException, FederationDeniedError
-from synapse.util import logcontext, PreserveLoggingContext
-from synapse.util.async import run_on_reactor
-from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter
-from synapse.util.metrics import measure_func
-from synapse.handlers.presence import format_user_presence_state, get_interested_remotes
 import synapse.metrics
-from synapse.metrics import LaterGauge
+from synapse.api.errors import FederationDeniedError, HttpResponseException
+from synapse.handlers.presence import format_user_presence_state, get_interested_remotes
 from synapse.metrics import (
+    LaterGauge,
+    event_processing_loop_counter,
+    event_processing_loop_room_count,
+    events_processed_counter,
     sent_edus_counter,
     sent_transactions_counter,
-    events_processed_counter,
 )
+from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.util import logcontext
+from synapse.util.metrics import measure_func
+from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter
 
-from prometheus_client import Counter
-
-from six import itervalues
-
-import logging
-
+from .persistence import TransactionActions
+from .units import Edu, Transaction
 
 logger = logging.getLogger(__name__)
 
-sent_pdus_destination_dist = Counter(
-    "synapse_federation_transaction_queue_sent_pdu_destinations", ""
+sent_pdus_destination_dist_count = Counter(
+    "synapse_federation_client_sent_pdu_destinations:count", ""
+)
+sent_pdus_destination_dist_total = Counter(
+    "synapse_federation_client_sent_pdu_destinations:total", ""
 )
 
 
@@ -55,6 +58,7 @@ class TransactionQueue(object):
     """
 
     def __init__(self, hs):
+        self.hs = hs
         self.server_name = hs.hostname
 
         self.store = hs.get_datastore()
@@ -115,9 +119,6 @@ class TransactionQueue(object):
             ),
         )
 
-        # destination -> list of tuple(failure, deferred)
-        self.pending_failures_by_dest = {}
-
         # destination -> stream_id of last successfully sent to-device message.
         # NB: may be a long or an int.
         self.last_device_stream_id_by_dest = {}
@@ -165,10 +166,11 @@ class TransactionQueue(object):
         if self._is_processing:
             return
 
-        # fire off a processing loop in the background. It's likely it will
-        # outlast the current request, so run it in the sentinel logcontext.
-        with PreserveLoggingContext():
-            self._process_event_queue_loop()
+        # fire off a processing loop in the background
+        run_as_background_process(
+            "process_event_queue_for_federation",
+            self._process_event_queue_loop,
+        )
 
     @defer.inlineCallbacks
     def _process_event_queue_loop(self):
@@ -254,7 +256,13 @@ class TransactionQueue(object):
                     synapse.metrics.event_processing_last_ts.labels(
                         "federation_sender").set(ts)
 
-                events_processed_counter.inc(len(events))
+                    events_processed_counter.inc(len(events))
+
+                    event_processing_loop_room_count.labels(
+                        "federation_sender"
+                    ).inc(len(events_by_room))
+
+                event_processing_loop_counter.labels("federation_sender").inc()
 
                 synapse.metrics.event_processing_positions.labels(
                     "federation_sender").set(next_token)
@@ -280,7 +288,8 @@ class TransactionQueue(object):
         if not destinations:
             return
 
-        sent_pdus_destination_dist.inc(len(destinations))
+        sent_pdus_destination_dist_total.inc(len(destinations))
+        sent_pdus_destination_dist_count.inc()
 
         for destination in destinations:
             self.pending_pdus_by_dest.setdefault(destination, []).append(
@@ -300,6 +309,9 @@ class TransactionQueue(object):
         Args:
             states (list(UserPresenceState))
         """
+        if not self.hs.config.use_presence:
+            # No-op if presence is disabled.
+            return
 
         # First we queue up the new presence by user ID, so multiple presence
         # updates in quick successtion are correctly handled
@@ -379,19 +391,6 @@ class TransactionQueue(object):
 
         self._attempt_new_transaction(destination)
 
-    def send_failure(self, failure, destination):
-        if destination == self.server_name or destination == "localhost":
-            return
-
-        if not self.can_send_to(destination):
-            return
-
-        self.pending_failures_by_dest.setdefault(
-            destination, []
-        ).append(failure)
-
-        self._attempt_new_transaction(destination)
-
     def send_device_messages(self, destination):
         if destination == self.server_name or destination == "localhost":
             return
@@ -431,14 +430,11 @@ class TransactionQueue(object):
 
         logger.debug("TX [%s] Starting transaction loop", destination)
 
-        # Drop the logcontext before starting the transaction. It doesn't
-        # really make sense to log all the outbound transactions against
-        # whatever path led us to this point: that's pretty arbitrary really.
-        #
-        # (this also means we can fire off _perform_transaction without
-        # yielding)
-        with logcontext.PreserveLoggingContext():
-            self._transaction_transmission_loop(destination)
+        run_as_background_process(
+            "federation_transaction_transmission_loop",
+            self._transaction_transmission_loop,
+            destination,
+        )
 
     @defer.inlineCallbacks
     def _transaction_transmission_loop(self, destination):
@@ -451,9 +447,6 @@ class TransactionQueue(object):
             # hence why we throw the result away.
             yield get_retry_limiter(destination, self.clock, self.store)
 
-            # XXX: what's this for?
-            yield run_on_reactor()
-
             pending_pdus = []
             while True:
                 device_message_edus, device_stream_id, dev_list_id = (
@@ -472,7 +465,6 @@ class TransactionQueue(object):
                 pending_pdus = self.pending_pdus_by_dest.pop(destination, [])
                 pending_edus = self.pending_edus_by_dest.pop(destination, [])
                 pending_presence = self.pending_presence_by_dest.pop(destination, {})
-                pending_failures = self.pending_failures_by_dest.pop(destination, [])
 
                 pending_edus.extend(
                     self.pending_edus_keyed_by_dest.pop(destination, {}).values()
@@ -500,7 +492,7 @@ class TransactionQueue(object):
                     logger.debug("TX [%s] len(pending_pdus_by_dest[dest]) = %d",
                                  destination, len(pending_pdus))
 
-                if not pending_pdus and not pending_edus and not pending_failures:
+                if not pending_pdus and not pending_edus:
                     logger.debug("TX [%s] Nothing to send", destination)
                     self.last_device_stream_id_by_dest[destination] = (
                         device_stream_id
@@ -510,7 +502,7 @@ class TransactionQueue(object):
                 # END CRITICAL SECTION
 
                 success = yield self._send_new_transaction(
-                    destination, pending_pdus, pending_edus, pending_failures,
+                    destination, pending_pdus, pending_edus,
                 )
                 if success:
                     sent_transactions_counter.inc()
@@ -587,14 +579,12 @@ class TransactionQueue(object):
 
     @measure_func("_send_new_transaction")
     @defer.inlineCallbacks
-    def _send_new_transaction(self, destination, pending_pdus, pending_edus,
-                              pending_failures):
+    def _send_new_transaction(self, destination, pending_pdus, pending_edus):
 
         # Sort based on the order field
         pending_pdus.sort(key=lambda t: t[1])
         pdus = [x[0] for x in pending_pdus]
         edus = pending_edus
-        failures = [x.get_dict() for x in pending_failures]
 
         success = True
 
@@ -604,11 +594,10 @@ class TransactionQueue(object):
 
         logger.debug(
             "TX [%s] {%s} Attempting new transaction"
-            " (pdus: %d, edus: %d, failures: %d)",
+            " (pdus: %d, edus: %d)",
             destination, txn_id,
             len(pdus),
             len(edus),
-            len(failures)
         )
 
         logger.debug("TX [%s] Persisting transaction...", destination)
@@ -620,7 +609,6 @@ class TransactionQueue(object):
             destination=destination,
             pdus=pdus,
             edus=edus,
-            pdu_failures=failures,
         )
 
         self._next_txn_id += 1
@@ -630,12 +618,11 @@ class TransactionQueue(object):
         logger.debug("TX [%s] Persisted transaction", destination)
         logger.info(
             "TX [%s] {%s} Sending transaction [%s],"
-            " (PDUs: %d, EDUs: %d, failures: %d)",
+            " (PDUs: %d, EDUs: %d)",
             destination, txn_id,
             transaction.transaction_id,
             len(pdus),
             len(edus),
-            len(failures),
         )
 
         # Actually send the transaction
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index 6db8efa6dd..1054441ca5 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -14,16 +14,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import logging
+import urllib
+
 from twisted.internet import defer
-from synapse.api.constants import Membership
 
+from synapse.api.constants import Membership
 from synapse.api.urls import FEDERATION_PREFIX as PREFIX
 from synapse.util.logutils import log_function
 
-import logging
-import urllib
-
-
 logger = logging.getLogger(__name__)
 
 
@@ -107,7 +106,7 @@ class TransportLayerClient(object):
             dest (str)
             room_id (str)
             event_tuples (list)
-            limt (int)
+            limit (int)
 
         Returns:
             Deferred: Results in a dict received from the remote homeserver.
@@ -196,7 +195,7 @@ class TransportLayerClient(object):
 
     @defer.inlineCallbacks
     @log_function
-    def make_membership_event(self, destination, room_id, user_id, membership):
+    def make_membership_event(self, destination, room_id, user_id, membership, params):
         """Asks a remote server to build and sign us a membership event
 
         Note that this does not append any events to any graphs.
@@ -206,6 +205,8 @@ class TransportLayerClient(object):
             room_id (str): room to join/leave
             user_id (str): user to be joined/left
             membership (str): one of join/leave
+            params (dict[str, str|Iterable[str]]): Query parameters to include in the
+                request.
 
         Returns:
             Deferred: Succeeds when we get a 2xx HTTP response. The result
@@ -242,6 +243,7 @@ class TransportLayerClient(object):
         content = yield self.client.get_json(
             destination=destination,
             path=path,
+            args=params,
             retry_on_dns_fail=retry_on_dns_fail,
             timeout=20000,
             ignore_backoff=ignore_backoff,
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index 19d09f5422..7a993fd1cf 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -14,25 +14,27 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import functools
+import logging
+import re
+
 from twisted.internet import defer
 
+import synapse
+from synapse.api.errors import Codes, FederationDeniedError, SynapseError
 from synapse.api.urls import FEDERATION_PREFIX as PREFIX
-from synapse.api.errors import Codes, SynapseError, FederationDeniedError
+from synapse.http.endpoint import parse_and_validate_server_name
 from synapse.http.server import JsonResource
 from synapse.http.servlet import (
-    parse_json_object_from_request, parse_integer_from_args, parse_string_from_args,
     parse_boolean_from_args,
+    parse_integer_from_args,
+    parse_json_object_from_request,
+    parse_string_from_args,
 )
+from synapse.types import ThirdPartyInstanceID, get_domain_from_id
+from synapse.util.logcontext import run_in_background
 from synapse.util.ratelimitutils import FederationRateLimiter
 from synapse.util.versionstring import get_version_string
-from synapse.util.logcontext import run_in_background
-from synapse.types import ThirdPartyInstanceID, get_domain_from_id
-
-import functools
-import logging
-import re
-import synapse
-
 
 logger = logging.getLogger(__name__)
 
@@ -99,26 +101,6 @@ class Authenticator(object):
 
         origin = None
 
-        def parse_auth_header(header_str):
-            try:
-                params = auth.split(" ")[1].split(",")
-                param_dict = dict(kv.split("=") for kv in params)
-
-                def strip_quotes(value):
-                    if value.startswith("\""):
-                        return value[1:-1]
-                    else:
-                        return value
-
-                origin = strip_quotes(param_dict["origin"])
-                key = strip_quotes(param_dict["key"])
-                sig = strip_quotes(param_dict["sig"])
-                return (origin, key, sig)
-            except Exception:
-                raise AuthenticationError(
-                    400, "Malformed Authorization header", Codes.UNAUTHORIZED
-                )
-
         auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
 
         if not auth_headers:
@@ -127,8 +109,8 @@ class Authenticator(object):
             )
 
         for auth in auth_headers:
-            if auth.startswith("X-Matrix"):
-                (origin, key, sig) = parse_auth_header(auth)
+            if auth.startswith(b"X-Matrix"):
+                (origin, key, sig) = _parse_auth_header(auth)
                 json_request["origin"] = origin
                 json_request["signatures"].setdefault(origin, {})[key] = sig
 
@@ -165,7 +147,84 @@ class Authenticator(object):
             logger.exception("Error resetting retry timings on %s", origin)
 
 
+def _parse_auth_header(header_bytes):
+    """Parse an X-Matrix auth header
+
+    Args:
+        header_bytes (bytes): header value
+
+    Returns:
+        Tuple[str, str, str]: origin, key id, signature.
+
+    Raises:
+        AuthenticationError if the header could not be parsed
+    """
+    try:
+        header_str = header_bytes.decode('utf-8')
+        params = header_str.split(" ")[1].split(",")
+        param_dict = dict(kv.split("=") for kv in params)
+
+        def strip_quotes(value):
+            if value.startswith("\""):
+                return value[1:-1]
+            else:
+                return value
+
+        origin = strip_quotes(param_dict["origin"])
+
+        # ensure that the origin is a valid server name
+        parse_and_validate_server_name(origin)
+
+        key = strip_quotes(param_dict["key"])
+        sig = strip_quotes(param_dict["sig"])
+        return origin, key, sig
+    except Exception as e:
+        logger.warn(
+            "Error parsing auth header '%s': %s",
+            header_bytes.decode('ascii', 'replace'),
+            e,
+        )
+        raise AuthenticationError(
+            400, "Malformed Authorization header", Codes.UNAUTHORIZED,
+        )
+
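A worked example of the header format accepted above, re-stating the parsing logic standalone (illustration only; the values are made up):

def parse_x_matrix(header_str):
    params = header_str.split(" ")[1].split(",")
    param_dict = dict(kv.split("=") for kv in params)

    def strip_quotes(value):
        return value[1:-1] if value.startswith('"') else value

    return (
        strip_quotes(param_dict["origin"]),
        strip_quotes(param_dict["key"]),
        strip_quotes(param_dict["sig"]),
    )


assert parse_x_matrix(
    'X-Matrix origin=matrix.org,key="ed25519:key1",sig="ABCdef"'
) == ("matrix.org", "ed25519:key1", "ABCdef")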
+
 class BaseFederationServlet(object):
+    """Abstract base class for federation servlet classes.
+
+    The servlet object should have a PATH attribute which takes the form of a regexp to
+    match against the request path (excluding the /federation/v1 prefix).
+
+    The servlet should also implement one or more of on_GET, on_POST, on_PUT, to match
+    the appropriate HTTP method. These methods have the signature:
+
+        on_<METHOD>(self, origin, content, query, **kwargs)
+
+        With arguments:
+
+            origin (unicode|None): The authenticated server_name of the calling server,
+                unless REQUIRE_AUTH is set to False and authentication failed.
+
+            content (unicode|None): decoded json body of the request. None if the
+                request was a GET.
+
+            query (dict[bytes, list[bytes]]): Query params from the request. url-decoded
+                (ie, '+' and '%xx' are decoded) but note that it is *not* utf8-decoded
+                yet.
+
+            **kwargs (dict[unicode, unicode]): the dict mapping keys to path
+                components as specified in the path match regexp.
+
+        Returns:
+            Deferred[(int, object)|None]: either (response code, response object) to
+                 return a JSON response, or None if the request has already been handled.
+
+        Raises:
+            SynapseError: to return an error code
+
+            Exception: other exceptions will be caught, logged, and a 500 will be
+                returned.
+    """
     REQUIRE_AUTH = True
 
     def __init__(self, handler, authenticator, ratelimiter, server_name):
@@ -180,6 +239,18 @@ class BaseFederationServlet(object):
         @defer.inlineCallbacks
         @functools.wraps(func)
         def new_func(request, *args, **kwargs):
+            """ A callback which can be passed to HttpServer.RegisterPaths
+
+            Args:
+                request (twisted.web.http.Request):
+                *args: unused?
+                **kwargs (dict[unicode, unicode]): the dict mapping keys to path
+                    components as specified in the path match regexp.
+
+            Returns:
+                Deferred[(int, object)|None]: (response code, response object) as returned
+                    by the callback method. None if the request has already been handled.
+            """
             content = None
             if request.method in ["PUT", "POST"]:
                 # TODO: Handle other method types? other content types?
@@ -190,10 +261,10 @@ class BaseFederationServlet(object):
             except NoAuthenticationError:
                 origin = None
                 if self.REQUIRE_AUTH:
-                    logger.exception("authenticate_request failed")
+                    logger.warn("authenticate_request failed: missing authentication")
                     raise
-            except Exception:
-                logger.exception("authenticate_request failed")
+            except Exception as e:
+                logger.warn("authenticate_request failed: %s", e)
                 raise
 
             if origin:
@@ -259,11 +330,10 @@ class FederationSendServlet(BaseFederationServlet):
             )
 
             logger.info(
-                "Received txn %s from %s. (PDUs: %d, EDUs: %d, failures: %d)",
+                "Received txn %s from %s. (PDUs: %d, EDUs: %d)",
                 transaction_id, origin,
                 len(transaction_data.get("pdus", [])),
                 len(transaction_data.get("edus", [])),
-                len(transaction_data.get("failures", [])),
             )
 
             # We should ideally be getting this from the security layer.
@@ -361,8 +431,32 @@ class FederationMakeJoinServlet(BaseFederationServlet):
     PATH = "/make_join/(?P<context>[^/]*)/(?P<user_id>[^/]*)"
 
     @defer.inlineCallbacks
-    def on_GET(self, origin, content, query, context, user_id):
-        content = yield self.handler.on_make_join_request(context, user_id)
+    def on_GET(self, origin, _content, query, context, user_id):
+        """
+        Args:
+            origin (unicode): The authenticated server_name of the calling server
+
+            _content (None): (GETs don't have bodies)
+
+            query (dict[bytes, list[bytes]]): Query params from the request.
+
+            **kwargs (dict[unicode, unicode]): the dict mapping keys to path
+                components as specified in the path match regexp.
+
+        Returns:
+            Deferred[(int, object)|None]: either (response code, response object) to
+                 return a JSON response, or None if the request has already been handled.
+        """
+        versions = query.get(b'ver')
+        if versions is not None:
+            supported_versions = [v.decode("utf-8") for v in versions]
+        else:
+            supported_versions = ["1"]
+
+        content = yield self.handler.on_make_join_request(
+            origin, context, user_id,
+            supported_versions=supported_versions,
+        )
         defer.returnValue((200, content))
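A worked example of the version negotiation above; query params arrive keyed and valued as bytes:

query = {b"ver": [b"1", b"2"]}  # e.g. GET /make_join/...?ver=1&ver=2

versions = query.get(b"ver")
if versions is not None:
    supported_versions = [v.decode("utf-8") for v in versions]
else:
    # Older servers that don't send `ver` are assumed to support only v1.
    supported_versions = ["1"]

assert supported_versions == ["1", "2"]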
 
 
@@ -371,15 +465,17 @@ class FederationMakeLeaveServlet(BaseFederationServlet):
 
     @defer.inlineCallbacks
     def on_GET(self, origin, content, query, context, user_id):
-        content = yield self.handler.on_make_leave_request(context, user_id)
+        content = yield self.handler.on_make_leave_request(
+            origin, context, user_id,
+        )
         defer.returnValue((200, content))
 
 
 class FederationSendLeaveServlet(BaseFederationServlet):
-    PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<txid>[^/]*)"
+    PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
 
     @defer.inlineCallbacks
-    def on_PUT(self, origin, content, query, room_id, txid):
+    def on_PUT(self, origin, content, query, room_id, event_id):
         content = yield self.handler.on_send_leave_request(origin, content)
         defer.returnValue((200, content))
 
diff --git a/synapse/federation/units.py b/synapse/federation/units.py
index 01c5b8fe17..c5ab14314e 100644
--- a/synapse/federation/units.py
+++ b/synapse/federation/units.py
@@ -17,10 +17,9 @@
 server protocol.
 """
 
-from synapse.util.jsonobject import JsonEncodedObject
-
 import logging
 
+from synapse.util.jsonobject import JsonEncodedObject
 
 logger = logging.getLogger(__name__)
 
@@ -74,7 +73,6 @@ class Transaction(JsonEncodedObject):
         "previous_ids",
         "pdus",
         "edus",
-        "pdu_failures",
     ]
 
     internal_keys = [
diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py
index 6f11fa374b..b04f4234ca 100644
--- a/synapse/groups/attestations.py
+++ b/synapse/groups/attestations.py
@@ -23,9 +23,9 @@ If a user leaves (or gets kicked out of) a group, either side can still use
 their attestation to "prove" their membership, until the attestation expires.
 Therefore attestations shouldn't be relied on to prove membership in important
 cases, but can for less important situations, e.g. showing a user's membership
-of groups on their profile, showing flairs, etc.abs
+of groups on their profile, showing flairs, etc.
 
-An attestsation is a signed blob of json that looks like:
+An attestation is a signed blob of json that looks like:
 
     {
         "user_id": "@foo:a.example.com",
@@ -38,15 +38,15 @@ An attestsation is a signed blob of json that looks like:
 import logging
 import random
 
+from signedjson.sign import sign_json
+
 from twisted.internet import defer
 
 from synapse.api.errors import SynapseError
+from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.types import get_domain_from_id
 from synapse.util.logcontext import run_in_background
 
-from signedjson.sign import sign_json
-
-
 logger = logging.getLogger(__name__)
 
 
@@ -130,7 +130,7 @@ class GroupAttestionRenewer(object):
         self.attestations = hs.get_groups_attestation_signing()
 
         self._renew_attestations_loop = self.clock.looping_call(
-            self._renew_attestations, 30 * 60 * 1000,
+            self._start_renew_attestations, 30 * 60 * 1000,
         )
 
     @defer.inlineCallbacks
@@ -152,6 +152,9 @@ class GroupAttestionRenewer(object):
 
         defer.returnValue({})
 
+    def _start_renew_attestations(self):
+        return run_as_background_process("renew_attestations", self._renew_attestations)
+
     @defer.inlineCallbacks
     def _renew_attestations(self):
         """Called periodically to check if we need to update any of our attestations
diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py
index 62d20ad130..633c865ed8 100644
--- a/synapse/groups/groups_server.py
+++ b/synapse/groups/groups_server.py
@@ -16,11 +16,12 @@
 
 import logging
 
-from synapse.api.errors import SynapseError
-from synapse.types import GroupID, RoomID, UserID, get_domain_from_id
+from six import string_types
+
 from twisted.internet import defer
 
-from six import string_types
+from synapse.api.errors import SynapseError
+from synapse.types import GroupID, RoomID, UserID, get_domain_from_id
 
 logger = logging.getLogger(__name__)
 
diff --git a/synapse/handlers/__init__.py b/synapse/handlers/__init__.py
index d358842b3e..413425fed1 100644
--- a/synapse/handlers/__init__.py
+++ b/synapse/handlers/__init__.py
@@ -13,13 +13,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from .register import RegistrationHandler
-from .room import RoomContextHandler
-from .message import MessageHandler
-from .federation import FederationHandler
-from .directory import DirectoryHandler
 from .admin import AdminHandler
+from .directory import DirectoryHandler
+from .federation import FederationHandler
 from .identity import IdentityHandler
+from .register import RegistrationHandler
 from .search import SearchHandler
 
 
@@ -44,10 +42,8 @@ class Handlers(object):
 
     def __init__(self, hs):
         self.registration_handler = RegistrationHandler(hs)
-        self.message_handler = MessageHandler(hs)
         self.federation_handler = FederationHandler(hs)
         self.directory_handler = DirectoryHandler(hs)
         self.admin_handler = AdminHandler(hs)
         self.identity_handler = IdentityHandler(hs)
         self.search_handler = SearchHandler(hs)
-        self.room_context_handler = RoomContextHandler(hs)
diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py
index 2d1db0c245..704181d2d3 100644
--- a/synapse/handlers/_base.py
+++ b/synapse/handlers/_base.py
@@ -18,11 +18,10 @@ import logging
 from twisted.internet import defer
 
 import synapse.types
-from synapse.api.constants import Membership, EventTypes
+from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import LimitExceededError
 from synapse.types import UserID
 
-
 logger = logging.getLogger(__name__)
 
 
@@ -113,8 +112,9 @@ class BaseHandler(object):
             guest_access = event.content.get("guest_access", "forbidden")
             if guest_access != "can_join":
                 if context:
+                    current_state_ids = yield context.get_current_state_ids(self.store)
                     current_state = yield self.store.get_events(
-                        list(context.current_state_ids.values())
+                        list(current_state_ids.values())
                     )
                 else:
                     current_state = yield self.state_handler.get_current_state(
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py
index f36b358b45..5d629126fc 100644
--- a/synapse/handlers/admin.py
+++ b/synapse/handlers/admin.py
@@ -13,12 +13,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import logging
+
 from twisted.internet import defer
 
 from ._base import BaseHandler
 
-import logging
-
 logger = logging.getLogger(__name__)
 
 
diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py
index 1c29c43a83..f0f89af7dc 100644
--- a/synapse/handlers/appservice.py
+++ b/synapse/handlers/appservice.py
@@ -13,19 +13,23 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from twisted.internet import defer
+import logging
 
 from six import itervalues
 
+from prometheus_client import Counter
+
+from twisted.internet import defer
+
 import synapse
 from synapse.api.constants import EventTypes
-from synapse.util.metrics import Measure
-from synapse.util.logcontext import (
-    make_deferred_yieldable, run_in_background,
+from synapse.metrics import (
+    event_processing_loop_counter,
+    event_processing_loop_room_count,
 )
-from prometheus_client import Counter
-
-import logging
+from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.util.logcontext import make_deferred_yieldable, run_in_background
+from synapse.util.metrics import Measure
 
 logger = logging.getLogger(__name__)
 
@@ -107,7 +111,9 @@ class ApplicationServicesHandler(object):
                             yield self._check_user_exists(event.state_key)
 
                         if not self.started_scheduler:
-                            self.scheduler.start().addErrback(log_failure)
+                            def start_scheduler():
+                                return self.scheduler.start().addErrback(log_failure)
+                            run_as_background_process("as_scheduler", start_scheduler)
                             self.started_scheduler = True
 
                         # Fork off pushes to these services
@@ -134,6 +140,12 @@ class ApplicationServicesHandler(object):
 
                     events_processed_counter.inc(len(events))
 
+                    event_processing_loop_room_count.labels(
+                        "appservice_sender"
+                    ).inc(len(events_by_room))
+
+                    event_processing_loop_counter.labels("appservice_sender").inc()
+
                     synapse.metrics.event_processing_lag.labels(
                         "appservice_sender").set(now - ts)
                     synapse.metrics.event_processing_last_ts.labels(
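
# The two metrics incremented above are labelled Prometheus counters shared by
# every event-processing loop, with one label value per processor. A
# standalone illustration using the real prometheus_client API (the metric
# name below is made up for the example):
from prometheus_client import Counter

loop_counter = Counter(
    "example_event_processing_loop_count",
    "Iterations of an event processing loop",
    ["name"],
)
loop_counter.labels("appservice_sender").inc()
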
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index 3c0051586d..4a81bd2ba9 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -13,29 +13,34 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
+import logging
+import unicodedata
+
+import attr
+import bcrypt
+import pymacaroons
+from canonicaljson import json
+
 from twisted.internet import defer, threads
+from twisted.web.client import PartialDownloadError
 
-from ._base import BaseHandler
+import synapse.util.stringutils as stringutils
 from synapse.api.constants import LoginType
 from synapse.api.errors import (
-    AuthError, Codes, InteractiveAuthIncompleteError, LoginError, StoreError,
+    AuthError,
+    Codes,
+    InteractiveAuthIncompleteError,
+    LoginError,
+    StoreError,
     SynapseError,
 )
 from synapse.module_api import ModuleApi
 from synapse.types import UserID
-from synapse.util.async import run_on_reactor
 from synapse.util.caches.expiringcache import ExpiringCache
 from synapse.util.logcontext import make_deferred_yieldable
 
-from twisted.web.client import PartialDownloadError
-
-import logging
-import bcrypt
-import pymacaroons
-import simplejson
-
-import synapse.util.stringutils as stringutils
-
+from ._base import BaseHandler
 
 logger = logging.getLogger(__name__)
 
@@ -402,7 +407,7 @@ class AuthHandler(BaseHandler):
         except PartialDownloadError as pde:
             # Twisted is silly
             data = pde.response
-            resp_body = simplejson.loads(data)
+            resp_body = json.loads(data)
 
         if 'success' in resp_body:
             # Note that we do NOT check the hostname here: we explicitly
@@ -423,15 +428,11 @@ class AuthHandler(BaseHandler):
     def _check_msisdn(self, authdict, _):
         return self._check_threepid('msisdn', authdict)
 
-    @defer.inlineCallbacks
     def _check_dummy_auth(self, authdict, _):
-        yield run_on_reactor()
-        defer.returnValue(True)
+        return defer.succeed(True)
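
# The simplification above: a coroutine that only ever produced a constant is
# replaced by defer.succeed, which hands back an already-fired Deferred with
# no reactor round-trip. Equivalent behaviour, illustrated:
from twisted.internet import defer

def check_dummy():
    return defer.succeed(True)

@defer.inlineCallbacks
def caller():
    result = yield check_dummy()  # resolves synchronously with True
    defer.returnValue(result)
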
 
     @defer.inlineCallbacks
     def _check_threepid(self, medium, authdict):
-        yield run_on_reactor()
-
         if 'threepid_creds' not in authdict:
             raise LoginError(400, "Missing threepid_creds", Codes.MISSING_PARAM)
 
@@ -519,6 +520,7 @@ class AuthHandler(BaseHandler):
         """
         logger.info("Logging in user %s on device %s", user_id, device_id)
         access_token = yield self.issue_access_token(user_id, device_id)
+        yield self.auth.check_auth_blocking(user_id)
 
         # the device *should* have been registered before we got here; however,
         # it's possible we raced against a DELETE operation. The thing we
@@ -626,6 +628,7 @@ class AuthHandler(BaseHandler):
         # special case to check for "password" for the check_password interface
         # for the auth providers
         password = login_submission.get("password")
+
         if login_type == LoginType.PASSWORD:
             if not self._password_enabled:
                 raise SynapseError(400, "Password login has been disabled.")
@@ -707,9 +710,10 @@ class AuthHandler(BaseHandler):
         multiple inexact matches.
 
         Args:
-            user_id (str): complete @user:id
+            user_id (unicode): complete @user:id
+            password (unicode): the provided password
         Returns:
-            (str) the canonical_user_id, or None if unknown user / bad password
+            (unicode) the canonical_user_id, or None if unknown user / bad password
         """
         lookupres = yield self._find_user_id_and_pwd_hash(user_id)
         if not lookupres:
@@ -728,15 +732,18 @@ class AuthHandler(BaseHandler):
                                                   device_id)
         defer.returnValue(access_token)
 
+    @defer.inlineCallbacks
     def validate_short_term_login_token_and_get_user_id(self, login_token):
         auth_api = self.hs.get_auth()
+        user_id = None
         try:
             macaroon = pymacaroons.Macaroon.deserialize(login_token)
             user_id = auth_api.get_user_id_from_macaroon(macaroon)
             auth_api.validate_macaroon(macaroon, "login", True, user_id)
-            return user_id
         except Exception:
             raise AuthError(403, "Invalid token", errcode=Codes.FORBIDDEN)
+        yield self.auth.check_auth_blocking(user_id)
+        defer.returnValue(user_id)
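
# A standalone sketch of the macaroon round-trip this method performs, using
# pymacaroons directly with made-up values. The real token also carries a
# "time < ..." expiry caveat, and the change above additionally applies
# check_auth_blocking before the user id is returned:
import pymacaroons

secret = "macaroon-secret-key"  # stands in for hs.config.macaroon_secret_key
m = pymacaroons.Macaroon(location="example.com", identifier="key", key=secret)
m.add_first_party_caveat("gen = 1")
m.add_first_party_caveat("user_id = @alice:example.com")
m.add_first_party_caveat("type = login")
token = m.serialize()

v = pymacaroons.Verifier()
v.satisfy_exact("gen = 1")
v.satisfy_exact("type = login")
v.satisfy_general(lambda caveat: caveat.startswith("user_id = "))
assert v.verify(pymacaroons.Macaroon.deserialize(token), secret)
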
 
     @defer.inlineCallbacks
     def delete_access_token(self, access_token):
@@ -821,14 +828,37 @@ class AuthHandler(BaseHandler):
 
     @defer.inlineCallbacks
     def delete_threepid(self, user_id, medium, address):
+        """Attempts to unbind the 3pid on the identity servers and deletes it
+        from the local database.
+
+        Args:
+            user_id (str)
+            medium (str)
+            address (str)
+
+        Returns:
+            Deferred[bool]: Returns True if successfully unbound the 3pid on
+            the identity server, False if identity server doesn't support the
+            unbind API.
+        """
+
         # 'Canonicalise' email addresses as per above
         if medium == 'email':
             address = address.lower()
 
-        ret = yield self.store.user_delete_threepid(
+        identity_handler = self.hs.get_handlers().identity_handler
+        result = yield identity_handler.try_unbind_threepid(
+            user_id,
+            {
+                'medium': medium,
+                'address': address,
+            },
+        )
+
+        yield self.store.user_delete_threepid(
             user_id, medium, address,
         )
-        defer.returnValue(ret)
+        defer.returnValue(result)
 
     def _save_session(self, session):
         # TODO: Persistent storage
@@ -840,45 +870,62 @@ class AuthHandler(BaseHandler):
         """Computes a secure hash of password.
 
         Args:
-            password (str): Password to hash.
+            password (unicode): Password to hash.
 
         Returns:
-            Deferred(str): Hashed password.
+            Deferred(unicode): Hashed password.
         """
         def _do_hash():
-            return bcrypt.hashpw(password.encode('utf8') + self.hs.config.password_pepper,
-                                 bcrypt.gensalt(self.bcrypt_rounds))
-
-        return make_deferred_yieldable(threads.deferToThread(_do_hash))
+            # Normalise the Unicode in the password
+            pw = unicodedata.normalize("NFKC", password)
+
+            return bcrypt.hashpw(
+                pw.encode('utf8') + self.hs.config.password_pepper.encode("utf8"),
+                bcrypt.gensalt(self.bcrypt_rounds),
+            ).decode('ascii')
+
+        return make_deferred_yieldable(
+            threads.deferToThreadPool(
+                self.hs.get_reactor(), self.hs.get_reactor().getThreadPool(), _do_hash
+            ),
+        )
 
     def validate_hash(self, password, stored_hash):
         """Validates that self.hash(password) == stored_hash.
 
         Args:
-            password (str): Password to hash.
-            stored_hash (str): Expected hash value.
+            password (unicode): Password to hash.
+            stored_hash (unicode): Expected hash value.
 
         Returns:
             Deferred(bool): Whether self.hash(password) == stored_hash.
         """
 
         def _do_validate_hash():
+            # Normalise the Unicode in the password
+            pw = unicodedata.normalize("NFKC", password)
+
             return bcrypt.checkpw(
-                password.encode('utf8') + self.hs.config.password_pepper,
+                pw.encode('utf8') + self.hs.config.password_pepper.encode("utf8"),
                 stored_hash.encode('utf8')
             )
 
         if stored_hash:
-            return make_deferred_yieldable(threads.deferToThread(_do_validate_hash))
+            return make_deferred_yieldable(
+                threads.deferToThreadPool(
+                    self.hs.get_reactor(),
+                    self.hs.get_reactor().getThreadPool(),
+                    _do_validate_hash,
+                ),
+            )
         else:
             return defer.succeed(False)
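
# The hashing scheme above as a self-contained sketch: passwords are
# NFKC-normalised so equivalent Unicode spellings compare equal, the
# configured pepper is appended, and bcrypt does the rest. The pepper value
# is made up here; synapse reads it from hs.config.password_pepper.
import unicodedata
import bcrypt

pepper = u"example-pepper"

def hash_password(password, rounds=12):
    pw = unicodedata.normalize("NFKC", password)
    return bcrypt.hashpw(
        pw.encode('utf8') + pepper.encode('utf8'),
        bcrypt.gensalt(rounds),
    ).decode('ascii')

def check_password(password, stored_hash):
    pw = unicodedata.normalize("NFKC", password)
    return bcrypt.checkpw(
        pw.encode('utf8') + pepper.encode('utf8'),
        stored_hash.encode('utf8'),
    )

assert check_password(u"p\u00e4ssword", hash_password(u"p\u00e4ssword"))
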
 
 
-class MacaroonGeneartor(object):
-    def __init__(self, hs):
-        self.clock = hs.get_clock()
-        self.server_name = hs.config.server_name
-        self.macaroon_secret_key = hs.config.macaroon_secret_key
+@attr.s
+class MacaroonGenerator(object):
+
+    hs = attr.ib()
 
     def generate_access_token(self, user_id, extra_caveats=None):
         extra_caveats = extra_caveats or []
@@ -896,7 +943,7 @@ class MacaroonGeneartor(object):
     def generate_short_term_login_token(self, user_id, duration_in_ms=(2 * 60 * 1000)):
         macaroon = self._generate_base_macaroon(user_id)
         macaroon.add_first_party_caveat("type = login")
-        now = self.clock.time_msec()
+        now = self.hs.get_clock().time_msec()
         expiry = now + duration_in_ms
         macaroon.add_first_party_caveat("time < %d" % (expiry,))
         return macaroon.serialize()
@@ -908,9 +955,9 @@ class MacaroonGeneartor(object):
 
     def _generate_base_macaroon(self, user_id):
         macaroon = pymacaroons.Macaroon(
-            location=self.server_name,
+            location=self.hs.config.server_name,
             identifier="key",
-            key=self.macaroon_secret_key)
+            key=self.hs.config.macaroon_secret_key)
         macaroon.add_first_party_caveat("gen = 1")
         macaroon.add_first_party_caveat("user_id = %s" % (user_id,))
         return macaroon
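
# The @attr.s conversion above has attrs generate __init__, so the generator
# keeps just the homeserver reference and derives clock and config values on
# demand (fixing the old "MacaroonGeneartor" spelling along the way). The
# attrs idiom itself, in miniature with a hypothetical class:
import attr

@attr.s
class Point(object):
    x = attr.ib()
    y = attr.ib()

assert Point(1, 2) == Point(x=1, y=2)  # __init__ and __eq__ both generated
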
diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py
index c5e92f6214..b078df4a76 100644
--- a/synapse/handlers/deactivate_account.py
+++ b/synapse/handlers/deactivate_account.py
@@ -12,13 +12,15 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from twisted.internet import defer, reactor
+import logging
 
-from ._base import BaseHandler
+from twisted.internet import defer
+
+from synapse.api.errors import SynapseError
 from synapse.types import UserID, create_requester
 from synapse.util.logcontext import run_in_background
 
-import logging
+from ._base import BaseHandler
 
 logger = logging.getLogger(__name__)
 
@@ -30,6 +32,7 @@ class DeactivateAccountHandler(BaseHandler):
         self._auth_handler = hs.get_auth_handler()
         self._device_handler = hs.get_device_handler()
         self._room_member_handler = hs.get_room_member_handler()
+        self._identity_handler = hs.get_handlers().identity_handler
         self.user_directory_handler = hs.get_user_directory_handler()
 
         # Flag that indicates whether the process to part users from rooms is running
@@ -37,29 +40,58 @@ class DeactivateAccountHandler(BaseHandler):
 
         # Start the user parter loop so it can resume parting users from rooms where
         # it left off (if it has work left to do).
-        reactor.callWhenRunning(self._start_user_parting)
+        hs.get_reactor().callWhenRunning(self._start_user_parting)
 
     @defer.inlineCallbacks
-    def deactivate_account(self, user_id):
+    def deactivate_account(self, user_id, erase_data):
         """Deactivate a user's account
 
         Args:
             user_id (str): ID of user to be deactivated
+            erase_data (bool): whether to GDPR-erase the user's data
 
         Returns:
-            Deferred
+            Deferred[bool]: True if identity server supports removing
+            threepids, otherwise False.
         """
         # FIXME: Theoretically there is a race here wherein user resets
         # password using threepid.
 
-        # first delete any devices belonging to the user, which will also
+        # delete threepids first. We remove these from the IS so if this fails,
+        # leave the user still active so they can try again.
+        # Ideally we would prevent password resets and then do this in the
+        # background thread.
+
+        # This will be set to false if the identity server doesn't support
+        # unbinding
+        identity_server_supports_unbinding = True
+
+        threepids = yield self.store.user_get_threepids(user_id)
+        for threepid in threepids:
+            try:
+                result = yield self._identity_handler.try_unbind_threepid(
+                    user_id,
+                    {
+                        'medium': threepid['medium'],
+                        'address': threepid['address'],
+                    },
+                )
+                identity_server_supports_unbinding &= result
+            except Exception:
+                # Do we want this to be a fatal error or should we carry on?
+                logger.exception("Failed to remove threepid from ID server")
+                raise SynapseError(400, "Failed to remove threepid from ID server")
+            yield self.store.user_delete_threepid(
+                user_id, threepid['medium'], threepid['address'],
+            )
+
+        # delete any devices belonging to the user, which will also
         # delete corresponding access tokens.
         yield self._device_handler.delete_all_devices_for_user(user_id)
         # then delete any remaining access tokens which weren't associated with
         # a device.
         yield self._auth_handler.delete_access_tokens_for_user(user_id)
 
-        yield self.store.user_delete_threepids(user_id)
         yield self.store.user_set_password_hash(user_id, None)
 
         # Add the user to a table of users pending deactivation (ie.
@@ -69,10 +101,17 @@ class DeactivateAccountHandler(BaseHandler):
         # delete from user directory
         yield self.user_directory_handler.handle_user_deactivated(user_id)
 
+        # Mark the user as erased, if they asked for that
+        if erase_data:
+            logger.info("Marking %s as erased", user_id)
+            yield self.store.mark_user_erased(user_id)
+
         # Now start the process that goes through that list and
         # parts users from rooms (if it isn't already running)
         self._start_user_parting()
 
+        defer.returnValue(identity_server_supports_unbinding)
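
# The bookkeeping above in miniature: the deferred result is True only if
# every identity server consulted supported the unbind API. Illustrative
# values only:
unbind_results = [True, True, False]  # one outcome per bound threepid
supports_unbinding = True
for result in unbind_results:
    supports_unbinding &= result
assert supports_unbinding is False
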
+
     def _start_user_parting(self):
         """
         Start the process that goes through the table of users
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index 11c6fb3657..9e017116a9 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -12,21 +12,23 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import logging
+
+from six import iteritems, itervalues
+
+from twisted.internet import defer
+
 from synapse.api import errors
 from synapse.api.constants import EventTypes
 from synapse.api.errors import FederationDeniedError
+from synapse.types import RoomStreamToken, get_domain_from_id
 from synapse.util import stringutils
-from synapse.util.async import Linearizer
+from synapse.util.async_helpers import Linearizer
 from synapse.util.caches.expiringcache import ExpiringCache
-from synapse.util.retryutils import NotRetryingDestination
 from synapse.util.metrics import measure_func
-from synapse.types import get_domain_from_id, RoomStreamToken
-from twisted.internet import defer
-from ._base import BaseHandler
-
-import logging
+from synapse.util.retryutils import NotRetryingDestination
 
-from six import itervalues, iteritems
+from ._base import BaseHandler
 
 logger = logging.getLogger(__name__)
 
@@ -537,7 +539,7 @@ class DeviceListEduUpdater(object):
                 yield self.device_handler.notify_device_update(user_id, device_ids)
             else:
                 # Simply update the single device, since we know that is the only
-                # change (becuase of the single prev_id matching the current cache)
+                # change (because of the single prev_id matching the current cache)
                 for device_id, stream_id, prev_ids, content in pending_updates:
                     yield self.store.update_remote_device_list_cache_entry(
                         user_id, device_id, content, stream_id,
diff --git a/synapse/handlers/devicemessage.py b/synapse/handlers/devicemessage.py
index f147a20b73..2e2e5261de 100644
--- a/synapse/handlers/devicemessage.py
+++ b/synapse/handlers/devicemessage.py
@@ -18,10 +18,9 @@ import logging
 from twisted.internet import defer
 
 from synapse.api.errors import SynapseError
-from synapse.types import get_domain_from_id, UserID
+from synapse.types import UserID, get_domain_from_id
 from synapse.util.stringutils import random_string
 
-
 logger = logging.getLogger(__name__)
 
 
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index c5b6e75e03..ef866da1b6 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -14,15 +14,16 @@
 # limitations under the License.
 
 
+import logging
+import string
+
 from twisted.internet import defer
-from ._base import BaseHandler
 
-from synapse.api.errors import SynapseError, Codes, CodeMessageException, AuthError
 from synapse.api.constants import EventTypes
+from synapse.api.errors import AuthError, CodeMessageException, Codes, SynapseError
 from synapse.types import RoomAlias, UserID, get_domain_from_id
 
-import logging
-import string
+from ._base import BaseHandler
 
 logger = logging.getLogger(__name__)
 
diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py
index 8a2d177539..5816bf8b4f 100644
--- a/synapse/handlers/e2e_keys.py
+++ b/synapse/handlers/e2e_keys.py
@@ -14,17 +14,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import simplejson as json
 import logging
 
-from canonicaljson import encode_canonical_json
-from twisted.internet import defer
 from six import iteritems
 
-from synapse.api.errors import (
-    SynapseError, CodeMessageException, FederationDeniedError,
-)
-from synapse.types import get_domain_from_id, UserID
+from canonicaljson import encode_canonical_json, json
+
+from twisted.internet import defer
+
+from synapse.api.errors import CodeMessageException, FederationDeniedError, SynapseError
+from synapse.types import UserID, get_domain_from_id
 from synapse.util.logcontext import make_deferred_yieldable, run_in_background
 from synapse.util.retryutils import NotRetryingDestination
 
@@ -80,7 +79,7 @@ class E2eKeysHandler(object):
             else:
                 remote_queries[user_id] = device_ids
 
-        # Firt get local devices.
+        # First get local devices.
         failures = {}
         results = {}
         if local_query:
@@ -357,7 +356,7 @@ def _exception_to_failure(e):
     # include ConnectionRefused and other errors
     #
     # Note that some Exceptions (notably twisted's ResponseFailed etc) don't
-    # give a string for e.message, which simplejson then fails to serialize.
+    # give a string for e.message, which json then fails to serialize.
     return {
         "status": 503, "message": str(e.message),
     }
diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py
index 8bc642675f..f772e62c28 100644
--- a/synapse/handlers/events.py
+++ b/synapse/handlers/events.py
@@ -13,20 +13,21 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import logging
+import random
+
 from twisted.internet import defer
 
-from synapse.util.logutils import log_function
-from synapse.types import UserID
-from synapse.events.utils import serialize_event
-from synapse.api.constants import Membership, EventTypes
+from synapse.api.constants import EventTypes, Membership
+from synapse.api.errors import AuthError
 from synapse.events import EventBase
+from synapse.events.utils import serialize_event
+from synapse.types import UserID
+from synapse.util.logutils import log_function
+from synapse.visibility import filter_events_for_client
 
 from ._base import BaseHandler
 
-import logging
-import random
-
-
 logger = logging.getLogger(__name__)
 
 
@@ -130,11 +131,13 @@ class EventStreamHandler(BaseHandler):
 class EventHandler(BaseHandler):
 
     @defer.inlineCallbacks
-    def get_event(self, user, event_id):
+    def get_event(self, user, room_id, event_id):
         """Retrieve a single specified event.
 
         Args:
             user (synapse.types.UserID): The user requesting the event
+            room_id (str|None): The expected room id. We'll return None if the
+                event's room does not match.
             event_id (str): The event ID to obtain.
         Returns:
             dict: An event, or None if there is no event matching this ID.
@@ -143,13 +146,26 @@ class EventHandler(BaseHandler):
             AuthError if the user does not have the rights to inspect this
             event.
         """
-        event = yield self.store.get_event(event_id)
+        event = yield self.store.get_event(event_id, check_room_id=room_id)
 
         if not event:
             defer.returnValue(None)
             return
 
-        if hasattr(event, "room_id"):
-            yield self.auth.check_joined_room(event.room_id, user.to_string())
+        users = yield self.store.get_users_in_room(event.room_id)
+        is_peeking = user.to_string() not in users
+
+        filtered = yield filter_events_for_client(
+            self.store,
+            user.to_string(),
+            [event],
+            is_peeking=is_peeking
+        )
+
+        if not filtered:
+            raise AuthError(
+                403,
+                "You don't have permission to access that event."
+            )
 
         defer.returnValue(event)
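
# The new permission check in outline: rather than requiring room membership
# outright, the event is run through the standard client-visibility filter,
# with users not currently in the room treated as "peeking". A sketch with
# hypothetical store/user arguments:
from twisted.internet import defer
from synapse.visibility import filter_events_for_client

@defer.inlineCallbacks
def user_can_see_event(store, user_id, event):
    users = yield store.get_users_in_room(event.room_id)
    filtered = yield filter_events_for_client(
        store, user_id, [event], is_peeking=(user_id not in users),
    )
    defer.returnValue(bool(filtered))
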
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 495ac4c648..0ebf0fd188 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -20,37 +20,51 @@ import itertools
 import logging
 import sys
 
+import six
+from six import iteritems, itervalues
+from six.moves import http_client, zip
+
 from signedjson.key import decode_verify_key_bytes
 from signedjson.sign import verify_signed_json
-import six
-from six.moves import http_client
-from six import iteritems
-from twisted.internet import defer
 from unpaddedbase64 import decode_base64
 
-from ._base import BaseHandler
+from twisted.internet import defer
 
+from synapse.api.constants import (
+    KNOWN_ROOM_VERSIONS,
+    EventTypes,
+    Membership,
+    RejectedReason,
+)
 from synapse.api.errors import (
-    AuthError, FederationError, StoreError, CodeMessageException, SynapseError,
+    AuthError,
+    CodeMessageException,
     FederationDeniedError,
+    FederationError,
+    StoreError,
+    SynapseError,
 )
-from synapse.api.constants import EventTypes, Membership, RejectedReason
-from synapse.events.validator import EventValidator
-from synapse.util import unwrapFirstError, logcontext
-from synapse.util.metrics import measure_func
-from synapse.util.logutils import log_function
-from synapse.util.async import run_on_reactor, Linearizer
-from synapse.util.frozenutils import unfreeze
 from synapse.crypto.event_signing import (
-    compute_event_signature, add_hashes_and_signatures,
+    add_hashes_and_signatures,
+    compute_event_signature,
 )
+from synapse.events.validator import EventValidator
+from synapse.replication.http.federation import (
+    ReplicationCleanRoomRestServlet,
+    ReplicationFederationSendEventsRestServlet,
+)
+from synapse.replication.http.membership import ReplicationUserJoinedLeftRoomRestServlet
+from synapse.state import resolve_events_with_factory
 from synapse.types import UserID, get_domain_from_id
-
-from synapse.events.utils import prune_event
-
+from synapse.util import logcontext, unwrapFirstError
+from synapse.util.async_helpers import Linearizer
+from synapse.util.distributor import user_joined_room
+from synapse.util.frozenutils import unfreeze
+from synapse.util.logutils import log_function
 from synapse.util.retryutils import NotRetryingDestination
+from synapse.visibility import filter_events_for_server
 
-from synapse.util.distributor import user_joined_room
+from ._base import BaseHandler
 
 logger = logging.getLogger(__name__)
 
@@ -72,7 +86,7 @@ class FederationHandler(BaseHandler):
         self.hs = hs
 
         self.store = hs.get_datastore()
-        self.replication_layer = hs.get_federation_client()
+        self.federation_client = hs.get_federation_client()
         self.state_handler = hs.get_state_handler()
         self.server_name = hs.hostname
         self.keyring = hs.get_keyring()
@@ -82,6 +96,18 @@ class FederationHandler(BaseHandler):
         self.spam_checker = hs.get_spam_checker()
         self.event_creation_handler = hs.get_event_creation_handler()
         self._server_notices_mxid = hs.config.server_notices_mxid
+        self.config = hs.config
+        self.http_client = hs.get_simple_http_client()
+
+        self._send_events_to_master = (
+            ReplicationFederationSendEventsRestServlet.make_client(hs)
+        )
+        self._notify_user_membership_change = (
+            ReplicationUserJoinedLeftRoomRestServlet.make_client(hs)
+        )
+        self._clean_room_for_join_client = (
+            ReplicationCleanRoomRestServlet.make_client(hs)
+        )
 
         # When joining a room we need to queue any events for that room up
         self.room_queues = {}
@@ -89,7 +115,9 @@ class FederationHandler(BaseHandler):
 
     @defer.inlineCallbacks
     @log_function
-    def on_receive_pdu(self, origin, pdu, get_missing=True):
+    def on_receive_pdu(
+            self, origin, pdu, get_missing=True, sent_to_us_directly=False,
+    ):
         """ Process a PDU received via a federation /send/ transaction, or
         via backfill of missing prev_events
 
@@ -103,8 +131,10 @@ class FederationHandler(BaseHandler):
         """
 
         # We reprocess pdus when we have seen them only as outliers
-        existing = yield self.get_persisted_pdu(
-            origin, pdu.event_id, do_auth=False
+        existing = yield self.store.get_event(
+            pdu.event_id,
+            allow_none=True,
+            allow_rejected=True,
         )
 
         # FIXME: Currently we fetch an event again when we already have it
@@ -161,14 +191,11 @@ class FederationHandler(BaseHandler):
                     "Ignoring PDU %s for room %s from %s as we've left the room!",
                     pdu.event_id, pdu.room_id, origin,
                 )
-                return
+                defer.returnValue(None)
 
         state = None
-
         auth_chain = []
 
-        fetch_state = False
-
         # Get missing pdus if necessary.
         if not pdu.internal_metadata.is_outlier():
             # We only backfill backwards to the min depth.
@@ -223,26 +250,61 @@ class FederationHandler(BaseHandler):
                         list(prevs - seen)[:5],
                     )
 
-            if prevs - seen:
-                logger.info(
-                    "Still missing %d events for room %r: %r...",
-                    len(prevs - seen), pdu.room_id, list(prevs - seen)[:5]
+            if sent_to_us_directly and prevs - seen:
+                # If they have sent it to us directly, and the server
+                # isn't telling us about the auth events that it's
+                # made a message referencing, we explode
+                raise FederationError(
+                    "ERROR",
+                    403,
+                    (
+                        "Your server isn't divulging details about prev_events "
+                        "referenced in this event."
+                    ),
+                    affected=pdu.event_id,
                 )
-                fetch_state = True
+            elif prevs - seen:
+                # Calculate the state of the previous events, and
+                # de-conflict them to find the current state.
+                state_groups = []
+                auth_chains = set()
+                try:
+                    # Get the state of the events we know about
+                    ours = yield self.store.get_state_groups(pdu.room_id, list(seen))
+                    state_groups.append(ours)
+
+                    # Ask the remote server for the states we don't
+                    # know about
+                    for p in prevs - seen:
+                        state, got_auth_chain = (
+                            yield self.federation_client.get_state_for_room(
+                                origin, pdu.room_id, p
+                            )
+                        )
+                        auth_chains.update(got_auth_chain)
+                        state_group = {(x.type, x.state_key): x.event_id for x in state}
+                        state_groups.append(state_group)
+
+                    # Resolve any conflicting state
+                    def fetch(ev_ids):
+                        return self.store.get_events(
+                            ev_ids, get_prev_content=False, check_redacted=False
+                        )
 
-        if fetch_state:
-            # We need to get the state at this event, since we haven't
-            # processed all the prev events.
-            logger.debug(
-                "_handle_new_pdu getting state for %s",
-                pdu.room_id
-            )
-            try:
-                state, auth_chain = yield self.replication_layer.get_state_for_room(
-                    origin, pdu.room_id, pdu.event_id,
-                )
-            except Exception:
-                logger.exception("Failed to get state for event: %s", pdu.event_id)
+                    room_version = yield self.store.get_room_version(pdu.room_id)
+                    state_map = yield resolve_events_with_factory(
+                        room_version, state_groups, {pdu.event_id: pdu}, fetch
+                    )
+
+                    state = (yield self.store.get_events(state_map.values())).values()
+                    auth_chain = list(auth_chains)
+                except Exception:
+                    raise FederationError(
+                        "ERROR",
+                        403,
+                        "We can't get valid state history.",
+                        affected=pdu.event_id,
+                    )
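
# The gating above, reduced to its set arithmetic: "prevs" are the event's
# prev_event ids and "seen" the subset already in our store. For an event
# received directly over /send/ any gap is now fatal, while backfilled events
# instead trigger state resolution against the remote's answers. Illustrative
# ids:
prevs = {"$a:remote", "$b:remote", "$c:remote"}
seen = {"$a:remote"}
missing = prevs - seen
if missing:  # here: {"$b:remote", "$c:remote"}
    pass  # sent_to_us_directly -> FederationError; otherwise resolve state
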
 
         yield self._process_received_pdu(
             origin,
@@ -299,7 +361,7 @@ class FederationHandler(BaseHandler):
         #
         # see https://github.com/matrix-org/synapse/pull/1744
 
-        missing_events = yield self.replication_layer.get_missing_events(
+        missing_events = yield self.federation_client.get_missing_events(
             origin,
             pdu.room_id,
             earliest_events_ids=list(latest),
@@ -320,11 +382,17 @@ class FederationHandler(BaseHandler):
 
         for e in missing_events:
             logger.info("Handling found event %s", e.event_id)
-            yield self.on_receive_pdu(
-                origin,
-                e,
-                get_missing=False
-            )
+            try:
+                yield self.on_receive_pdu(
+                    origin,
+                    e,
+                    get_missing=False
+                )
+            except FederationError as e:
+                if e.code == 403:
+                    logger.warn("Event %s failed history check.")
+                else:
+                    raise
 
     @log_function
     @defer.inlineCallbacks
@@ -355,7 +423,7 @@ class FederationHandler(BaseHandler):
             )
 
             try:
-                event_stream_id, max_stream_id = yield self._persist_auth_tree(
+                yield self._persist_auth_tree(
                     origin, auth_chain, state, event
                 )
             except AuthError as e:
@@ -399,7 +467,7 @@ class FederationHandler(BaseHandler):
                 yield self._handle_new_events(origin, event_infos)
 
             try:
-                context, event_stream_id, max_stream_id = yield self._handle_new_event(
+                context = yield self._handle_new_event(
                     origin,
                     event,
                     state=state,
@@ -424,24 +492,16 @@ class FederationHandler(BaseHandler):
             except StoreError:
                 logger.exception("Failed to store room.")
 
-        extra_users = []
-        if event.type == EventTypes.Member:
-            target_user_id = event.state_key
-            target_user = UserID.from_string(target_user_id)
-            extra_users.append(target_user)
-
-        self.notifier.on_new_room_event(
-            event, event_stream_id, max_stream_id,
-            extra_users=extra_users
-        )
-
         if event.type == EventTypes.Member:
             if event.membership == Membership.JOIN:
                 # Only fire user_joined_room if the user has actually
                 # joined the room. Don't bother if the user is just
                 # changing their profile info.
                 newly_joined = True
-                prev_state_id = context.prev_state_ids.get(
+
+                prev_state_ids = yield context.get_prev_state_ids(self.store)
+
+                prev_state_id = prev_state_ids.get(
                     (event.type, event.state_key)
                 )
                 if prev_state_id:
@@ -453,84 +513,7 @@ class FederationHandler(BaseHandler):
 
                 if newly_joined:
                     user = UserID.from_string(event.state_key)
-                    yield user_joined_room(self.distributor, user, event.room_id)
-
-    @measure_func("_filter_events_for_server")
-    @defer.inlineCallbacks
-    def _filter_events_for_server(self, server_name, room_id, events):
-        event_to_state_ids = yield self.store.get_state_ids_for_events(
-            frozenset(e.event_id for e in events),
-            types=(
-                (EventTypes.RoomHistoryVisibility, ""),
-                (EventTypes.Member, None),
-            )
-        )
-
-        # We only want to pull out member events that correspond to the
-        # server's domain.
-
-        def check_match(id):
-            try:
-                return server_name == get_domain_from_id(id)
-            except Exception:
-                return False
-
-        # Parses mapping `event_id -> (type, state_key) -> state event_id`
-        # to get all state ids that we're interested in.
-        event_map = yield self.store.get_events([
-            e_id
-            for key_to_eid in list(event_to_state_ids.values())
-            for key, e_id in key_to_eid.items()
-            if key[0] != EventTypes.Member or check_match(key[1])
-        ])
-
-        event_to_state = {
-            e_id: {
-                key: event_map[inner_e_id]
-                for key, inner_e_id in key_to_eid.iteritems()
-                if inner_e_id in event_map
-            }
-            for e_id, key_to_eid in event_to_state_ids.iteritems()
-        }
-
-        def redact_disallowed(event, state):
-            if not state:
-                return event
-
-            history = state.get((EventTypes.RoomHistoryVisibility, ''), None)
-            if history:
-                visibility = history.content.get("history_visibility", "shared")
-                if visibility in ["invited", "joined"]:
-                    # We now loop through all state events looking for
-                    # membership states for the requesting server to determine
-                    # if the server is either in the room or has been invited
-                    # into the room.
-                    for ev in state.itervalues():
-                        if ev.type != EventTypes.Member:
-                            continue
-                        try:
-                            domain = get_domain_from_id(ev.state_key)
-                        except Exception:
-                            continue
-
-                        if domain != server_name:
-                            continue
-
-                        memtype = ev.membership
-                        if memtype == Membership.JOIN:
-                            return event
-                        elif memtype == Membership.INVITE:
-                            if visibility == "invited":
-                                return event
-                    else:
-                        return prune_event(event)
-
-            return event
-
-        defer.returnValue([
-            redact_disallowed(e, event_to_state[e.event_id])
-            for e in events
-        ])
+                    yield self.user_joined_room(user, event.room_id)
 
     @log_function
     @defer.inlineCallbacks
@@ -551,7 +534,7 @@ class FederationHandler(BaseHandler):
         if dest == self.server_name:
             raise SynapseError(400, "Can't backfill from self.")
 
-        events = yield self.replication_layer.backfill(
+        events = yield self.federation_client.backfill(
             dest,
             room_id,
             limit=limit,
@@ -599,7 +582,7 @@ class FederationHandler(BaseHandler):
         state_events = {}
         events_to_state = {}
         for e_id in edges:
-            state, auth = yield self.replication_layer.get_state_for_room(
+            state, auth = yield self.federation_client.get_state_for_room(
                 destination=dest,
                 room_id=room_id,
                 event_id=e_id
@@ -641,7 +624,7 @@ class FederationHandler(BaseHandler):
                 results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
                     [
                         logcontext.run_in_background(
-                            self.replication_layer.get_pdu,
+                            self.federation_client.get_pdu,
                             [dest],
                             event_id,
                             outlier=True,
@@ -763,7 +746,7 @@ class FederationHandler(BaseHandler):
             """
             joined_users = [
                 (state_key, int(event.depth))
-                for (e_type, state_key), event in state.iteritems()
+                for (e_type, state_key), event in iteritems(state)
                 if e_type == EventTypes.Member
                 and event.membership == Membership.JOIN
             ]
@@ -780,7 +763,7 @@ class FederationHandler(BaseHandler):
                 except Exception:
                     pass
 
-            return sorted(joined_domains.iteritems(), key=lambda d: d[1])
+            return sorted(joined_domains.items(), key=lambda d: d[1])
 
         curr_domains = get_domains_from_state(curr_state)
 
@@ -843,7 +826,7 @@ class FederationHandler(BaseHandler):
         tried_domains = set(likely_domains)
         tried_domains.add(self.server_name)
 
-        event_ids = list(extremities.iterkeys())
+        event_ids = list(extremities.keys())
 
         logger.debug("calling resolve_state_groups in _maybe_backfill")
         resolve = logcontext.preserve_fn(
@@ -859,15 +842,15 @@ class FederationHandler(BaseHandler):
         states = dict(zip(event_ids, [s.state for s in states]))
 
         state_map = yield self.store.get_events(
-            [e_id for ids in states.itervalues() for e_id in ids.itervalues()],
+            [e_id for ids in itervalues(states) for e_id in itervalues(ids)],
             get_prev_content=False
         )
         states = {
             key: {
                 k: state_map[e_id]
-                for k, e_id in state_dict.iteritems()
+                for k, e_id in iteritems(state_dict)
                 if e_id in state_map
-            } for key, state_dict in states.iteritems()
+            } for key, state_dict in iteritems(states)
         }
 
         for e_id, _ in sorted_extremeties_tuple:
@@ -922,7 +905,7 @@ class FederationHandler(BaseHandler):
 
         Invites must be signed by the invitee's server before distribution.
         """
-        pdu = yield self.replication_layer.send_invite(
+        pdu = yield self.federation_client.send_invite(
             destination=target_host,
             room_id=event.room_id,
             event_id=event.event_id,
@@ -938,16 +921,6 @@ class FederationHandler(BaseHandler):
             [auth_id for auth_id, _ in event.auth_events],
             include_given=True
         )
-
-        for event in auth:
-            event.signatures.update(
-                compute_event_signature(
-                    event,
-                    self.hs.hostname,
-                    self.hs.config.signing_key[0]
-                )
-            )
-
         defer.returnValue([e for e in auth])
 
     @log_function
@@ -972,6 +945,9 @@ class FederationHandler(BaseHandler):
             joinee,
             "join",
             content,
+            params={
+                "ver": KNOWN_ROOM_VERSIONS,
+            },
         )
 
         # This shouldn't happen, because the RoomMemberHandler has a
@@ -981,7 +957,7 @@ class FederationHandler(BaseHandler):
 
         self.room_queues[room_id] = []
 
-        yield self.store.clean_room_for_join(room_id)
+        yield self._clean_room_for_join(room_id)
 
         handled_events = set()
 
@@ -994,7 +970,7 @@ class FederationHandler(BaseHandler):
                 target_hosts.insert(0, origin)
             except ValueError:
                 pass
-            ret = yield self.replication_layer.send_join(target_hosts, event)
+            ret = yield self.federation_client.send_join(target_hosts, event)
 
             origin = ret["origin"]
             state = ret["state"]
@@ -1020,15 +996,10 @@ class FederationHandler(BaseHandler):
                 # FIXME
                 pass
 
-            event_stream_id, max_stream_id = yield self._persist_auth_tree(
+            yield self._persist_auth_tree(
                 origin, auth_chain, state, event
             )
 
-            self.notifier.on_new_room_event(
-                event, event_stream_id, max_stream_id,
-                extra_users=[joinee]
-            )
-
             logger.debug("Finished joining %s to %s", joinee, room_id)
         finally:
             room_queue = self.room_queues[room_id]
@@ -1123,7 +1094,7 @@ class FederationHandler(BaseHandler):
         # would introduce the danger of backwards-compatibility problems.
         event.internal_metadata.send_on_behalf_of = origin
 
-        context, event_stream_id, max_stream_id = yield self._handle_new_event(
+        context = yield self._handle_new_event(
             origin, event
         )
 
@@ -1133,25 +1104,17 @@ class FederationHandler(BaseHandler):
             event.signatures,
         )
 
-        extra_users = []
-        if event.type == EventTypes.Member:
-            target_user_id = event.state_key
-            target_user = UserID.from_string(target_user_id)
-            extra_users.append(target_user)
-
-        self.notifier.on_new_room_event(
-            event, event_stream_id, max_stream_id, extra_users=extra_users
-        )
-
         if event.type == EventTypes.Member:
             if event.content["membership"] == Membership.JOIN:
                 user = UserID.from_string(event.state_key)
-                yield user_joined_room(self.distributor, user, event.room_id)
+                yield self.user_joined_room(user, event.room_id)
+
+        prev_state_ids = yield context.get_prev_state_ids(self.store)
 
-        state_ids = list(context.prev_state_ids.values())
+        state_ids = list(prev_state_ids.values())
         auth_chain = yield self.store.get_auth_chain(state_ids)
 
-        state = yield self.store.get_events(list(context.prev_state_ids.values()))
+        state = yield self.store.get_events(list(prev_state_ids.values()))
 
         defer.returnValue({
             "state": list(state.values()),
@@ -1213,17 +1176,7 @@ class FederationHandler(BaseHandler):
         )
 
         context = yield self.state_handler.compute_event_context(event)
-
-        event_stream_id, max_stream_id = yield self.store.persist_event(
-            event,
-            context=context,
-        )
-
-        target_user = UserID.from_string(event.state_key)
-        self.notifier.on_new_room_event(
-            event, event_stream_id, max_stream_id,
-            extra_users=[target_user],
-        )
+        yield self.persist_events_and_notify([(event, context)])
 
         defer.returnValue(event)
 
@@ -1248,35 +1201,26 @@ class FederationHandler(BaseHandler):
         except ValueError:
             pass
 
-        yield self.replication_layer.send_leave(
+        yield self.federation_client.send_leave(
             target_hosts,
             event
         )
 
         context = yield self.state_handler.compute_event_context(event)
-
-        event_stream_id, max_stream_id = yield self.store.persist_event(
-            event,
-            context=context,
-        )
-
-        target_user = UserID.from_string(event.state_key)
-        self.notifier.on_new_room_event(
-            event, event_stream_id, max_stream_id,
-            extra_users=[target_user],
-        )
+        yield self.persist_events_and_notify([(event, context)])
 
         defer.returnValue(event)
 
     @defer.inlineCallbacks
     def _make_and_verify_event(self, target_hosts, room_id, user_id, membership,
-                               content={},):
-        origin, pdu = yield self.replication_layer.make_membership_event(
+                               content={}, params=None):
+        origin, pdu = yield self.federation_client.make_membership_event(
             target_hosts,
             room_id,
             user_id,
             membership,
             content,
+            params=params,
         )
 
         logger.debug("Got response to make_%s: %s", membership, pdu)
@@ -1316,7 +1260,7 @@ class FederationHandler(BaseHandler):
     @log_function
     def on_make_leave_request(self, room_id, user_id):
         """ We've received a /make_leave/ request, so we create a partial
-        join event for the room and return that. We do *not* persist or
+        leave event for the room and return that. We do *not* persist or
         process it until the other server has signed it and sent it back.
         """
         builder = self.event_builder_factory.new({
@@ -1355,7 +1299,7 @@ class FederationHandler(BaseHandler):
 
         event.internal_metadata.outlier = False
 
-        context, event_stream_id, max_stream_id = yield self._handle_new_event(
+        yield self._handle_new_event(
             origin, event
         )
 
@@ -1365,23 +1309,16 @@ class FederationHandler(BaseHandler):
             event.signatures,
         )
 
-        extra_users = []
-        if event.type == EventTypes.Member:
-            target_user_id = event.state_key
-            target_user = UserID.from_string(target_user_id)
-            extra_users.append(target_user)
-
-        self.notifier.on_new_room_event(
-            event, event_stream_id, max_stream_id, extra_users=extra_users
-        )
-
         defer.returnValue(None)
 
     @defer.inlineCallbacks
     def get_state_for_pdu(self, room_id, event_id):
         """Returns the state at the event. i.e. not including said event.
         """
-        yield run_on_reactor()
+
+        event = yield self.store.get_event(
+            event_id, allow_none=False, check_room_id=room_id,
+        )
 
         state_groups = yield self.store.get_state_groups(
             room_id, [event_id]
@@ -1393,8 +1330,7 @@ class FederationHandler(BaseHandler):
                 (e.type, e.state_key): e for e in state
             }
 
-            event = yield self.store.get_event(event_id)
-            if event and event.is_state():
+            if event.is_state():
                 # Get previous state
                 if "replaces_state" in event.unsigned:
                     prev_id = event.unsigned["replaces_state"]
@@ -1405,18 +1341,6 @@ class FederationHandler(BaseHandler):
                     del results[(event.type, event.state_key)]
 
             res = list(results.values())
-            for event in res:
-                # We sign these again because there was a bug where we
-                # incorrectly signed things the first time round
-                if self.is_mine_id(event.event_id):
-                    event.signatures.update(
-                        compute_event_signature(
-                            event,
-                            self.hs.hostname,
-                            self.hs.config.signing_key[0]
-                        )
-                    )
-
             defer.returnValue(res)
         else:
             defer.returnValue([])
@@ -1425,7 +1349,9 @@ class FederationHandler(BaseHandler):
     def get_state_ids_for_pdu(self, room_id, event_id):
         """Returns the state at the event. i.e. not including said event.
         """
-        yield run_on_reactor()
+        event = yield self.store.get_event(
+            event_id, allow_none=False, check_room_id=room_id,
+        )
 
         state_groups = yield self.store.get_state_groups_ids(
             room_id, [event_id]
@@ -1435,8 +1361,7 @@ class FederationHandler(BaseHandler):
             _, state = state_groups.items().pop()
             results = state
 
-            event = yield self.store.get_event(event_id)
-            if event and event.is_state():
+            if event.is_state():
                 # Get previous state
                 if "replaces_state" in event.unsigned:
                     prev_id = event.unsigned["replaces_state"]
@@ -1462,17 +1387,26 @@ class FederationHandler(BaseHandler):
             limit
         )
 
-        events = yield self._filter_events_for_server(origin, room_id, events)
+        events = yield filter_events_for_server(self.store, origin, events)
 
         defer.returnValue(events)
 
     @defer.inlineCallbacks
     @log_function
-    def get_persisted_pdu(self, origin, event_id, do_auth=True):
-        """ Get a PDU from the database with given origin and id.
+    def get_persisted_pdu(self, origin, event_id):
+        """Get an event from the database for the given server.
+
+        Args:
+            origin (str): hostname of the server requesting the event; we
+                will check that the server is allowed to see it.
+            event_id (str): id of the event being requested
 
         Returns:
-            Deferred: Results in a `Pdu`.
+            Deferred[EventBase|None]: None if we know nothing about the event;
+                otherwise the (possibly-redacted) event.
+
+        Raises:
+            AuthError if the server is not currently in the room
         """
         event = yield self.store.get_event(
             event_id,
@@ -1481,32 +1415,17 @@ class FederationHandler(BaseHandler):
         )
 
         if event:
-            if self.is_mine_id(event.event_id):
-                # FIXME: This is a temporary work around where we occasionally
-                # return events slightly differently than when they were
-                # originally signed
-                event.signatures.update(
-                    compute_event_signature(
-                        event,
-                        self.hs.hostname,
-                        self.hs.config.signing_key[0]
-                    )
-                )
-
-            if do_auth:
-                in_room = yield self.auth.check_host_in_room(
-                    event.room_id,
-                    origin
-                )
-                if not in_room:
-                    raise AuthError(403, "Host not in room.")
-
-                events = yield self._filter_events_for_server(
-                    origin, event.room_id, [event]
-                )
-
-                event = events[0]
+            in_room = yield self.auth.check_host_in_room(
+                event.room_id,
+                origin
+            )
+            if not in_room:
+                raise AuthError(403, "Host not in room.")
 
+            events = yield filter_events_for_server(
+                self.store, origin, [event],
+            )
+            event = events[0]
             defer.returnValue(event)
         else:
             defer.returnValue(None)
@@ -1531,9 +1450,8 @@ class FederationHandler(BaseHandler):
                     event, context
                 )
 
-            event_stream_id, max_stream_id = yield self.store.persist_event(
-                event,
-                context=context,
+            yield self.persist_events_and_notify(
+                [(event, context)],
                 backfilled=backfilled,
             )
         except:  # noqa: E722, as we reraise the exception this is fine.
@@ -1546,15 +1464,7 @@ class FederationHandler(BaseHandler):
 
             six.reraise(tp, value, tb)
 
-        if not backfilled:
-            # this intentionally does not yield: we don't care about the result
-            # and don't need to wait for it.
-            logcontext.run_in_background(
-                self.pusher_pool.on_new_notifications,
-                event_stream_id, max_stream_id,
-            )
-
-        defer.returnValue((context, event_stream_id, max_stream_id))
+        defer.returnValue(context)
 
     @defer.inlineCallbacks
     def _handle_new_events(self, origin, event_infos, backfilled=False):
@@ -1562,6 +1472,8 @@ class FederationHandler(BaseHandler):
         should not depend on one another, e.g. this should be used to persist
         a bunch of outliers, but not a chunk of individual events that depend
         on each other for state calculations.
+
+        Notifies about the events where appropriate.
         """
         contexts = yield logcontext.make_deferred_yieldable(defer.gatherResults(
             [
@@ -1576,10 +1488,10 @@ class FederationHandler(BaseHandler):
             ], consumeErrors=True,
         ))
 
-        yield self.store.persist_events(
+        yield self.persist_events_and_notify(
             [
                 (ev_info["event"], context)
-                for ev_info, context in itertools.izip(event_infos, contexts)
+                for ev_info, context in zip(event_infos, contexts)
             ],
             backfilled=backfilled,
         )
@@ -1588,7 +1500,8 @@ class FederationHandler(BaseHandler):
     def _persist_auth_tree(self, origin, auth_events, state, event):
         """Checks the auth chain is valid (and passes auth checks) for the
         state and event. Then persists the auth chain and state atomically.
-        Persists the event seperately.
+        Persists the event separately. Notifies about the persisted events
+        where appropriate.
 
         Will attempt to fetch missing auth events.
 
@@ -1599,8 +1512,7 @@ class FederationHandler(BaseHandler):
             event (Event)
 
         Returns:
-            2-tuple of (event_stream_id, max_stream_id) from the persist_event
-            call for `event`
+            Deferred
         """
         events_to_context = {}
         for e in itertools.chain(auth_events, state):
@@ -1626,7 +1538,7 @@ class FederationHandler(BaseHandler):
                     missing_auth_events.add(e_id)
 
         for e_id in missing_auth_events:
-            m_ev = yield self.replication_layer.get_pdu(
+            m_ev = yield self.federation_client.get_pdu(
                 [origin],
                 e_id,
                 outlier=True,
@@ -1664,7 +1576,7 @@ class FederationHandler(BaseHandler):
                     raise
                 events_to_context[e.event_id].rejected = RejectedReason.AUTH_ERROR
 
-        yield self.store.persist_events(
+        yield self.persist_events_and_notify(
             [
                 (e, events_to_context[e.event_id])
                 for e in itertools.chain(auth_events, state)
@@ -1675,12 +1587,10 @@ class FederationHandler(BaseHandler):
             event, old_state=state
         )
 
-        event_stream_id, max_stream_id = yield self.store.persist_event(
-            event, new_event_context,
+        yield self.persist_events_and_notify(
+            [(event, new_event_context)],
         )
 
-        defer.returnValue((event_stream_id, max_stream_id))
-
     @defer.inlineCallbacks
     def _prep_event(self, origin, event, state=None, auth_events=None):
         """
@@ -1699,8 +1609,9 @@ class FederationHandler(BaseHandler):
         )
 
         if not auth_events:
+            prev_state_ids = yield context.get_prev_state_ids(self.store)
             auth_events_ids = yield self.auth.compute_auth_events(
-                event, context.prev_state_ids, for_verification=True,
+                event, prev_state_ids, for_verification=True,
             )
             auth_events = yield self.store.get_events(auth_events_ids)
             auth_events = {
@@ -1736,8 +1647,19 @@ class FederationHandler(BaseHandler):
         defer.returnValue(context)
 
     @defer.inlineCallbacks
-    def on_query_auth(self, origin, event_id, remote_auth_chain, rejects,
+    def on_query_auth(self, origin, event_id, room_id, remote_auth_chain, rejects,
                       missing):
+        in_room = yield self.auth.check_host_in_room(
+            room_id,
+            origin
+        )
+        if not in_room:
+            raise AuthError(403, "Host not in room.")
+
+        event = yield self.store.get_event(
+            event_id, allow_none=False, check_room_id=room_id
+        )
+
         # Just go through and process each event in `remote_auth_chain`. We
         # don't want to fall into the trap of `missing` being wrong.
         for e in remote_auth_chain:
@@ -1747,7 +1669,6 @@ class FederationHandler(BaseHandler):
                 pass
 
         # Now get the current auth_chain for the event.
-        event = yield self.store.get_event(event_id)
         local_auth_chain = yield self.store.get_auth_chain(
             [auth_id for auth_id, _ in event.auth_events],
             include_given=True
@@ -1760,15 +1681,6 @@ class FederationHandler(BaseHandler):
             local_auth_chain, remote_auth_chain
         )
 
-        for event in ret["auth_chain"]:
-            event.signatures.update(
-                compute_event_signature(
-                    event,
-                    self.hs.hostname,
-                    self.hs.config.signing_key[0]
-                )
-            )
-
         logger.debug("on_query_auth returning: %s", ret)
 
         defer.returnValue(ret)
@@ -1794,8 +1706,8 @@ class FederationHandler(BaseHandler):
             min_depth=min_depth,
         )
 
-        missing_events = yield self._filter_events_for_server(
-            origin, room_id, missing_events,
+        missing_events = yield filter_events_for_server(
+            self.store, origin, missing_events,
         )
 
         defer.returnValue(missing_events)
@@ -1844,7 +1756,7 @@ class FederationHandler(BaseHandler):
             logger.info("Missing auth: %s", missing_auth)
             # If we don't have all the auth events, we need to get them.
             try:
-                remote_auth_chain = yield self.replication_layer.get_event_auth(
+                remote_auth_chain = yield self.federation_client.get_event_auth(
                     origin, event.room_id, event.event_id
                 )
 
@@ -1917,7 +1829,10 @@ class FederationHandler(BaseHandler):
                     (d.type, d.state_key): d for d in different_events if d
                 })
 
+                room_version = yield self.store.get_room_version(event.room_id)
+
                 new_state = self.state_handler.resolve_events(
+                    room_version,
                     [list(local_view.values()), list(remote_view.values())],
                     event
                 )
@@ -1949,9 +1864,10 @@ class FederationHandler(BaseHandler):
                         break
 
             if do_resolution:
+                prev_state_ids = yield context.get_prev_state_ids(self.store)
                 # 1. Get what we think is the auth chain.
                 auth_ids = yield self.auth.compute_auth_events(
-                    event, context.prev_state_ids
+                    event, prev_state_ids
                 )
                 local_auth_chain = yield self.store.get_auth_chain(
                     auth_ids, include_given=True
@@ -1959,7 +1875,7 @@ class FederationHandler(BaseHandler):
 
                 try:
                     # 2. Get remote difference.
-                    result = yield self.replication_layer.query_auth(
+                    result = yield self.federation_client.query_auth(
                         origin,
                         event.room_id,
                         event.event_id,
@@ -2041,21 +1957,34 @@ class FederationHandler(BaseHandler):
             k: a.event_id for k, a in iteritems(auth_events)
             if k != event_key
         }
-        context.current_state_ids = dict(context.current_state_ids)
-        context.current_state_ids.update(state_updates)
-        if context.delta_ids is not None:
-            context.delta_ids = dict(context.delta_ids)
-            context.delta_ids.update(state_updates)
-        context.prev_state_ids = dict(context.prev_state_ids)
-        context.prev_state_ids.update({
+        current_state_ids = yield context.get_current_state_ids(self.store)
+        current_state_ids = dict(current_state_ids)
+
+        current_state_ids.update(state_updates)
+
+        prev_state_ids = yield context.get_prev_state_ids(self.store)
+        prev_state_ids = dict(prev_state_ids)
+
+        prev_state_ids.update({
             k: a.event_id for k, a in iteritems(auth_events)
         })
-        context.state_group = yield self.store.store_state_group(
+
+        # create a new state group as a delta from the existing one.
+        prev_group = context.state_group
+        state_group = yield self.store.store_state_group(
             event.event_id,
             event.room_id,
-            prev_group=context.prev_group,
-            delta_ids=context.delta_ids,
-            current_state_ids=context.current_state_ids,
+            prev_group=prev_group,
+            delta_ids=state_updates,
+            current_state_ids=current_state_ids,
+        )
+
+        yield context.update_state(
+            state_group=state_group,
+            current_state_ids=current_state_ids,
+            prev_state_ids=prev_state_ids,
+            prev_group=prev_group,
+            delta_ids=state_updates,
         )
 
     @defer.inlineCallbacks
@@ -2245,7 +2174,7 @@ class FederationHandler(BaseHandler):
             yield member_handler.send_membership_event(None, event, context)
         else:
             destinations = set(x.split(":", 1)[-1] for x in (sender_user_id, room_id))
-            yield self.replication_layer.forward_third_party_invite(
+            yield self.federation_client.forward_third_party_invite(
                 destinations,
                 room_id,
                 event_dict,
@@ -2295,7 +2224,8 @@ class FederationHandler(BaseHandler):
             event.content["third_party_invite"]["signed"]["token"]
         )
         original_invite = None
-        original_invite_id = context.prev_state_ids.get(key)
+        prev_state_ids = yield context.get_prev_state_ids(self.store)
+        original_invite_id = prev_state_ids.get(key)
         if original_invite_id:
             original_invite = yield self.store.get_event(
                 original_invite_id, allow_none=True
@@ -2337,7 +2267,8 @@ class FederationHandler(BaseHandler):
         signed = event.content["third_party_invite"]["signed"]
         token = signed["token"]
 
-        invite_event_id = context.prev_state_ids.get(
+        prev_state_ids = yield context.get_prev_state_ids(self.store)
+        invite_event_id = prev_state_ids.get(
             (EventTypes.ThirdPartyInvite, token,)
         )
 
@@ -2387,7 +2318,7 @@ class FederationHandler(BaseHandler):
                 for revocation.
         """
         try:
-            response = yield self.hs.get_simple_http_client().get_json(
+            response = yield self.http_client.get_json(
                 url,
                 {"public_key": public_key}
             )
@@ -2398,3 +2329,91 @@ class FederationHandler(BaseHandler):
             )
         if "valid" not in response or not response["valid"]:
             raise AuthError(403, "Third party certificate was invalid")
+
+    @defer.inlineCallbacks
+    def persist_events_and_notify(self, event_and_contexts, backfilled=False):
+        """Persists events and tells the notifier/pushers about them, if
+        necessary.
+
+        Args:
+            event_and_contexts(list[tuple[FrozenEvent, EventContext]]): the
+                events to persist, each paired with its context
+            backfilled (bool): Whether these events are a result of
+                backfilling or not
+
+        Returns:
+            Deferred
+        """
+        if self.config.worker_app:
+            yield self._send_events_to_master(
+                store=self.store,
+                event_and_contexts=event_and_contexts,
+                backfilled=backfilled
+            )
+        else:
+            max_stream_id = yield self.store.persist_events(
+                event_and_contexts,
+                backfilled=backfilled,
+            )
+
+            if not backfilled:  # Never notify for backfilled events
+                for event, _ in event_and_contexts:
+                    self._notify_persisted_event(event, max_stream_id)
+
+    def _notify_persisted_event(self, event, max_stream_id):
+        """Checks to see if notifier/pushers should be notified about the
+        event or not.
+
+        Args:
+            event (FrozenEvent)
+            max_stream_id (int): The max_stream_id returned by persist_events
+        """
+
+        extra_users = []
+        if event.type == EventTypes.Member:
+            target_user_id = event.state_key
+
+            # We notify for memberships if it's an invite for one of our
+            # users
+            if event.internal_metadata.is_outlier():
+                if event.membership != Membership.INVITE:
+                    if not self.is_mine_id(target_user_id):
+                        return
+
+            target_user = UserID.from_string(target_user_id)
+            extra_users.append(target_user)
+        elif event.internal_metadata.is_outlier():
+            return
+
+        event_stream_id = event.internal_metadata.stream_ordering
+        self.notifier.on_new_room_event(
+            event, event_stream_id, max_stream_id,
+            extra_users=extra_users
+        )
+
+        self.pusher_pool.on_new_notifications(
+            event_stream_id, max_stream_id,
+        )
+
+    def _clean_room_for_join(self, room_id):
+        """Called to clean up any data in DB for a given room, ready for the
+        server to join the room.
+
+        Args:
+            room_id (str)
+        """
+        if self.config.worker_app:
+            return self._clean_room_for_join_client(room_id)
+        else:
+            return self.store.clean_room_for_join(room_id)
+
+    def user_joined_room(self, user, room_id):
+        """Called when a new user has joined the room
+        """
+        if self.config.worker_app:
+            return self._notify_user_membership_change(
+                room_id=room_id,
+                user_id=user.to_string(),
+                change="joined",
+            )
+        else:
+            return user_joined_room(self.distributor, user, room_id)
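
The helpers added at the end of this file all share one worker/master
dispatch shape: when `config.worker_app` is set, the operation is proxied to
the master over the replication API; otherwise it acts on the local datastore
directly. A toy sketch of that shape (the names below are illustrative, not
real Synapse APIs):

    class WorkerAwareHandler(object):
        """Toy illustration of the worker/master dispatch used above."""

        def __init__(self, config, store, replication_client):
            self.config = config
            self.store = store
            # hypothetical stand-in for Synapse's replication clients
            self.replication_client = replication_client

        def clean_room_for_join(self, room_id):
            if self.config.worker_app:
                # workers cannot write to the database directly, so ask the
                # master to do the cleanup over replication
                return self.replication_client.clean_room_for_join(room_id)
            return self.store.clean_room_for_join(room_id)
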
diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py
index dcae083734..53e5e2648b 100644
--- a/synapse/handlers/groups_local.py
+++ b/synapse/handlers/groups_local.py
@@ -14,14 +14,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from twisted.internet import defer
+import logging
+
 from six import iteritems
 
+from twisted.internet import defer
+
 from synapse.api.errors import SynapseError
 from synapse.types import get_domain_from_id
 
-import logging
-
 logger = logging.getLogger(__name__)
 
 
diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py
index 91a0898860..5feb3f22a6 100644
--- a/synapse/handlers/identity.py
+++ b/synapse/handlers/identity.py
@@ -1,6 +1,7 @@
 # -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
 # Copyright 2017 Vector Creations Ltd
+# Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,16 +19,18 @@
 
 import logging
 
-import simplejson as json
+from canonicaljson import json
 
 from twisted.internet import defer
 
 from synapse.api.errors import (
-    MatrixCodeMessageException, CodeMessageException
+    CodeMessageException,
+    Codes,
+    HttpResponseException,
+    SynapseError,
 )
+
 from ._base import BaseHandler
-from synapse.util.async import run_on_reactor
-from synapse.api.errors import SynapseError, Codes
 
 logger = logging.getLogger(__name__)
 
@@ -38,6 +41,7 @@ class IdentityHandler(BaseHandler):
         super(IdentityHandler, self).__init__(hs)
 
         self.http_client = hs.get_simple_http_client()
+        self.federation_http_client = hs.get_http_client()
 
         self.trusted_id_servers = set(hs.config.trusted_third_party_id_servers)
         self.trust_any_id_server_just_for_testing_do_not_use = (
@@ -60,8 +64,6 @@ class IdentityHandler(BaseHandler):
 
     @defer.inlineCallbacks
     def threepid_from_creds(self, creds):
-        yield run_on_reactor()
-
         if 'id_server' in creds:
             id_server = creds['id_server']
         elif 'idServer' in creds:
@@ -83,7 +85,6 @@ class IdentityHandler(BaseHandler):
             )
             defer.returnValue(None)
 
-        data = {}
         try:
             data = yield self.http_client.get_json(
                 "https://%s%s" % (
@@ -92,11 +93,9 @@ class IdentityHandler(BaseHandler):
                 ),
                 {'sid': creds['sid'], 'client_secret': client_secret}
             )
-        except MatrixCodeMessageException as e:
+        except HttpResponseException as e:
             logger.info("getValidated3pid failed with Matrix error: %r", e)
-            raise SynapseError(e.code, e.msg, e.errcode)
-        except CodeMessageException as e:
-            data = json.loads(e.msg)
+            raise e.to_synapse_error()
 
         if 'medium' in data:
             defer.returnValue(data)
@@ -104,7 +103,6 @@ class IdentityHandler(BaseHandler):
 
     @defer.inlineCallbacks
     def bind_threepid(self, creds, mxid):
-        yield run_on_reactor()
         logger.debug("binding threepid %r to %s", creds, mxid)
         data = None
 
@@ -135,13 +133,71 @@ class IdentityHandler(BaseHandler):
             )
             logger.debug("bound threepid %r to %s", creds, mxid)
         except CodeMessageException as e:
-            data = json.loads(e.msg)
+            data = json.loads(e.msg)  # XXX WAT?
         defer.returnValue(data)
 
     @defer.inlineCallbacks
-    def requestEmailToken(self, id_server, email, client_secret, send_attempt, **kwargs):
-        yield run_on_reactor()
+    def try_unbind_threepid(self, mxid, threepid):
+        """Removes a binding from an identity server
+
+        Args:
+            mxid (str): Matrix user ID of binding to be removed
+            threepid (dict): Dict with medium & address of binding to be removed
+
+        Raises:
+            SynapseError: If we failed to contact the identity server
+
+        Returns:
+            Deferred[bool]: True on success, otherwise False if the identity
+            server doesn't support unbinding
+        """
+        logger.debug("unbinding threepid %r from %s", threepid, mxid)
+        if not self.trusted_id_servers:
+            logger.warn("Can't unbind threepid: no trusted ID servers set in config")
+            defer.returnValue(False)
+
+        # We don't track which ID server we added 3pids on (perhaps we ought to)
+        # but we assume that any of the servers in the trusted list are in the
+        # same ID server federation, so we can pick any one of them to send the
+        # deletion request to.
+        id_server = next(iter(self.trusted_id_servers))
+
+        url = "https://%s/_matrix/identity/api/v1/3pid/unbind" % (id_server,)
+        content = {
+            "mxid": mxid,
+            "threepid": threepid,
+        }
+        headers = {}
+        # we abuse the federation http client to sign the request, but we have to send it
+        # using the normal http client since we don't want the SRV lookup and want normal
+        # 'browser-like' HTTPS.
+        self.federation_http_client.sign_request(
+            destination=None,
+            method='POST',
+            url_bytes='/_matrix/identity/api/v1/3pid/unbind'.encode('ascii'),
+            headers_dict=headers,
+            content=content,
+            destination_is=id_server,
+        )
+        try:
+            yield self.http_client.post_json_get_json(
+                url,
+                content,
+                headers,
+            )
+        except HttpResponseException as e:
+            if e.code in (400, 404, 501,):
+                # The remote server probably doesn't support unbinding (yet)
+                logger.warn("Received %d response while unbinding threepid", e.code)
+                defer.returnValue(False)
+            else:
+                logger.error("Failed to unbind threepid on identity server: %s", e)
+                raise SynapseError(502, "Failed to contact identity server")
+
+        defer.returnValue(True)
 
+    @defer.inlineCallbacks
+    def requestEmailToken(self, id_server, email, client_secret, send_attempt, **kwargs):
         if not self._should_trust_id_server(id_server):
             raise SynapseError(
                 400, "Untrusted ID server '%s'" % id_server,
@@ -164,20 +220,15 @@ class IdentityHandler(BaseHandler):
                 params
             )
             defer.returnValue(data)
-        except MatrixCodeMessageException as e:
-            logger.info("Proxied requestToken failed with Matrix error: %r", e)
-            raise SynapseError(e.code, e.msg, e.errcode)
-        except CodeMessageException as e:
+        except HttpResponseException as e:
             logger.info("Proxied requestToken failed: %r", e)
-            raise e
+            raise e.to_synapse_error()
 
     @defer.inlineCallbacks
     def requestMsisdnToken(
             self, id_server, country, phone_number,
             client_secret, send_attempt, **kwargs
     ):
-        yield run_on_reactor()
-
         if not self._should_trust_id_server(id_server):
             raise SynapseError(
                 400, "Untrusted ID server '%s'" % id_server,
@@ -201,9 +252,6 @@ class IdentityHandler(BaseHandler):
                 params
             )
             defer.returnValue(data)
-        except MatrixCodeMessageException as e:
-            logger.info("Proxied requestToken failed with Matrix error: %r", e)
-            raise SynapseError(e.code, e.msg, e.errcode)
-        except CodeMessageException as e:
+        except HttpResponseException as e:
             logger.info("Proxied requestToken failed: %r", e)
-            raise e
+            raise e.to_synapse_error()
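
The unbind flow above reduces to a single signed POST whose failure modes map
onto return values: 400/404/501 mean the identity server (probably) doesn't
support unbinding, anything else is a hard failure. A standalone sketch of
that contract, using the third-party `requests` library instead of Synapse's
Twisted clients and omitting the request signing:

    import requests

    def try_unbind(id_server, mxid, medium, address):
        """Ask an identity server to drop a 3pid binding.

        Returns True on success, False if the server appears not to
        support unbinding; raises on other HTTP errors.
        """
        resp = requests.post(
            "https://%s/_matrix/identity/api/v1/3pid/unbind" % (id_server,),
            json={
                "mxid": mxid,
                "threepid": {"medium": medium, "address": address},
            },
        )
        if resp.status_code in (400, 404, 501):
            # matches the handler above: the remote probably doesn't
            # support unbinding (yet)
            return False
        resp.raise_for_status()
        return True
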
diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py
index 71af86fe21..e009395207 100644
--- a/synapse/handlers/initial_sync.py
+++ b/synapse/handlers/initial_sync.py
@@ -13,6 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import logging
+
 from twisted.internet import defer
 
 from synapse.api.constants import EventTypes, Membership
@@ -21,20 +23,15 @@ from synapse.events.utils import serialize_event
 from synapse.events.validator import EventValidator
 from synapse.handlers.presence import format_user_presence_state
 from synapse.streams.config import PaginationConfig
-from synapse.types import (
-    UserID, StreamToken,
-)
+from synapse.types import StreamToken, UserID
 from synapse.util import unwrapFirstError
-from synapse.util.async import concurrently_execute
+from synapse.util.async_helpers import concurrently_execute
 from synapse.util.caches.snapshot_cache import SnapshotCache
 from synapse.util.logcontext import make_deferred_yieldable, run_in_background
 from synapse.visibility import filter_events_for_client
 
 from ._base import BaseHandler
 
-import logging
-
-
 logger = logging.getLogger(__name__)
 
 
@@ -151,13 +148,15 @@ class InitialSyncHandler(BaseHandler):
             try:
                 if event.membership == Membership.JOIN:
                     room_end_token = now_token.room_key
-                    deferred_room_state = self.state_handler.get_current_state(
-                        event.room_id
+                    deferred_room_state = run_in_background(
+                        self.state_handler.get_current_state,
+                        event.room_id,
                     )
                 elif event.membership == Membership.LEAVE:
                     room_end_token = "s%d" % (event.stream_ordering,)
-                    deferred_room_state = self.store.get_state_for_events(
-                        [event.event_id], None
+                    deferred_room_state = run_in_background(
+                        self.store.get_state_for_events,
+                        [event.event_id], None,
                     )
                     deferred_room_state.addCallback(
                         lambda states: states[event.event_id]
@@ -373,6 +372,10 @@ class InitialSyncHandler(BaseHandler):
 
         @defer.inlineCallbacks
         def get_presence():
+            # If presence is disabled, return an empty list
+            if not self.hs.config.use_presence:
+                defer.returnValue([])
+
             states = yield presence_handler.get_states(
                 [m.user_id for m in room_members],
                 as_event=True,
@@ -390,19 +393,21 @@ class InitialSyncHandler(BaseHandler):
                 receipts = []
             defer.returnValue(receipts)
 
-        presence, receipts, (messages, token) = yield defer.gatherResults(
-            [
-                run_in_background(get_presence),
-                run_in_background(get_receipts),
-                run_in_background(
-                    self.store.get_recent_events_for_room,
-                    room_id,
-                    limit=limit,
-                    end_token=now_token.room_key,
-                )
-            ],
-            consumeErrors=True,
-        ).addErrback(unwrapFirstError)
+        presence, receipts, (messages, token) = yield make_deferred_yieldable(
+            defer.gatherResults(
+                [
+                    run_in_background(get_presence),
+                    run_in_background(get_receipts),
+                    run_in_background(
+                        self.store.get_recent_events_for_room,
+                        room_id,
+                        limit=limit,
+                        end_token=now_token.room_key,
+                    )
+                ],
+                consumeErrors=True,
+            ).addErrback(unwrapFirstError),
+        )
 
         messages = yield filter_events_for_client(
             self.store, user_id, messages, is_peeking=is_peeking,
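
The gatherResults change above is the standard logcontext-safe fan-out idiom:
start each piece of work with run_in_background, then wrap the gathered
deferred in make_deferred_yieldable before yielding on it. A minimal sketch of
the idiom in isolation (assuming Synapse's logcontext helpers are importable;
the fetchers are placeholder callables returning deferreds):

    from twisted.internet import defer

    from synapse.util.logcontext import (
        make_deferred_yieldable,
        run_in_background,
    )

    @defer.inlineCallbacks
    def fetch_concurrently(fetchers):
        """Run zero-argument deferred-returning callables concurrently,
        waiting for all of them without leaking log contexts.
        """
        results = yield make_deferred_yieldable(
            defer.gatherResults(
                [run_in_background(f) for f in fetchers],
                consumeErrors=True,
            )
        )
        defer.returnValue(results)
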
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 1cb81b6cf8..e484061cc0 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -14,269 +14,50 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
-import simplejson
 import sys
 
-from canonicaljson import encode_canonical_json
 import six
-from six import string_types, itervalues, iteritems
-from twisted.internet import defer, reactor
+from six import iteritems, itervalues, string_types
+
+from canonicaljson import encode_canonical_json, json
+
+from twisted.internet import defer
 from twisted.internet.defer import succeed
-from twisted.python.failure import Failure
 
-from synapse.api.constants import EventTypes, Membership, MAX_DEPTH
+from synapse.api.constants import MAX_DEPTH, EventTypes, Membership
 from synapse.api.errors import (
-    AuthError, Codes, SynapseError,
+    AuthError,
+    Codes,
     ConsentNotGivenError,
+    NotFoundError,
+    SynapseError,
 )
 from synapse.api.urls import ConsentURIBuilder
 from synapse.crypto.event_signing import add_hashes_and_signatures
 from synapse.events.utils import serialize_event
 from synapse.events.validator import EventValidator
-from synapse.types import (
-    UserID, RoomAlias, RoomStreamToken,
-)
-from synapse.util.async import run_on_reactor, ReadWriteLock, Limiter
+from synapse.replication.http.send_event import ReplicationSendEventRestServlet
+from synapse.types import RoomAlias, UserID
+from synapse.util.async_helpers import Linearizer
+from synapse.util.frozenutils import frozendict_json_encoder
 from synapse.util.logcontext import run_in_background
 from synapse.util.metrics import measure_func
-from synapse.util.frozenutils import frozendict_json_encoder
-from synapse.util.stringutils import random_string
 from synapse.visibility import filter_events_for_client
-from synapse.replication.http.send_event import send_event_to_master
 
 from ._base import BaseHandler
 
 logger = logging.getLogger(__name__)
 
 
-class PurgeStatus(object):
-    """Object tracking the status of a purge request
-
-    This class contains information on the progress of a purge request, for
-    return by get_purge_status.
-
-    Attributes:
-        status (int): Tracks whether this request has completed. One of
-            STATUS_{ACTIVE,COMPLETE,FAILED}
+class MessageHandler(object):
+    """Contains some read only APIs to get state about a room
     """
 
-    STATUS_ACTIVE = 0
-    STATUS_COMPLETE = 1
-    STATUS_FAILED = 2
-
-    STATUS_TEXT = {
-        STATUS_ACTIVE: "active",
-        STATUS_COMPLETE: "complete",
-        STATUS_FAILED: "failed",
-    }
-
-    def __init__(self):
-        self.status = PurgeStatus.STATUS_ACTIVE
-
-    def asdict(self):
-        return {
-            "status": PurgeStatus.STATUS_TEXT[self.status]
-        }
-
-
-class MessageHandler(BaseHandler):
-
     def __init__(self, hs):
-        super(MessageHandler, self).__init__(hs)
-        self.hs = hs
-        self.state = hs.get_state_handler()
+        self.auth = hs.get_auth()
         self.clock = hs.get_clock()
-
-        self.pagination_lock = ReadWriteLock()
-        self._purges_in_progress_by_room = set()
-        # map from purge id to PurgeStatus
-        self._purges_by_id = {}
-
-    def start_purge_history(self, room_id, token,
-                            delete_local_events=False):
-        """Start off a history purge on a room.
-
-        Args:
-            room_id (str): The room to purge from
-
-            token (str): topological token to delete events before
-            delete_local_events (bool): True to delete local events as well as
-                remote ones
-
-        Returns:
-            str: unique ID for this purge transaction.
-        """
-        if room_id in self._purges_in_progress_by_room:
-            raise SynapseError(
-                400,
-                "History purge already in progress for %s" % (room_id, ),
-            )
-
-        purge_id = random_string(16)
-
-        # we log the purge_id here so that it can be tied back to the
-        # request id in the log lines.
-        logger.info("[purge] starting purge_id %s", purge_id)
-
-        self._purges_by_id[purge_id] = PurgeStatus()
-        run_in_background(
-            self._purge_history,
-            purge_id, room_id, token, delete_local_events,
-        )
-        return purge_id
-
-    @defer.inlineCallbacks
-    def _purge_history(self, purge_id, room_id, token,
-                       delete_local_events):
-        """Carry out a history purge on a room.
-
-        Args:
-            purge_id (str): The id for this purge
-            room_id (str): The room to purge from
-            token (str): topological token to delete events before
-            delete_local_events (bool): True to delete local events as well as
-                remote ones
-
-        Returns:
-            Deferred
-        """
-        self._purges_in_progress_by_room.add(room_id)
-        try:
-            with (yield self.pagination_lock.write(room_id)):
-                yield self.store.purge_history(
-                    room_id, token, delete_local_events,
-                )
-            logger.info("[purge] complete")
-            self._purges_by_id[purge_id].status = PurgeStatus.STATUS_COMPLETE
-        except Exception:
-            logger.error("[purge] failed: %s", Failure().getTraceback().rstrip())
-            self._purges_by_id[purge_id].status = PurgeStatus.STATUS_FAILED
-        finally:
-            self._purges_in_progress_by_room.discard(room_id)
-
-            # remove the purge from the list 24 hours after it completes
-            def clear_purge():
-                del self._purges_by_id[purge_id]
-            reactor.callLater(24 * 3600, clear_purge)
-
-    def get_purge_status(self, purge_id):
-        """Get the current status of an active purge
-
-        Args:
-            purge_id (str): purge_id returned by start_purge_history
-
-        Returns:
-            PurgeStatus|None
-        """
-        return self._purges_by_id.get(purge_id)
-
-    @defer.inlineCallbacks
-    def get_messages(self, requester, room_id=None, pagin_config=None,
-                     as_client_event=True, event_filter=None):
-        """Get messages in a room.
-
-        Args:
-            requester (Requester): The user requesting messages.
-            room_id (str): The room they want messages from.
-            pagin_config (synapse.api.streams.PaginationConfig): The pagination
-                config rules to apply, if any.
-            as_client_event (bool): True to get events in client-server format.
-            event_filter (Filter): Filter to apply to results or None
-        Returns:
-            dict: Pagination API results
-        """
-        user_id = requester.user.to_string()
-
-        if pagin_config.from_token:
-            room_token = pagin_config.from_token.room_key
-        else:
-            pagin_config.from_token = (
-                yield self.hs.get_event_sources().get_current_token_for_room(
-                    room_id=room_id
-                )
-            )
-            room_token = pagin_config.from_token.room_key
-
-        room_token = RoomStreamToken.parse(room_token)
-
-        pagin_config.from_token = pagin_config.from_token.copy_and_replace(
-            "room_key", str(room_token)
-        )
-
-        source_config = pagin_config.get_source_config("room")
-
-        with (yield self.pagination_lock.read(room_id)):
-            membership, member_event_id = yield self._check_in_room_or_world_readable(
-                room_id, user_id
-            )
-
-            if source_config.direction == 'b':
-                # if we're going backwards, we might need to backfill. This
-                # requires that we have a topo token.
-                if room_token.topological:
-                    max_topo = room_token.topological
-                else:
-                    max_topo = yield self.store.get_max_topological_token(
-                        room_id, room_token.stream
-                    )
-
-                if membership == Membership.LEAVE:
-                    # If they have left the room then clamp the token to be before
-                    # they left the room, to save the effort of loading from the
-                    # database.
-                    leave_token = yield self.store.get_topological_token_for_event(
-                        member_event_id
-                    )
-                    leave_token = RoomStreamToken.parse(leave_token)
-                    if leave_token.topological < max_topo:
-                        source_config.from_key = str(leave_token)
-
-                yield self.hs.get_handlers().federation_handler.maybe_backfill(
-                    room_id, max_topo
-                )
-
-            events, next_key = yield self.store.paginate_room_events(
-                room_id=room_id,
-                from_key=source_config.from_key,
-                to_key=source_config.to_key,
-                direction=source_config.direction,
-                limit=source_config.limit,
-                event_filter=event_filter,
-            )
-
-            next_token = pagin_config.from_token.copy_and_replace(
-                "room_key", next_key
-            )
-
-        if not events:
-            defer.returnValue({
-                "chunk": [],
-                "start": pagin_config.from_token.to_string(),
-                "end": next_token.to_string(),
-            })
-
-        if event_filter:
-            events = event_filter.filter(events)
-
-        events = yield filter_events_for_client(
-            self.store,
-            user_id,
-            events,
-            is_peeking=(member_event_id is None),
-        )
-
-        time_now = self.clock.time_msec()
-
-        chunk = {
-            "chunk": [
-                serialize_event(e, time_now, as_client_event)
-                for e in events
-            ],
-            "start": pagin_config.from_token.to_string(),
-            "end": next_token.to_string(),
-        }
-
-        defer.returnValue(chunk)
+        self.state = hs.get_state_handler()
+        self.store = hs.get_datastore()
 
     @defer.inlineCallbacks
     def get_room_data(self, user_id=None, room_id=None,
@@ -290,12 +71,12 @@ class MessageHandler(BaseHandler):
         Raises:
             SynapseError if something went wrong.
         """
-        membership, membership_event_id = yield self._check_in_room_or_world_readable(
+        membership, membership_event_id = yield self.auth.check_in_room_or_world_readable(
             room_id, user_id
         )
 
         if membership == Membership.JOIN:
-            data = yield self.state_handler.get_current_state(
+            data = yield self.state.get_current_state(
                 room_id, event_type, state_key
             )
         elif membership == Membership.LEAVE:
@@ -308,53 +89,85 @@ class MessageHandler(BaseHandler):
         defer.returnValue(data)
 
     @defer.inlineCallbacks
-    def _check_in_room_or_world_readable(self, room_id, user_id):
-        try:
-            # check_user_was_in_room will return the most recent membership
-            # event for the user if:
-            #  * The user is a non-guest user, and was ever in the room
-            #  * The user is a guest user, and has joined the room
-            # else it will throw.
-            member_event = yield self.auth.check_user_was_in_room(room_id, user_id)
-            defer.returnValue((member_event.membership, member_event.event_id))
-            return
-        except AuthError:
-            visibility = yield self.state_handler.get_current_state(
-                room_id, EventTypes.RoomHistoryVisibility, ""
-            )
-            if (
-                visibility and
-                visibility.content["history_visibility"] == "world_readable"
-            ):
-                defer.returnValue((Membership.JOIN, None))
-                return
-            raise AuthError(
-                403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN
-            )
-
-    @defer.inlineCallbacks
-    def get_state_events(self, user_id, room_id, is_guest=False):
+    def get_state_events(
+        self, user_id, room_id, types=None, filtered_types=None,
+        at_token=None, is_guest=False,
+    ):
         """Retrieve all state events for a given room. If the user is
         joined to the room then return the current state. If the user has
-        left the room return the state events from when they left.
+        left the room return the state events from when they left. If an explicit
+        'at' parameter is passed, return the state events as of that event, if
+        visible.
 
         Args:
             user_id(str): The user requesting state events.
             room_id(str): The room ID to get all state events from.
+            types(list[(str, str|None)]|None): List of (type, state_key) tuples
+                which are used to filter the state fetched. If `state_key` is None,
+                all events of the given type are returned. `types` itself may be
+                None, in which case no filtering is applied.
+            filtered_types(list[str]|None): Only apply filtering via `types` to this
+                list of event types.  Other types of events are returned unfiltered.
+                If None, `types` filtering is applied to all events.
+            at_token(StreamToken|None): the stream token of the point at which we
+                are requesting the state. If the user is not allowed to view the
+                state as of that stream token, we raise a 403 SynapseError. If
+                None, returns the current state based on the current_state_events
+                table.
+            is_guest(bool): whether this user is a guest
         Returns:
             A list of dicts representing state events. [{}, {}, {}]
+        Raises:
+            NotFoundError (404) if the at token does not yield an event
+
+            AuthError (403) if the user doesn't have permission to view
+            members of this room.
         """
-        membership, membership_event_id = yield self._check_in_room_or_world_readable(
-            room_id, user_id
-        )
+        if at_token:
+            # FIXME this claims to get the state at a stream position, but
+            # get_recent_events_for_room operates by topo ordering. This therefore
+            # does not reliably give you the state at the given stream position.
+            # (https://github.com/matrix-org/synapse/issues/3305)
+            last_events, _ = yield self.store.get_recent_events_for_room(
+                room_id, end_token=at_token.room_key, limit=1,
+            )
 
-        if membership == Membership.JOIN:
-            room_state = yield self.state_handler.get_current_state(room_id)
-        elif membership == Membership.LEAVE:
-            room_state = yield self.store.get_state_for_events(
-                [membership_event_id], None
+            if not last_events:
+                raise NotFoundError("Can't find event for token %s" % (at_token, ))
+
+            visible_events = yield filter_events_for_client(
+                self.store, user_id, last_events,
             )
-            room_state = room_state[membership_event_id]
+
+            event = last_events[0]
+            if visible_events:
+                room_state = yield self.store.get_state_for_events(
+                    [event.event_id], types, filtered_types=filtered_types,
+                )
+                room_state = room_state[event.event_id]
+            else:
+                raise AuthError(
+                    403,
+                    "User %s not allowed to view events in room %s at token %s" % (
+                        user_id, room_id, at_token,
+                    )
+                )
+        else:
+            membership, membership_event_id = (
+                yield self.auth.check_in_room_or_world_readable(
+                    room_id, user_id,
+                )
+            )
+
+            if membership == Membership.JOIN:
+                state_ids = yield self.store.get_filtered_current_state_ids(
+                    room_id, types, filtered_types=filtered_types,
+                )
+                room_state = yield self.store.get_events(state_ids.values())
+            elif membership == Membership.LEAVE:
+                room_state = yield self.store.get_state_for_events(
+                    [membership_event_id], types, filtered_types=filtered_types,
+                )
+                room_state = room_state[membership_event_id]
 
         now = self.clock.time_msec()
         defer.returnValue(
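
To make the `types`/`filtered_types` semantics documented above concrete,
here is a small self-contained sketch of the filtering rules (plain dicts
stand in for state events; this is not the storage-layer implementation):

    def filter_state(state_events, types, filtered_types=None):
        """Filter state events per the get_state_events docstring: a
        state_key of None matches any key, event types outside
        filtered_types pass through unfiltered, and types=None disables
        filtering entirely.
        """
        if types is None:
            return list(state_events)

        def keep(event):
            if filtered_types is not None and event["type"] not in filtered_types:
                return True  # not subject to `types` filtering
            return any(
                t == event["type"] and (k is None or k == event["state_key"])
                for t, k in types
            )

        return [e for e in state_events if keep(e)]

For example, types=[("m.room.member", "@alice:example.com")] with
filtered_types=["m.room.member"] keeps all non-membership state but only
Alice's membership event.
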
@@ -377,7 +190,7 @@ class MessageHandler(BaseHandler):
         if not requester.app_service:
             # We check AS auth after fetching the room membership, as it
             # requires us to pull out all joined members anyway.
-            membership, _ = yield self._check_in_room_or_world_readable(
+            membership, _ = yield self.auth.check_in_room_or_world_readable(
                 room_id, user_id
             )
             if membership != Membership.JOIN:
@@ -388,7 +201,7 @@ class MessageHandler(BaseHandler):
         users_with_profile = yield self.state.get_current_user_in_room(room_id)
 
         # If this is an AS, double check that they are allowed to see the members.
-        # This can either be because the AS user is in the room or becuase there
+        # This can either be because the AS user is in the room or because there
         # is a user in the room that the AS is "interested in"
         if requester.app_service and user_id not in users_with_profile:
             for uid in users_with_profile:
@@ -422,7 +235,7 @@ class EventCreationHandler(object):
         self.notifier = hs.get_notifier()
         self.config = hs.config
 
-        self.http_client = hs.get_simple_http_client()
+        self.send_event_to_master = ReplicationSendEventRestServlet.make_client(hs)
 
         # This is only used to get at ratelimit function, and maybe_kick_guest_users
         self.base_handler = BaseHandler(hs)
@@ -431,7 +244,7 @@ class EventCreationHandler(object):
 
         # We arbitrarily limit concurrent event creation for a room to 5.
         # This is to stop us from diverging history *too* much.
-        self.limiter = Limiter(max_count=5)
+        self.limiter = Linearizer(max_count=5, name="room_event_creation_limit")
 
         self.action_generator = hs.get_action_generator()
 
@@ -463,10 +276,14 @@ class EventCreationHandler(object):
                 where *hashes* is a map from algorithm to hash.
 
                 If None, they will be requested from the database.
-
+        Raises:
+            ResourceLimitError if the server is blocked because some resource
+            limit has been exceeded
         Returns:
             Tuple of created event (FrozenEvent), Context
         """
+        yield self.auth.check_auth_blocking(requester.user.to_string())
+
         builder = self.event_builder_factory.new(event_dict)
 
         self.validator.validate_new(builder)
@@ -491,7 +308,7 @@ class EventCreationHandler(object):
                         target, e
                     )
 
-        is_exempt = yield self._is_exempt_from_privacy_policy(builder)
+        is_exempt = yield self._is_exempt_from_privacy_policy(builder, requester)
         if not is_exempt:
             yield self.assert_accepted_privacy_policy(requester)
 
@@ -509,12 +326,13 @@ class EventCreationHandler(object):
 
         defer.returnValue((event, context))
 
-    def _is_exempt_from_privacy_policy(self, builder):
+    def _is_exempt_from_privacy_policy(self, builder, requester):
         """"Determine if an event to be sent is exempt from having to consent
         to the privacy policy
 
         Args:
             builder (synapse.events.builder.EventBuilder): event being created
+            requester (Requester): user requesting this event
 
         Returns:
             Deferred[bool]: true if the event can be sent without the user
@@ -525,6 +343,9 @@ class EventCreationHandler(object):
             membership = builder.content.get("membership", None)
             if membership == Membership.JOIN:
                 return self._is_server_notices_room(builder.room_id)
+            elif membership == Membership.LEAVE:
+                # the user is always allowed to leave (but not kick people)
+                return builder.state_key == requester.user.to_string()
         return succeed(False)
 
     @defer.inlineCallbacks
@@ -630,7 +451,8 @@ class EventCreationHandler(object):
         If so, returns the version of the event in context.
         Otherwise, returns None.
         """
-        prev_event_id = context.prev_state_ids.get((event.type, event.state_key))
+        prev_state_ids = yield context.get_prev_state_ids(self.store)
+        prev_event_id = prev_state_ids.get((event.type, event.state_key))
         prev_event = yield self.store.get_event(prev_event_id, allow_none=True)
         if not prev_event:
             return
@@ -752,8 +574,8 @@ class EventCreationHandler(object):
         event = builder.build()
 
         logger.debug(
-            "Created event %s with state: %s",
-            event.event_id, context.prev_state_ids,
+            "Created event %s",
+            event.event_id,
         )
 
         defer.returnValue(
@@ -793,7 +615,7 @@ class EventCreationHandler(object):
         # Ensure that we can round trip before trying to persist in db
         try:
             dump = frozendict_json_encoder.encode(event.content)
-            simplejson.loads(dump)
+            json.loads(dump)
         except Exception:
             logger.exception("Failed to encode content: %r", event.content)
             raise
@@ -805,10 +627,9 @@ class EventCreationHandler(object):
         try:
             # If we're a worker we need to hit out to the master.
             if self.config.worker_app:
-                yield send_event_to_master(
-                    self.http_client,
-                    host=self.config.worker_replication_host,
-                    port=self.config.worker_replication_http_port,
+                yield self.send_event_to_master(
+                    event_id=event.event_id,
+                    store=self.store,
                     requester=requester,
                     event=event,
                     context=context,
@@ -883,9 +704,11 @@ class EventCreationHandler(object):
                         e.sender == event.sender
                     )
 
+                current_state_ids = yield context.get_current_state_ids(self.store)
+
                 state_to_include_ids = [
                     e_id
-                    for k, e_id in iteritems(context.current_state_ids)
+                    for k, e_id in iteritems(current_state_ids)
                     if k[0] in self.hs.config.room_invite_state_types
                     or k == (EventTypes.Member, event.sender)
                 ]
@@ -921,8 +744,9 @@ class EventCreationHandler(object):
                     )
 
         if event.type == EventTypes.Redaction:
+            prev_state_ids = yield context.get_prev_state_ids(self.store)
             auth_events_ids = yield self.auth.compute_auth_events(
-                event, context.prev_state_ids, for_verification=True,
+                event, prev_state_ids, for_verification=True,
             )
             auth_events = yield self.store.get_events(auth_events_ids)
             auth_events = {
@@ -942,26 +766,23 @@ class EventCreationHandler(object):
                         "You don't have permission to redact events"
                     )
 
-        if event.type == EventTypes.Create and context.prev_state_ids:
-            raise AuthError(
-                403,
-                "Changing the room create event is forbidden",
-            )
+        if event.type == EventTypes.Create:
+            prev_state_ids = yield context.get_prev_state_ids(self.store)
+            if prev_state_ids:
+                raise AuthError(
+                    403,
+                    "Changing the room create event is forbidden",
+                )
 
         (event_stream_id, max_stream_id) = yield self.store.persist_event(
             event, context=context
         )
 
-        # this intentionally does not yield: we don't care about the result
-        # and don't need to wait for it.
-        run_in_background(
-            self.pusher_pool.on_new_notifications,
-            event_stream_id, max_stream_id
+        self.pusher_pool.on_new_notifications(
+            event_stream_id, max_stream_id,
         )
 
-        @defer.inlineCallbacks
         def _notify():
-            yield run_on_reactor()
             try:
                 self.notifier.on_new_room_event(
                     event, event_stream_id, max_stream_id,
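
A change that recurs throughout this file (and in federation.py above) is
that `context.prev_state_ids` / `context.current_state_ids` attribute reads
become `yield context.get_prev_state_ids(self.store)` accessor calls, so the
state maps can be loaded lazily. A toy sketch of that accessor shape
(LazyContext and get_state_ids_for_group are invented names, not the real
EventContext or datastore APIs):

    from twisted.internet import defer

    class LazyContext(object):
        """Toy stand-in for EventContext: the prev-state map is fetched
        on first use rather than materialised up front.
        """

        def __init__(self, state_group):
            self._state_group = state_group
            self._prev_state_ids = None  # populated on first access

        @defer.inlineCallbacks
        def get_prev_state_ids(self, store):
            if self._prev_state_ids is None:
                # hypothetical datastore call standing in for the real
                # state fetch
                self._prev_state_ids = yield store.get_state_ids_for_group(
                    self._state_group,
                )
            defer.returnValue(self._prev_state_ids)
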
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
new file mode 100644
index 0000000000..5170d093e3
--- /dev/null
+++ b/synapse/handlers/pagination.py
@@ -0,0 +1,298 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 - 2016 OpenMarket Ltd
+# Copyright 2017 - 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+
+from twisted.internet import defer
+from twisted.python.failure import Failure
+
+from synapse.api.constants import EventTypes, Membership
+from synapse.api.errors import SynapseError
+from synapse.events.utils import serialize_event
+from synapse.types import RoomStreamToken
+from synapse.util.async_helpers import ReadWriteLock
+from synapse.util.logcontext import run_in_background
+from synapse.util.stringutils import random_string
+from synapse.visibility import filter_events_for_client
+
+logger = logging.getLogger(__name__)
+
+
+class PurgeStatus(object):
+    """Object tracking the status of a purge request
+
+    This class contains information on the progress of a purge request, for
+    return by get_purge_status.
+
+    Attributes:
+        status (int): Tracks whether this request has completed. One of
+            STATUS_{ACTIVE,COMPLETE,FAILED}
+    """
+
+    STATUS_ACTIVE = 0
+    STATUS_COMPLETE = 1
+    STATUS_FAILED = 2
+
+    STATUS_TEXT = {
+        STATUS_ACTIVE: "active",
+        STATUS_COMPLETE: "complete",
+        STATUS_FAILED: "failed",
+    }
+
+    def __init__(self):
+        self.status = PurgeStatus.STATUS_ACTIVE
+
+    def asdict(self):
+        return {
+            "status": PurgeStatus.STATUS_TEXT[self.status]
+        }
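A quick illustration of the lifecycle above (a sketch, assuming only the class as defined):

    status = PurgeStatus()
    print(status.asdict())                       # {'status': 'active'}
    status.status = PurgeStatus.STATUS_COMPLETE
    print(status.asdict())                       # {'status': 'complete'}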
+
+
+class PaginationHandler(object):
+    """Handles pagination and purge history requests.
+
+    These live in the same handler because we need to block clients from
+    paginating while a purge is in progress.
+    """
+
+    def __init__(self, hs):
+        self.hs = hs
+        self.auth = hs.get_auth()
+        self.store = hs.get_datastore()
+        self.clock = hs.get_clock()
+
+        self.pagination_lock = ReadWriteLock()
+        self._purges_in_progress_by_room = set()
+        # map from purge id to PurgeStatus
+        self._purges_by_id = {}
+
+    def start_purge_history(self, room_id, token,
+                            delete_local_events=False):
+        """Start off a history purge on a room.
+
+        Args:
+            room_id (str): The room to purge from
+            token (str): topological token to delete events before
+            delete_local_events (bool): True to delete local events as well as
+                remote ones
+
+        Returns:
+            str: unique ID for this purge transaction.
+        """
+        if room_id in self._purges_in_progress_by_room:
+            raise SynapseError(
+                400,
+                "History purge already in progress for %s" % (room_id, ),
+            )
+
+        purge_id = random_string(16)
+
+        # we log the purge_id here so that it can be tied back to the
+        # request id in the log lines.
+        logger.info("[purge] starting purge_id %s", purge_id)
+
+        self._purges_by_id[purge_id] = PurgeStatus()
+        run_in_background(
+            self._purge_history,
+            purge_id, room_id, token, delete_local_events,
+        )
+        return purge_id
+
+    @defer.inlineCallbacks
+    def _purge_history(self, purge_id, room_id, token,
+                       delete_local_events):
+        """Carry out a history purge on a room.
+
+        Args:
+            purge_id (str): The id for this purge
+            room_id (str): The room to purge from
+            token (str): topological token to delete events before
+            delete_local_events (bool): True to delete local events as well as
+                remote ones
+
+        Returns:
+            Deferred
+        """
+        self._purges_in_progress_by_room.add(room_id)
+        try:
+            with (yield self.pagination_lock.write(room_id)):
+                yield self.store.purge_history(
+                    room_id, token, delete_local_events,
+                )
+            logger.info("[purge] complete")
+            self._purges_by_id[purge_id].status = PurgeStatus.STATUS_COMPLETE
+        except Exception:
+            logger.error("[purge] failed: %s", Failure().getTraceback().rstrip())
+            self._purges_by_id[purge_id].status = PurgeStatus.STATUS_FAILED
+        finally:
+            self._purges_in_progress_by_room.discard(room_id)
+
+            # remove the purge from the list 24 hours after it completes
+            def clear_purge():
+                del self._purges_by_id[purge_id]
+            self.hs.get_reactor().callLater(24 * 3600, clear_purge)
+
+    def get_purge_status(self, purge_id):
+        """Get the current status of an active purge
+
+        Args:
+            purge_id (str): purge_id returned by start_purge_history
+
+        Returns:
+            PurgeStatus|None
+        """
+        return self._purges_by_id.get(purge_id)
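Taken together, a caller drives a purge roughly as follows (a hedged sketch; `handler` stands in for a PaginationHandler obtained from the homeserver):

    purge_id = handler.start_purge_history(room_id, token)  # returns immediately
    # ... later, poll for completion; records expire 24 hours after the purge ends
    status = handler.get_purge_status(purge_id)
    if status is not None:
        print(status.asdict())  # {'status': 'active' | 'complete' | 'failed'}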
+
+    @defer.inlineCallbacks
+    def get_messages(self, requester, room_id=None, pagin_config=None,
+                     as_client_event=True, event_filter=None):
+        """Get messages in a room.
+
+        Args:
+            requester (Requester): The user requesting messages.
+            room_id (str): The room they want messages from.
+            pagin_config (synapse.api.streams.PaginationConfig): The pagination
+                config rules to apply, if any.
+            as_client_event (bool): True to get events in client-server format.
+            event_filter (Filter): Filter to apply to results or None
+        Returns:
+            dict: Pagination API results
+        """
+        user_id = requester.user.to_string()
+
+        if pagin_config.from_token:
+            room_token = pagin_config.from_token.room_key
+        else:
+            pagin_config.from_token = (
+                yield self.hs.get_event_sources().get_current_token_for_room(
+                    room_id=room_id
+                )
+            )
+            room_token = pagin_config.from_token.room_key
+
+        room_token = RoomStreamToken.parse(room_token)
+
+        pagin_config.from_token = pagin_config.from_token.copy_and_replace(
+            "room_key", str(room_token)
+        )
+
+        source_config = pagin_config.get_source_config("room")
+
+        with (yield self.pagination_lock.read(room_id)):
+            membership, member_event_id = yield self.auth.check_in_room_or_world_readable(
+                room_id, user_id
+            )
+
+            if source_config.direction == 'b':
+                # if we're going backwards, we might need to backfill. This
+                # requires that we have a topo token.
+                if room_token.topological:
+                    max_topo = room_token.topological
+                else:
+                    max_topo = yield self.store.get_max_topological_token(
+                        room_id, room_token.stream
+                    )
+
+                if membership == Membership.LEAVE:
+                    # If they have left the room then clamp the token to be before
+                    # they left the room, to save the effort of loading from the
+                    # database.
+                    leave_token = yield self.store.get_topological_token_for_event(
+                        member_event_id
+                    )
+                    leave_token = RoomStreamToken.parse(leave_token)
+                    if leave_token.topological < max_topo:
+                        source_config.from_key = str(leave_token)
+
+                yield self.hs.get_handlers().federation_handler.maybe_backfill(
+                    room_id, max_topo
+                )
+
+            events, next_key = yield self.store.paginate_room_events(
+                room_id=room_id,
+                from_key=source_config.from_key,
+                to_key=source_config.to_key,
+                direction=source_config.direction,
+                limit=source_config.limit,
+                event_filter=event_filter,
+            )
+
+            next_token = pagin_config.from_token.copy_and_replace(
+                "room_key", next_key
+            )
+
+        if not events:
+            defer.returnValue({
+                "chunk": [],
+                "start": pagin_config.from_token.to_string(),
+                "end": next_token.to_string(),
+            })
+
+        if event_filter:
+            events = event_filter.filter(events)
+
+        events = yield filter_events_for_client(
+            self.store,
+            user_id,
+            events,
+            is_peeking=(member_event_id is None),
+        )
+
+        state = None
+        if event_filter and event_filter.lazy_load_members():
+            # TODO: remove redundant members
+
+            types = [
+                (EventTypes.Member, state_key)
+                for state_key in set(
+                    event.sender  # FIXME: we also care about invite targets etc.
+                    for event in events
+                )
+            ]
+
+            state_ids = yield self.store.get_state_ids_for_event(
+                events[0].event_id, types=types,
+            )
+
+            if state_ids:
+                state = yield self.store.get_events(list(state_ids.values()))
+
+            if state:
+                state = yield filter_events_for_client(
+                    self.store,
+                    user_id,
+                    state.values(),
+                    is_peeking=(member_event_id is None),
+                )
+
+        time_now = self.clock.time_msec()
+
+        chunk = {
+            "chunk": [
+                serialize_event(e, time_now, as_client_event)
+                for e in events
+            ],
+            "start": pagin_config.from_token.to_string(),
+            "end": next_token.to_string(),
+        }
+
+        if state:
+            chunk["state"] = [
+                serialize_event(e, time_now, as_client_event)
+                for e in state
+            ]
+
+        defer.returnValue(chunk)
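For reference, the dict that get_messages returns has this shape (a sketch; the token strings are illustrative placeholders):

    {
        "chunk": [...],           # serialized events for this page
        "start": "<from_token>",  # token this page started from
        "end": "<next_token>",    # token to use to fetch the following page
        "state": [...],           # only present when lazy-loading members
    }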
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index 7fe568132f..ba3856674d 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -22,27 +22,26 @@ The methods that define policy are:
     - should_notify
 """
 
-from twisted.internet import defer, reactor
+import logging
 from contextlib import contextmanager
 
-from six import itervalues, iteritems
+from six import iteritems, itervalues
+
+from prometheus_client import Counter
+
+from twisted.internet import defer
 
-from synapse.api.errors import SynapseError
 from synapse.api.constants import PresenceState
+from synapse.api.errors import SynapseError
+from synapse.metrics import LaterGauge
 from synapse.storage.presence import UserPresenceState
-
+from synapse.types import UserID, get_domain_from_id
+from synapse.util.async_helpers import Linearizer
 from synapse.util.caches.descriptors import cachedInlineCallbacks
-from synapse.util.async import Linearizer
 from synapse.util.logcontext import run_in_background
 from synapse.util.logutils import log_function
 from synapse.util.metrics import Measure
 from synapse.util.wheel_timer import WheelTimer
-from synapse.types import UserID, get_domain_from_id
-from synapse.metrics import LaterGauge
-
-import logging
-
-from prometheus_client import Counter
 
 logger = logging.getLogger(__name__)
 
@@ -96,6 +95,7 @@ class PresenceHandler(object):
         Args:
             hs (synapse.server.HomeServer):
         """
+        self.hs = hs
         self.is_mine = hs.is_mine
         self.is_mine_id = hs.is_mine_id
         self.clock = hs.get_clock()
@@ -179,7 +179,7 @@ class PresenceHandler(object):
         # have not yet been persisted
         self.unpersisted_users_changes = set()
 
-        reactor.addSystemEventTrigger("before", "shutdown", self._on_shutdown)
+        hs.get_reactor().addSystemEventTrigger("before", "shutdown", self._on_shutdown)
 
         self.serial_to_user = {}
         self._next_serial = 1
@@ -231,6 +231,10 @@ class PresenceHandler(object):
         earlier than they should when synapse is restarted. The effect of this
         is some spurious presence changes that will self-correct.
         """
+        # If the DB pool has already terminated, don't try updating
+        if not self.hs.get_db_pool().running:
+            return
+
         logger.info(
             "Performing _on_shutdown. Persisting %d unpersisted changes",
             len(self.user_to_current_state)
@@ -391,6 +395,10 @@ class PresenceHandler(object):
         """We've seen the user do something that indicates they're interacting
         with the app.
         """
+        # If presence is disabled, no-op
+        if not self.hs.config.use_presence:
+            return
+
         user_id = user.to_string()
 
         bump_active_time_counter.inc()
@@ -420,6 +428,11 @@ class PresenceHandler(object):
                 Useful for streams that are not associated with an actual
                 client that is being used by a user.
         """
+        # If presence is disabled, override affect_presence so that syncs
+        # never affect the user's presence state.
+        if not self.hs.config.use_presence:
+            affect_presence = False
+
         if affect_presence:
             curr_sync = self.user_to_num_current_syncs.get(user_id, 0)
             self.user_to_num_current_syncs[user_id] = curr_sync + 1
@@ -465,13 +478,16 @@ class PresenceHandler(object):
         Returns:
             set(str): A set of user_id strings.
         """
-        syncing_user_ids = {
-            user_id for user_id, count in self.user_to_num_current_syncs.items()
-            if count
-        }
-        for user_ids in self.external_process_to_current_syncs.values():
-            syncing_user_ids.update(user_ids)
-        return syncing_user_ids
+        if self.hs.config.use_presence:
+            syncing_user_ids = {
+                user_id for user_id, count in self.user_to_num_current_syncs.items()
+                if count
+            }
+            for user_ids in self.external_process_to_current_syncs.values():
+                syncing_user_ids.update(user_ids)
+            return syncing_user_ids
+        else:
+            return set()
 
     @defer.inlineCallbacks
     def update_external_syncs_row(self, process_id, user_id, is_syncing, sync_time_msec):
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index 3465a787ab..75b8b7ce6a 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -17,19 +17,31 @@ import logging
 
 from twisted.internet import defer
 
-from synapse.api.errors import SynapseError, AuthError, CodeMessageException
+from synapse.api.errors import (
+    AuthError,
+    CodeMessageException,
+    Codes,
+    StoreError,
+    SynapseError,
+)
+from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.types import UserID, get_domain_from_id
+
 from ._base import BaseHandler
 
 logger = logging.getLogger(__name__)
 
 
-class ProfileHandler(BaseHandler):
-    PROFILE_UPDATE_MS = 60 * 1000
-    PROFILE_UPDATE_EVERY_MS = 24 * 60 * 60 * 1000
+class BaseProfileHandler(BaseHandler):
+    """Handles fetching and updating user profile information.
+
+    BaseProfileHandler can be instantiated directly on workers and will
+    delegate to master when necessary. The master process should use the
+    subclass MasterProfileHandler
+    """
 
     def __init__(self, hs):
-        super(ProfileHandler, self).__init__(hs)
+        super(BaseProfileHandler, self).__init__(hs)
 
         self.federation = hs.get_federation_client()
         hs.get_federation_registry().register_query_handler(
@@ -38,21 +50,21 @@ class ProfileHandler(BaseHandler):
 
         self.user_directory_handler = hs.get_user_directory_handler()
 
-        if hs.config.worker_app is None:
-            self.clock.looping_call(
-                self._update_remote_profile_cache, self.PROFILE_UPDATE_MS,
-            )
-
     @defer.inlineCallbacks
     def get_profile(self, user_id):
         target_user = UserID.from_string(user_id)
         if self.hs.is_mine(target_user):
-            displayname = yield self.store.get_profile_displayname(
-                target_user.localpart
-            )
-            avatar_url = yield self.store.get_profile_avatar_url(
-                target_user.localpart
-            )
+            try:
+                displayname = yield self.store.get_profile_displayname(
+                    target_user.localpart
+                )
+                avatar_url = yield self.store.get_profile_avatar_url(
+                    target_user.localpart
+                )
+            except StoreError as e:
+                if e.code == 404:
+                    raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND)
+                raise
 
             defer.returnValue({
                 "displayname": displayname,
@@ -72,7 +84,6 @@ class ProfileHandler(BaseHandler):
             except CodeMessageException as e:
                 if e.code != 404:
                     logger.exception("Failed to get displayname")
-
                 raise
 
     @defer.inlineCallbacks
@@ -83,12 +94,17 @@ class ProfileHandler(BaseHandler):
         """
         target_user = UserID.from_string(user_id)
         if self.hs.is_mine(target_user):
-            displayname = yield self.store.get_profile_displayname(
-                target_user.localpart
-            )
-            avatar_url = yield self.store.get_profile_avatar_url(
-                target_user.localpart
-            )
+            try:
+                displayname = yield self.store.get_profile_displayname(
+                    target_user.localpart
+                )
+                avatar_url = yield self.store.get_profile_avatar_url(
+                    target_user.localpart
+                )
+            except StoreError as e:
+                if e.code == 404:
+                    raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND)
+                raise
 
             defer.returnValue({
                 "displayname": displayname,
@@ -101,9 +117,14 @@ class ProfileHandler(BaseHandler):
     @defer.inlineCallbacks
     def get_displayname(self, target_user):
         if self.hs.is_mine(target_user):
-            displayname = yield self.store.get_profile_displayname(
-                target_user.localpart
-            )
+            try:
+                displayname = yield self.store.get_profile_displayname(
+                    target_user.localpart
+                )
+            except StoreError as e:
+                if e.code == 404:
+                    raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND)
+                raise
 
             defer.returnValue(displayname)
         else:
@@ -120,7 +141,6 @@ class ProfileHandler(BaseHandler):
             except CodeMessageException as e:
                 if e.code != 404:
                     logger.exception("Failed to get displayname")
-
                 raise
             except Exception:
                 logger.exception("Failed to get displayname")
@@ -155,10 +175,14 @@ class ProfileHandler(BaseHandler):
     @defer.inlineCallbacks
     def get_avatar_url(self, target_user):
         if self.hs.is_mine(target_user):
-            avatar_url = yield self.store.get_profile_avatar_url(
-                target_user.localpart
-            )
-
+            try:
+                avatar_url = yield self.store.get_profile_avatar_url(
+                    target_user.localpart
+                )
+            except StoreError as e:
+                if e.code == 404:
+                    raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND)
+                raise
             defer.returnValue(avatar_url)
         else:
             try:
@@ -211,16 +235,20 @@ class ProfileHandler(BaseHandler):
         just_field = args.get("field", None)
 
         response = {}
+        try:
+            if just_field is None or just_field == "displayname":
+                response["displayname"] = yield self.store.get_profile_displayname(
+                    user.localpart
+                )
 
-        if just_field is None or just_field == "displayname":
-            response["displayname"] = yield self.store.get_profile_displayname(
-                user.localpart
-            )
-
-        if just_field is None or just_field == "avatar_url":
-            response["avatar_url"] = yield self.store.get_profile_avatar_url(
-                user.localpart
-            )
+            if just_field is None or just_field == "avatar_url":
+                response["avatar_url"] = yield self.store.get_profile_avatar_url(
+                    user.localpart
+                )
+        except StoreError as e:
+            if e.code == 404:
+                raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND)
+            raise
 
         defer.returnValue(response)
 
@@ -253,6 +281,26 @@ class ProfileHandler(BaseHandler):
                     room_id, str(e.message)
                 )
 
+
+class MasterProfileHandler(BaseProfileHandler):
+    PROFILE_UPDATE_MS = 60 * 1000
+    PROFILE_UPDATE_EVERY_MS = 24 * 60 * 60 * 1000
+
+    def __init__(self, hs):
+        super(MasterProfileHandler, self).__init__(hs)
+
+        assert hs.config.worker_app is None
+
+        self.clock.looping_call(
+            self._start_update_remote_profile_cache, self.PROFILE_UPDATE_MS,
+        )
+
+    def _start_update_remote_profile_cache(self):
+        return run_as_background_process(
+            "Update remote profile", self._update_remote_profile_cache,
+        )
+
+    @defer.inlineCallbacks
     def _update_remote_profile_cache(self):
         """Called periodically to check profiles of remote users we haven't
         checked in a while.
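The `_start_update_remote_profile_cache` shim above reflects a general pattern: `clock.looping_call` fires a plain function, which hands the real inlineCallbacks work to run_as_background_process so that it runs in its own logcontext and is tracked in metrics. A minimal sketch of the pattern, with hypothetical names:

    from twisted.internet import defer

    from synapse.metrics.background_process_metrics import run_as_background_process


    class ExampleCacheUpdater(object):
        UPDATE_MS = 60 * 1000

        def __init__(self, hs):
            self.clock = hs.get_clock()
            # looping_call invokes a synchronous entry point...
            self.clock.looping_call(self._start_update, self.UPDATE_MS)

        def _start_update(self):
            # ...which wraps the async work as a named background process
            return run_as_background_process("example_update", self._update)

        @defer.inlineCallbacks
        def _update(self):
            yield defer.succeed(None)  # the real periodic work goes here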
diff --git a/synapse/handlers/read_marker.py b/synapse/handlers/read_marker.py
index 5142ae153d..32108568c6 100644
--- a/synapse/handlers/read_marker.py
+++ b/synapse/handlers/read_marker.py
@@ -13,13 +13,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import BaseHandler
+import logging
 
 from twisted.internet import defer
 
-from synapse.util.async import Linearizer
+from synapse.util.async_helpers import Linearizer
+
+from ._base import BaseHandler
 
-import logging
 logger = logging.getLogger(__name__)
 
 
diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py
index 2e0672161c..a6f3181f09 100644
--- a/synapse/handlers/receipts.py
+++ b/synapse/handlers/receipts.py
@@ -12,17 +12,14 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from synapse.util import logcontext
-
-from ._base import BaseHandler
+import logging
 
 from twisted.internet import defer
 
-from synapse.util.logcontext import PreserveLoggingContext
 from synapse.types import get_domain_from_id
+from synapse.util import logcontext
 
-import logging
-
+from ._base import BaseHandler
 
 logger = logging.getLogger(__name__)
 
@@ -118,16 +115,15 @@ class ReceiptsHandler(BaseHandler):
 
         affected_room_ids = list(set([r["room_id"] for r in receipts]))
 
-        with PreserveLoggingContext():
-            self.notifier.on_new_event(
-                "receipt_key", max_batch_id, rooms=affected_room_ids
-            )
-            # Note that the min here shouldn't be relied upon to be accurate.
-            self.hs.get_pusherpool().on_new_receipts(
-                min_batch_id, max_batch_id, affected_room_ids
-            )
+        self.notifier.on_new_event(
+            "receipt_key", max_batch_id, rooms=affected_room_ids
+        )
+        # Note that the min here shouldn't be relied upon to be accurate.
+        self.hs.get_pusherpool().on_new_receipts(
+            min_batch_id, max_batch_id, affected_room_ids,
+        )
 
-            defer.returnValue(True)
+        defer.returnValue(True)
 
     @logcontext.preserve_fn   # caller should not yield on this
     @defer.inlineCallbacks
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index 7e52adda3c..1e53f2c635 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -18,14 +18,19 @@ import logging
 
 from twisted.internet import defer
 
+from synapse import types
 from synapse.api.errors import (
-    AuthError, Codes, SynapseError, RegistrationError, InvalidCaptchaError
+    AuthError,
+    Codes,
+    InvalidCaptchaError,
+    RegistrationError,
+    SynapseError,
 )
 from synapse.http.client import CaptchaServerHttpClient
-from synapse import types
-from synapse.types import UserID, create_requester, RoomID, RoomAlias
-from synapse.util.async import run_on_reactor, Linearizer
+from synapse.types import RoomAlias, RoomID, UserID, create_requester
+from synapse.util.async_helpers import Linearizer
 from synapse.util.threepids import check_3pid_allowed
+
 from ._base import BaseHandler
 
 logger = logging.getLogger(__name__)
@@ -40,7 +45,7 @@ class RegistrationHandler(BaseHandler):
             hs (synapse.server.HomeServer):
         """
         super(RegistrationHandler, self).__init__(hs)
-
+        self.hs = hs
         self.auth = hs.get_auth()
         self._auth_handler = hs.get_auth_handler()
         self.profile_handler = hs.get_profile_handler()
@@ -120,13 +125,14 @@ class RegistrationHandler(BaseHandler):
         guest_access_token=None,
         make_guest=False,
         admin=False,
+        threepid=None,
     ):
         """Registers a new client on the server.
 
         Args:
             localpart : The local part of the user ID to register. If None,
               one will be generated.
-            password (str) : The password to assign to this user so they can
+            password (unicode): The password to assign to this user so they can
               login again. This can be None which means they cannot login again
               via a password (e.g. the user is an application service user).
             generate_token (bool): Whether a new access token should be
@@ -139,7 +145,8 @@ class RegistrationHandler(BaseHandler):
         Raises:
             RegistrationError if there was a problem registering.
         """
-        yield run_on_reactor()
+
+        yield self.auth.check_auth_blocking(threepid=threepid)
         password_hash = None
         if password:
             password_hash = yield self.auth_handler().hash(password)
@@ -284,6 +291,7 @@ class RegistrationHandler(BaseHandler):
                 400,
                 "User ID can only contain characters a-z, 0-9, or '=_-./'",
             )
+        yield self.auth.check_auth_blocking()
         user = UserID(localpart, self.hs.hostname)
         user_id = user.to_string()
 
@@ -431,11 +439,9 @@ class RegistrationHandler(BaseHandler):
         Raises:
             RegistrationError if there was a problem registering.
         """
-        yield run_on_reactor()
-
         if localpart is None:
             raise SynapseError(400, "Request must include user id")
-
+        yield self.auth.check_auth_blocking()
         need_register = True
 
         try:
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 2abd63ad05..c3f820b975 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -15,23 +15,29 @@
 # limitations under the License.
 
 """Contains functions for performing events on rooms."""
-from twisted.internet import defer
+import itertools
+import logging
+import math
+import string
+from collections import OrderedDict
 
-from ._base import BaseHandler
+from six import string_types
+
+from twisted.internet import defer
 
-from synapse.types import UserID, RoomAlias, RoomID, RoomStreamToken
 from synapse.api.constants import (
-    EventTypes, JoinRules, RoomCreationPreset
+    DEFAULT_ROOM_VERSION,
+    KNOWN_ROOM_VERSIONS,
+    EventTypes,
+    JoinRules,
+    RoomCreationPreset,
 )
-from synapse.api.errors import AuthError, StoreError, SynapseError
+from synapse.api.errors import AuthError, Codes, StoreError, SynapseError
+from synapse.types import RoomAlias, RoomID, RoomStreamToken, StreamToken, UserID
 from synapse.util import stringutils
 from synapse.visibility import filter_events_for_client
 
-from collections import OrderedDict
-
-import logging
-import math
-import string
+from ._base import BaseHandler
 
 logger = logging.getLogger(__name__)
 
@@ -92,15 +98,34 @@ class RoomCreationHandler(BaseHandler):
         Raises:
             SynapseError if the room ID couldn't be stored, or something went
             horribly wrong.
+            ResourceLimitError if the server is blocked because some resource
+            limit has been exceeded
         """
         user_id = requester.user.to_string()
 
+        yield self.auth.check_auth_blocking(user_id)
+
         if not self.spam_checker.user_may_create_room(user_id):
             raise SynapseError(403, "You are not permitted to create rooms")
 
         if ratelimit:
             yield self.ratelimit(requester)
 
+        room_version = config.get("room_version", DEFAULT_ROOM_VERSION)
+        if not isinstance(room_version, string_types):
+            raise SynapseError(
+                400,
+                "room_version must be a string",
+                Codes.BAD_JSON,
+            )
+
+        if room_version not in KNOWN_ROOM_VERSIONS:
+            raise SynapseError(
+                400,
+                "Your homeserver does not support this room version",
+                Codes.UNSUPPORTED_ROOM_VERSION,
+            )
+
         if "room_alias_name" in config:
             for wchar in string.whitespace:
                 if wchar in config["room_alias_name"]:
@@ -115,7 +140,11 @@ class RoomCreationHandler(BaseHandler):
             )
 
             if mapping:
-                raise SynapseError(400, "Room alias already taken")
+                raise SynapseError(
+                    400,
+                    "Room alias already taken",
+                    Codes.ROOM_IN_USE
+                )
         else:
             room_alias = None
 
@@ -182,6 +211,9 @@ class RoomCreationHandler(BaseHandler):
 
         creation_content = config.get("creation_content", {})
 
+        # override any attempt to set room versions via the creation_content
+        creation_content["room_version"] = room_version
+
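To make the new checks concrete: the room version must be a known string, and any room_version a client smuggles into creation_content is overwritten. A hedged sketch of an acceptable creation config (field values illustrative):

    config = {
        "room_alias_name": "myroom",                # must contain no whitespace
        "room_version": "1",                        # must be in KNOWN_ROOM_VERSIONS
        "creation_content": {"room_version": "9"},  # ignored: overridden above
    }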
         room_member_handler = self.hs.get_room_member_handler()
 
         yield self._send_events_for_new_room(
@@ -394,9 +426,13 @@ class RoomCreationHandler(BaseHandler):
             )
 
 
-class RoomContextHandler(BaseHandler):
+class RoomContextHandler(object):
+    def __init__(self, hs):
+        self.hs = hs
+        self.store = hs.get_datastore()
+
     @defer.inlineCallbacks
-    def get_event_context(self, user, room_id, event_id, limit):
+    def get_event_context(self, user, room_id, event_id, limit, event_filter):
         """Retrieves events, pagination tokens and state around a given event
         in a room.
 
@@ -406,6 +442,8 @@ class RoomContextHandler(BaseHandler):
             event_id (str)
             limit (int): The maximum number of events to return in total
                 (excluding state).
+            event_filter (Filter|None): the filter to apply to the events returned
+                (excluding the target event_id)
 
         Returns:
             dict, or None if the event isn't found
@@ -413,8 +451,6 @@ class RoomContextHandler(BaseHandler):
         before_limit = math.floor(limit / 2.)
         after_limit = limit - before_limit
 
-        now_token = yield self.hs.get_event_sources().get_current_token()
-
         users = yield self.store.get_users_in_room(room_id)
         is_peeking = user.to_string() not in users
 
@@ -440,7 +476,7 @@ class RoomContextHandler(BaseHandler):
             )
 
         results = yield self.store.get_events_around(
-            room_id, event_id, before_limit, after_limit
+            room_id, event_id, before_limit, after_limit, event_filter
         )
 
         results["events_before"] = yield filter_evts(results["events_before"])
@@ -452,16 +488,35 @@ class RoomContextHandler(BaseHandler):
         else:
             last_event_id = event_id
 
+        types = None
+        filtered_types = None
+        if event_filter and event_filter.lazy_load_members():
+            members = set(ev.sender for ev in itertools.chain(
+                results["events_before"],
+                (results["event"],),
+                results["events_after"],
+            ))
+            filtered_types = [EventTypes.Member]
+            types = [(EventTypes.Member, member) for member in members]
+
+        # XXX: why do we return the state as of the last event rather than the
+        # first? Shouldn't we be consistent with /sync?
+        # https://github.com/matrix-org/matrix-doc/issues/687
+
         state = yield self.store.get_state_for_events(
-            [last_event_id], None
+            [last_event_id], types, filtered_types=filtered_types,
         )
         results["state"] = list(state[last_event_id].values())
 
-        results["start"] = now_token.copy_and_replace(
+        # We use a dummy token here as we only care about the room portion of
+        # the token, which we replace.
+        token = StreamToken.START
+
+        results["start"] = token.copy_and_replace(
             "room_key", results["start"]
         ).to_string()
 
-        results["end"] = now_token.copy_and_replace(
+        results["end"] = token.copy_and_replace(
             "room_key", results["end"]
         ).to_string()
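The returned context keeps its previous shape; only the token handling and the state filtering change. Roughly (a sketch; keys follow this handler and the /context API, values are placeholders):

    {
        "events_before": [...],    # up to floor(limit/2) events, filtered
        "event": ...,              # the anchor event itself
        "events_after": [...],     # the rest of the budget, filtered
        "state": [...],            # state at the last returned event
        "start": "<stream token>",
        "end": "<stream token>",
    }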
 
diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py
index fc507cef36..37e41afd61 100644
--- a/synapse/handlers/room_list.py
+++ b/synapse/handlers/room_list.py
@@ -13,26 +13,24 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from twisted.internet import defer
+import logging
+from collections import namedtuple
 
 from six import iteritems
 from six.moves import range
 
-from ._base import BaseHandler
+import msgpack
+from unpaddedbase64 import decode_base64, encode_base64
+
+from twisted.internet import defer
 
-from synapse.api.constants import (
-    EventTypes, JoinRules,
-)
-from synapse.util.async import concurrently_execute
+from synapse.api.constants import EventTypes, JoinRules
+from synapse.types import ThirdPartyInstanceID
+from synapse.util.async_helpers import concurrently_execute
 from synapse.util.caches.descriptors import cachedInlineCallbacks
 from synapse.util.caches.response_cache import ResponseCache
-from synapse.types import ThirdPartyInstanceID
-
-from collections import namedtuple
-from unpaddedbase64 import encode_base64, decode_base64
 
-import logging
-import msgpack
+from ._base import BaseHandler
 
 logger = logging.getLogger(__name__)
 
@@ -40,7 +38,7 @@ REMOTE_ROOM_LIST_POLL_INTERVAL = 60 * 1000
 
 
 # This is used to indicate we should only return rooms published to the main list.
-EMTPY_THIRD_PARTY_ID = ThirdPartyInstanceID(None, None)
+EMPTY_THIRD_PARTY_ID = ThirdPartyInstanceID(None, None)
 
 
 class RoomListHandler(BaseHandler):
@@ -52,7 +50,7 @@ class RoomListHandler(BaseHandler):
 
     def get_local_public_room_list(self, limit=None, since_token=None,
                                    search_filter=None,
-                                   network_tuple=EMTPY_THIRD_PARTY_ID,):
+                                   network_tuple=EMPTY_THIRD_PARTY_ID,):
         """Generate a local public room list.
 
         There are multiple different lists: the main one plus one per third
@@ -89,7 +87,7 @@ class RoomListHandler(BaseHandler):
     @defer.inlineCallbacks
     def _get_public_room_list(self, limit=None, since_token=None,
                               search_filter=None,
-                              network_tuple=EMTPY_THIRD_PARTY_ID,):
+                              network_tuple=EMPTY_THIRD_PARTY_ID,):
         if since_token and since_token != "END":
             since_token = RoomListNextBatch.from_token(since_token)
         else:
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index f930e939e8..f643619047 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -21,19 +21,17 @@ from six.moves import http_client
 
 from signedjson.key import decode_verify_key_bytes
 from signedjson.sign import verify_signed_json
-from twisted.internet import defer
 from unpaddedbase64 import decode_base64
 
+from twisted.internet import defer
+
 import synapse.server
 import synapse.types
-from synapse.api.constants import (
-    EventTypes, Membership,
-)
-from synapse.api.errors import AuthError, SynapseError, Codes
-from synapse.types import UserID, RoomID
-from synapse.util.async import Linearizer
-from synapse.util.distributor import user_left_room, user_joined_room
-
+from synapse.api.constants import EventTypes, Membership
+from synapse.api.errors import AuthError, Codes, SynapseError
+from synapse.types import RoomID, UserID
+from synapse.util.async_helpers import Linearizer
+from synapse.util.distributor import user_joined_room, user_left_room
 
 logger = logging.getLogger(__name__)
 
@@ -203,7 +201,9 @@ class RoomMemberHandler(object):
             ratelimit=ratelimit,
         )
 
-        prev_member_event_id = context.prev_state_ids.get(
+        prev_state_ids = yield context.get_prev_state_ids(self.store)
+
+        prev_member_event_id = prev_state_ids.get(
             (EventTypes.Member, target.to_string()),
             None
         )
@@ -344,6 +344,7 @@ class RoomMemberHandler(object):
         latest_event_ids = (
             event_id for (event_id, _, _) in prev_events_and_hashes
         )
+
         current_state_ids = yield self.state_handler.get_current_state_ids(
             room_id, latest_event_ids=latest_event_ids,
         )
@@ -498,9 +499,10 @@ class RoomMemberHandler(object):
         if prev_event is not None:
             return
 
+        prev_state_ids = yield context.get_prev_state_ids(self.store)
         if event.membership == Membership.JOIN:
             if requester.is_guest:
-                guest_can_join = yield self._can_guest_join(context.prev_state_ids)
+                guest_can_join = yield self._can_guest_join(prev_state_ids)
                 if not guest_can_join:
                     # This should be an auth check, but guests are a local concept,
                     # so don't really fit into the general auth process.
@@ -519,7 +521,7 @@ class RoomMemberHandler(object):
             ratelimit=ratelimit,
         )
 
-        prev_member_event_id = context.prev_state_ids.get(
+        prev_member_event_id = prev_state_ids.get(
             (EventTypes.Member, event.state_key),
             None
         )
@@ -707,6 +709,10 @@ class RoomMemberHandler(object):
             inviter_display_name = member_event.content.get("displayname", "")
             inviter_avatar_url = member_event.content.get("avatar_url", "")
 
+        # if user has no display name, default to their MXID
+        if not inviter_display_name:
+            inviter_display_name = user.to_string()
+
         canonical_room_alias = ""
         canonical_alias_event = room_state.get((EventTypes.CanonicalAlias, ""))
         if canonical_alias_event:
diff --git a/synapse/handlers/room_member_worker.py b/synapse/handlers/room_member_worker.py
index 493aec1e48..acc6eb8099 100644
--- a/synapse/handlers/room_member_worker.py
+++ b/synapse/handlers/room_member_worker.py
@@ -20,15 +20,24 @@ from twisted.internet import defer
 from synapse.api.errors import SynapseError
 from synapse.handlers.room_member import RoomMemberHandler
 from synapse.replication.http.membership import (
-    remote_join, remote_reject_invite, get_or_register_3pid_guest,
-    notify_user_membership_change,
+    ReplicationRegister3PIDGuestRestServlet as Repl3PID,
+    ReplicationRemoteJoinRestServlet as ReplRemoteJoin,
+    ReplicationRemoteRejectInviteRestServlet as ReplRejectInvite,
+    ReplicationUserJoinedLeftRoomRestServlet as ReplJoinedLeft,
 )
 
-
 logger = logging.getLogger(__name__)
 
 
 class RoomMemberWorkerHandler(RoomMemberHandler):
+    def __init__(self, hs):
+        super(RoomMemberWorkerHandler, self).__init__(hs)
+
+        self._get_register_3pid_client = Repl3PID.make_client(hs)
+        self._remote_join_client = ReplRemoteJoin.make_client(hs)
+        self._remote_reject_client = ReplRejectInvite.make_client(hs)
+        self._notify_change_client = ReplJoinedLeft.make_client(hs)
+
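The rewrite replaces free functions that took an HTTP client plus explicit host and port with callables built by each replication servlet's make_client; the connection details now come from the homeserver's own config. A hedged sketch of how one such client is built and invoked, from inside an inlineCallbacks method (the keyword set mirrors the call sites in this file):

    remote_join = ReplRemoteJoin.make_client(hs)  # built once, at construction

    # invoked later; performs an HTTP hit to the master process
    ret = yield remote_join(
        requester=requester,
        remote_room_hosts=remote_room_hosts,
        room_id=room_id,
        user_id=user.to_string(),
        content=content,
    )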
     @defer.inlineCallbacks
     def _remote_join(self, requester, remote_room_hosts, room_id, user, content):
         """Implements RoomMemberHandler._remote_join
@@ -36,10 +45,7 @@ class RoomMemberWorkerHandler(RoomMemberHandler):
         if len(remote_room_hosts) == 0:
             raise SynapseError(404, "No known servers")
 
-        ret = yield remote_join(
-            self.simple_http_client,
-            host=self.config.worker_replication_host,
-            port=self.config.worker_replication_http_port,
+        ret = yield self._remote_join_client(
             requester=requester,
             remote_room_hosts=remote_room_hosts,
             room_id=room_id,
@@ -54,10 +60,7 @@ class RoomMemberWorkerHandler(RoomMemberHandler):
     def _remote_reject_invite(self, requester, remote_room_hosts, room_id, target):
         """Implements RoomMemberHandler._remote_reject_invite
         """
-        return remote_reject_invite(
-            self.simple_http_client,
-            host=self.config.worker_replication_host,
-            port=self.config.worker_replication_http_port,
+        return self._remote_reject_client(
             requester=requester,
             remote_room_hosts=remote_room_hosts,
             room_id=room_id,
@@ -67,10 +70,7 @@ class RoomMemberWorkerHandler(RoomMemberHandler):
     def _user_joined_room(self, target, room_id):
         """Implements RoomMemberHandler._user_joined_room
         """
-        return notify_user_membership_change(
-            self.simple_http_client,
-            host=self.config.worker_replication_host,
-            port=self.config.worker_replication_http_port,
+        return self._notify_change_client(
             user_id=target.to_string(),
             room_id=room_id,
             change="joined",
@@ -79,10 +79,7 @@ class RoomMemberWorkerHandler(RoomMemberHandler):
     def _user_left_room(self, target, room_id):
         """Implements RoomMemberHandler._user_left_room
         """
-        return notify_user_membership_change(
-            self.simple_http_client,
-            host=self.config.worker_replication_host,
-            port=self.config.worker_replication_http_port,
+        return self._notify_change_client(
             user_id=target.to_string(),
             room_id=room_id,
             change="left",
@@ -91,10 +88,7 @@ class RoomMemberWorkerHandler(RoomMemberHandler):
     def get_or_register_3pid_guest(self, requester, medium, address, inviter_user_id):
         """Implements RoomMemberHandler.get_or_register_3pid_guest
         """
-        return get_or_register_3pid_guest(
-            self.simple_http_client,
-            host=self.config.worker_replication_host,
-            port=self.config.worker_replication_http_port,
+        return self._get_register_3pid_client(
             requester=requester,
             medium=medium,
             address=address,
diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py
index 1eca26aa1e..c464adbd0b 100644
--- a/synapse/handlers/search.py
+++ b/synapse/handlers/search.py
@@ -13,21 +13,20 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from twisted.internet import defer
+import itertools
+import logging
 
-from ._base import BaseHandler
+from unpaddedbase64 import decode_base64, encode_base64
 
-from synapse.api.constants import Membership, EventTypes
-from synapse.api.filtering import Filter
+from twisted.internet import defer
+
+from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import SynapseError
+from synapse.api.filtering import Filter
 from synapse.events.utils import serialize_event
 from synapse.visibility import filter_events_for_client
 
-from unpaddedbase64 import decode_base64, encode_base64
-
-import itertools
-import logging
-
+from ._base import BaseHandler
 
 logger = logging.getLogger(__name__)
 
@@ -64,6 +63,13 @@ class SearchHandler(BaseHandler):
             except Exception:
                 raise SynapseError(400, "Invalid batch")
 
+        logger.info(
+            "Search batch properties: %r, %r, %r",
+            batch_group, batch_group_key, batch_token,
+        )
+
+        logger.info("Search content: %s", content)
+
         try:
             room_cat = content["search_categories"]["room_events"]
 
@@ -271,6 +277,8 @@ class SearchHandler(BaseHandler):
             # We should never get here due to the guard earlier.
             raise NotImplementedError()
 
+        logger.info("Found %d events to return", len(allowed_events))
+
         # If client has asked for "context" for each event (i.e. some surrounding
         # events and state), fetch that
         if event_context is not None:
@@ -279,7 +287,12 @@ class SearchHandler(BaseHandler):
             contexts = {}
             for event in allowed_events:
                 res = yield self.store.get_events_around(
-                    event.room_id, event.event_id, before_limit, after_limit
+                    event.room_id, event.event_id, before_limit, after_limit,
+                )
+
+                logger.info(
+                    "Context for search returned %d and %d events",
+                    len(res["events_before"]), len(res["events_after"]),
                 )
 
                 res["events_before"] = yield filter_events_for_client(
diff --git a/synapse/handlers/set_password.py b/synapse/handlers/set_password.py
index e057ae54c9..7ecdede4dc 100644
--- a/synapse/handlers/set_password.py
+++ b/synapse/handlers/set_password.py
@@ -17,6 +17,7 @@ import logging
 from twisted.internet import defer
 
 from synapse.api.errors import Codes, StoreError, SynapseError
+
 from ._base import BaseHandler
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 51ec727df0..ef20c2296c 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
-# Copyright 2015 - 2016 OpenMarket Ltd
+# Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,24 +14,34 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.api.constants import Membership, EventTypes
-from synapse.util.async import concurrently_execute
+import collections
+import itertools
+import logging
+
+from six import iteritems, itervalues
+
+from twisted.internet import defer
+
+from synapse.api.constants import EventTypes, Membership
+from synapse.push.clientformat import format_push_rules_for_user
+from synapse.types import RoomStreamToken
+from synapse.util.async_helpers import concurrently_execute
+from synapse.util.caches.expiringcache import ExpiringCache
+from synapse.util.caches.lrucache import LruCache
+from synapse.util.caches.response_cache import ResponseCache
 from synapse.util.logcontext import LoggingContext
 from synapse.util.metrics import Measure, measure_func
-from synapse.util.caches.response_cache import ResponseCache
-from synapse.push.clientformat import format_push_rules_for_user
 from synapse.visibility import filter_events_for_client
-from synapse.types import RoomStreamToken
 
-from twisted.internet import defer
-
-import collections
-import logging
-import itertools
+logger = logging.getLogger(__name__)
 
-from six import itervalues, iteritems
+# How long to keep the per-client cache that tracks which lazy-loaded members
+# have been sent: each cache entry expires after 30 minutes.
+LAZY_LOADED_MEMBERS_CACHE_MAX_AGE = 30 * 60 * 1000
 
-logger = logging.getLogger(__name__)
+# Remember the last 100 members we sent to a client, so that we avoid
+# redundantly re-sending the same lazy-loaded members to that client
+LAZY_LOADED_MEMBERS_CACHE_MAX_SIZE = 100
 
 
 SyncConfig = collections.namedtuple("SyncConfig", [
@@ -64,6 +75,7 @@ class JoinedSyncResult(collections.namedtuple("JoinedSyncResult", [
     "ephemeral",
     "account_data",
     "unread_notifications",
+    "summary",
 ])):
     __slots__ = []
 
@@ -145,7 +157,7 @@ class SyncResult(collections.namedtuple("SyncResult", [
     "invited",  # InvitedSyncResult for each invited room.
     "archived",  # ArchivedSyncResult for each archived room.
     "to_device",  # List of direct messages for the device.
-    "device_lists",  # List of user_ids whose devices have chanegd
+    "device_lists",  # List of user_ids whose devices have changed
     "device_one_time_keys_count",  # Dict of algorithm to count for one time keys
                                    # for this device
     "groups",
@@ -173,6 +185,7 @@ class SyncResult(collections.namedtuple("SyncResult", [
 class SyncHandler(object):
 
     def __init__(self, hs):
+        self.hs_config = hs.config
         self.store = hs.get_datastore()
         self.notifier = hs.get_notifier()
         self.presence_handler = hs.get_presence_handler()
@@ -180,20 +193,35 @@ class SyncHandler(object):
         self.clock = hs.get_clock()
         self.response_cache = ResponseCache(hs, "sync")
         self.state = hs.get_state_handler()
+        self.auth = hs.get_auth()
+
+        # ExpiringCache((User, Device)) -> LruCache(state_key => event_id)
+        self.lazy_loaded_members_cache = ExpiringCache(
+            "lazy_loaded_members_cache", self.clock,
+            max_len=0, expiry_ms=LAZY_LOADED_MEMBERS_CACHE_MAX_AGE,
+        )
 
+    @defer.inlineCallbacks
     def wait_for_sync_for_user(self, sync_config, since_token=None, timeout=0,
                                full_state=False):
         """Get the sync for a client if we have new data for it now. Otherwise
         wait for new data to arrive on the server. If the timeout expires, then
         return an empty sync result.
         Returns:
-            A Deferred SyncResult.
+            Deferred[SyncResult]
         """
-        return self.response_cache.wrap(
+        # If the user is not part of the MAU group, then check that the limits
+        # have not been exceeded (if they are not in the group by this point,
+        # auth blocking is almost certain to kick in)
+        user_id = sync_config.user.to_string()
+        yield self.auth.check_auth_blocking(user_id)
+
+        res = yield self.response_cache.wrap(
             sync_config.request_key,
             self._wait_for_sync_for_user,
             sync_config, since_token, timeout, full_state,
         )
+        defer.returnValue(res)
 
     @defer.inlineCallbacks
     def _wait_for_sync_for_user(self, sync_config, since_token, timeout,
@@ -416,29 +444,44 @@ class SyncHandler(object):
         ))
 
     @defer.inlineCallbacks
-    def get_state_after_event(self, event):
+    def get_state_after_event(self, event, types=None, filtered_types=None):
         """
         Get the room state after the given event
 
         Args:
             event(synapse.events.EventBase): event of interest
+            types(list[(str, str|None)]|None): List of (type, state_key) tuples
+                which are used to filter the state fetched. If `state_key` is None,
+                all events of the given type are returned. If `types` itself is
+                None, no filtering is applied.
+            filtered_types(list[str]|None): Only apply filtering via `types` to this
+                list of event types.  Other types of events are returned unfiltered.
+                If None, `types` filtering is applied to all events.
 
         Returns:
             A Deferred map from ((type, state_key)->Event)
         """
-        state_ids = yield self.store.get_state_ids_for_event(event.event_id)
+        state_ids = yield self.store.get_state_ids_for_event(
+            event.event_id, types, filtered_types=filtered_types,
+        )
         if event.is_state():
             state_ids = state_ids.copy()
             state_ids[(event.type, event.state_key)] = event.event_id
         defer.returnValue(state_ids)
 
     @defer.inlineCallbacks
-    def get_state_at(self, room_id, stream_position):
+    def get_state_at(self, room_id, stream_position, types=None, filtered_types=None):
         """ Get the room state at a particular stream position
 
         Args:
             room_id(str): room for which to get state
             stream_position(StreamToken): point at which to get state
+            types(list[(str, str|None)]|None): List of (type, state_key) tuples
+                which are used to filter the state fetched. If `state_key` is None,
+                all events of the given type are returned.
+            filtered_types(list[str]|None): Only apply filtering via `types` to this
+                list of event types.  Other types of events are returned unfiltered.
+                If None, `types` filtering is applied to all events.
 
         Returns:
             A Deferred map from ((type, state_key)->Event)
@@ -453,7 +496,9 @@ class SyncHandler(object):
 
         if last_events:
             last_event = last_events[-1]
-            state = yield self.get_state_after_event(last_event)
+            state = yield self.get_state_after_event(
+                last_event, types, filtered_types=filtered_types,
+            )
 
         else:
             # no events in this room - so presumably no state
@@ -461,9 +506,141 @@ class SyncHandler(object):
         defer.returnValue(state)
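For example, a lazy-loading sync can now ask for just one user's membership while leaving every other state type unfiltered (a sketch; runs inside an inlineCallbacks method):

    state = yield self.get_state_at(
        room_id, stream_position,
        types=[(EventTypes.Member, user_id)],  # only this user's membership...
        filtered_types=[EventTypes.Member],    # ...other event types unfiltered
    )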
 
     @defer.inlineCallbacks
+    def compute_summary(self, room_id, sync_config, batch, state, now_token):
+        """ Works out a room summary block for this room, summarising the number
+        of joined members in the room, and providing the 'hero' members if the
+        room has no name so clients can consistently name rooms.  Also adds
+        state events to 'state' if needed to describe the heroes.
+
+        Args:
+            room_id(str):
+            sync_config(synapse.handlers.sync.SyncConfig):
+            batch(synapse.handlers.sync.TimelineBatch): The timeline batch for
+                the room that will be sent to the user.
+            state(dict): dict of (type, state_key) -> Event as returned by
+                compute_state_delta
+            now_token(str): Token of the end of the current batch.
+
+        Returns:
+             A deferred dict describing the room summary
+        """
+
+        # FIXME: this perpetuates https://github.com/matrix-org/synapse/issues/3305
+        last_events, _ = yield self.store.get_recent_event_ids_for_room(
+            room_id, end_token=now_token.room_key, limit=1,
+        )
+
+        if not last_events:
+            defer.returnValue(None)
+            return
+
+        last_event = last_events[-1]
+        state_ids = yield self.store.get_state_ids_for_event(
+            last_event.event_id, [
+                (EventTypes.Member, None),
+                (EventTypes.Name, ''),
+                (EventTypes.CanonicalAlias, ''),
+            ]
+        )
+
+        member_ids = {
+            state_key: event_id
+            for (t, state_key), event_id in iteritems(state_ids)
+            if t == EventTypes.Member
+        }
+        name_id = state_ids.get((EventTypes.Name, ''))
+        canonical_alias_id = state_ids.get((EventTypes.CanonicalAlias, ''))
+
+        summary = {}
+
+        # FIXME: it feels very heavy to load up every single membership event
+        # just to calculate the counts.
+        member_events = yield self.store.get_events(member_ids.values())
+
+        joined_user_ids = []
+        invited_user_ids = []
+
+        for ev in member_events.values():
+            if ev.content.get("membership") == Membership.JOIN:
+                joined_user_ids.append(ev.state_key)
+            elif ev.content.get("membership") == Membership.INVITE:
+                invited_user_ids.append(ev.state_key)
+
+        # TODO: only send these when they change.
+        summary["m.joined_member_count"] = len(joined_user_ids)
+        summary["m.invited_member_count"] = len(invited_user_ids)
+
+        if name_id or canonical_alias_id:
+            defer.returnValue(summary)
+
+        # FIXME: order by stream ordering, not alphabetic
+
+        me = sync_config.user.to_string()
+        if (joined_user_ids or invited_user_ids):
+            summary['m.heroes'] = sorted(
+                [
+                    user_id
+                    for user_id in (joined_user_ids + invited_user_ids)
+                    if user_id != me
+                ]
+            )[0:5]
+        else:
+            summary['m.heroes'] = sorted(
+                [user_id for user_id in member_ids.keys() if user_id != me]
+            )[0:5]
+
+        if not sync_config.filter_collection.lazy_load_members():
+            defer.returnValue(summary)
+
+        # ensure we send membership events for heroes if needed
+        cache_key = (sync_config.user.to_string(), sync_config.device_id)
+        cache = self.get_lazy_loaded_members_cache(cache_key)
+
+        # track which members the client should already know about via LL:
+        # Ones which are already in state...
+        existing_members = set(
+            user_id for (typ, user_id) in state.keys()
+            if typ == EventTypes.Member
+        )
+
+        # ...or ones which are in the timeline...
+        for ev in batch.events:
+            if ev.type == EventTypes.Member:
+                existing_members.add(ev.state_key)
+
+        # ...and then ensure any missing ones get included in state.
+        missing_hero_event_ids = [
+            member_ids[hero_id]
+            for hero_id in summary['m.heroes']
+            if (
+                cache.get(hero_id) != member_ids[hero_id] and
+                hero_id not in existing_members
+            )
+        ]
+
+        missing_hero_state = yield self.store.get_events(missing_hero_event_ids)
+        missing_hero_state = missing_hero_state.values()
+
+        for s in missing_hero_state:
+            cache.set(s.state_key, s.event_id)
+            state[(EventTypes.Member, s.state_key)] = s
+
+        defer.returnValue(summary)
+
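As a quick illustration of the hero-selection rule above (alphabetical, excluding the requesting user, capped at five; the FIXME notes it should eventually use stream ordering), with hypothetical user ids:

    me = "@me:example.org"
    member_ids = ["@zoe:example.org", "@anna:example.org", me, "@bob:example.org"]
    heroes = sorted(u for u in member_ids if u != me)[0:5]
    # heroes == ['@anna:example.org', '@bob:example.org', '@zoe:example.org']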
+    def get_lazy_loaded_members_cache(self, cache_key):
+        cache = self.lazy_loaded_members_cache.get(cache_key)
+        if cache is None:
+            logger.debug("creating LruCache for %r", cache_key)
+            cache = LruCache(LAZY_LOADED_MEMBERS_CACHE_MAX_SIZE)
+            self.lazy_loaded_members_cache[cache_key] = cache
+        else:
+            logger.debug("found LruCache for %r", cache_key)
+        return cache
+
+    @defer.inlineCallbacks
     def compute_state_delta(self, room_id, batch, sync_config, since_token, now_token,
                             full_state):
-        """ Works out the differnce in state between the start of the timeline
+        """ Works out the difference in state between the start of the timeline
         and the previous sync.
 
         Args:
@@ -477,7 +654,7 @@ class SyncHandler(object):
             full_state(bool): Whether to force returning the full state.
 
         Returns:
-             A deferred new event dictionary
+             A deferred dict of (type, state_key) -> Event
         """
         # TODO(mjark) Check if the state events were received by the server
         # after the previous sync, since we need to include those state
@@ -485,59 +662,130 @@ class SyncHandler(object):
         # TODO(mjark) Check for new redactions in the state events.
 
         with Measure(self.clock, "compute_state_delta"):
+
+            types = None
+            filtered_types = None
+
+            lazy_load_members = sync_config.filter_collection.lazy_load_members()
+            include_redundant_members = (
+                sync_config.filter_collection.include_redundant_members()
+            )
+
+            if lazy_load_members:
+                # We only request state for the members needed to display the
+                # timeline:
+
+                types = [
+                    (EventTypes.Member, state_key)
+                    for state_key in set(
+                        event.sender  # FIXME: we also care about invite targets etc.
+                        for event in batch.events
+                    )
+                ]
+
+                # only apply the filtering to room members
+                filtered_types = [EventTypes.Member]
+
+            timeline_state = {
+                (event.type, event.state_key): event.event_id
+                for event in batch.events if event.is_state()
+            }
+
             if full_state:
                 if batch:
                     current_state_ids = yield self.store.get_state_ids_for_event(
-                        batch.events[-1].event_id
+                        batch.events[-1].event_id, types=types,
+                        filtered_types=filtered_types,
                     )
 
                     state_ids = yield self.store.get_state_ids_for_event(
-                        batch.events[0].event_id
+                        batch.events[0].event_id, types=types,
+                        filtered_types=filtered_types,
                     )
+
                 else:
                     current_state_ids = yield self.get_state_at(
-                        room_id, stream_position=now_token
+                        room_id, stream_position=now_token, types=types,
+                        filtered_types=filtered_types,
                     )
 
                     state_ids = current_state_ids
 
-                timeline_state = {
-                    (event.type, event.state_key): event.event_id
-                    for event in batch.events if event.is_state()
-                }
-
                 state_ids = _calculate_state(
                     timeline_contains=timeline_state,
                     timeline_start=state_ids,
                     previous={},
                     current=current_state_ids,
+                    lazy_load_members=lazy_load_members,
                 )
             elif batch.limited:
                 state_at_previous_sync = yield self.get_state_at(
-                    room_id, stream_position=since_token
+                    room_id, stream_position=since_token, types=types,
+                    filtered_types=filtered_types,
                 )
 
                 current_state_ids = yield self.store.get_state_ids_for_event(
-                    batch.events[-1].event_id
+                    batch.events[-1].event_id, types=types,
+                    filtered_types=filtered_types,
                 )
 
                 state_at_timeline_start = yield self.store.get_state_ids_for_event(
-                    batch.events[0].event_id
+                    batch.events[0].event_id, types=types,
+                    filtered_types=filtered_types,
                 )
 
-                timeline_state = {
-                    (event.type, event.state_key): event.event_id
-                    for event in batch.events if event.is_state()
-                }
-
                 state_ids = _calculate_state(
                     timeline_contains=timeline_state,
                     timeline_start=state_at_timeline_start,
                     previous=state_at_previous_sync,
                     current=current_state_ids,
+                    lazy_load_members=lazy_load_members,
                 )
             else:
                 state_ids = {}
+                if lazy_load_members:
+                    if types:
+                        # We're returning an incremental sync, with no "gap" since
+                        # the previous sync, so normally there would be no state to return.
+                        # But we're lazy-loading, so the client might need some more
+                        # member events to understand the events in this timeline.
+                        # So we fish out all the member events corresponding to the
+                        # timeline here, and then dedupe any redundant ones below.
+
+                        state_ids = yield self.store.get_state_ids_for_event(
+                            batch.events[0].event_id, types=types,
+                            filtered_types=None,  # we only want members!
+                        )
+
+            if lazy_load_members and not include_redundant_members:
+                cache_key = (sync_config.user.to_string(), sync_config.device_id)
+                cache = self.get_lazy_loaded_members_cache(cache_key)
+
+                # if it's a new sync sequence, then assume the client has had
+                # amnesia and doesn't want any recent lazy-loaded members
+                # de-duplicated.
+                if since_token is None:
+                    logger.debug("clearing LruCache for %r", cache_key)
+                    cache.clear()
+                else:
+                    # only send members which aren't in our LruCache (either
+                    # because they're new to this client or have been pushed out
+                    # of the cache)
+                    logger.debug("filtering state from %r...", state_ids)
+                    state_ids = {
+                        t: event_id
+                        for t, event_id in state_ids.iteritems()
+                        if cache.get(t[1]) != event_id
+                    }
+                    logger.debug("...to %r", state_ids)
+
+                # add any member IDs we are about to send into our LruCache
+                for t, event_id in itertools.chain(
+                    state_ids.items(),
+                    timeline_state.items(),
+                ):
+                    if t[0] == EventTypes.Member:
+                        cache.set(t[1], event_id)
 
         state = {}
         if state_ids:
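The de-duplication step above, reduced to its essentials (a plain dict stands in for the per-(user, device) LruCache):

    def dedupe_members(state_ids, cache):
        # drop any member whose event id we have already sent to this device
        remaining = {
            t: event_id
            for t, event_id in state_ids.items()
            if cache.get(t[1]) != event_id
        }
        # remember what we are about to send, for the next sync
        for (typ, state_key), event_id in remaining.items():
            if typ == "m.room.member":
                cache[state_key] = event_id
        return remaining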
@@ -620,7 +868,7 @@ class SyncHandler(object):
             since_token is None and
             sync_config.filter_collection.blocks_all_presence()
         )
-        if not block_all_presence_data:
+        if self.hs_config.use_presence and not block_all_presence_data:
             yield self._generate_sync_entry_for_presence(
                 sync_result_builder, newly_joined_rooms, newly_joined_users
             )
@@ -1312,7 +1560,6 @@ class SyncHandler(object):
             if events == [] and tags is None:
                 return
 
-        since_token = sync_result_builder.since_token
         now_token = sync_result_builder.now_token
         sync_config = sync_result_builder.sync_config
 
@@ -1355,6 +1602,18 @@ class SyncHandler(object):
             full_state=full_state
         )
 
+        summary = {}
+        if (
+            sync_config.filter_collection.lazy_load_members() and
+            (
+                any(ev.type == EventTypes.Member for ev in batch.events) or
+                since_token is None
+            )
+        ):
+            summary = yield self.compute_summary(
+                room_id, sync_config, batch, state, now_token
+            )
+
         if room_builder.rtype == "joined":
             unread_notifications = {}
             room_sync = JoinedSyncResult(
@@ -1364,6 +1623,7 @@ class SyncHandler(object):
                 ephemeral=ephemeral,
                 account_data=account_data_events,
                 unread_notifications=unread_notifications,
+                summary=summary,
             )
 
             if room_sync or always_include:
@@ -1448,7 +1708,9 @@ def _action_has_highlight(actions):
     return False
 
 
-def _calculate_state(timeline_contains, timeline_start, previous, current):
+def _calculate_state(
+    timeline_contains, timeline_start, previous, current, lazy_load_members,
+):
     """Works out what state to include in a sync response.
 
     Args:
@@ -1457,6 +1719,9 @@ def _calculate_state(timeline_contains, timeline_start, previous, current):
         previous (dict): state at the end of the previous sync (or empty dict
             if this is an initial sync)
         current (dict): state at the end of the timeline
+        lazy_load_members (bool): whether to return members from timeline_start
+            or not. Assumes that timeline_start has already been filtered to
+            include only the members the client needs to know about.
 
     Returns:
         dict
@@ -1472,9 +1737,25 @@ def _calculate_state(timeline_contains, timeline_start, previous, current):
     }
 
     c_ids = set(e for e in current.values())
-    tc_ids = set(e for e in timeline_contains.values())
-    p_ids = set(e for e in previous.values())
     ts_ids = set(e for e in timeline_start.values())
+    p_ids = set(e for e in previous.values())
+    tc_ids = set(e for e in timeline_contains.values())
+
+    # If we are lazyloading room members, we explicitly add the membership events
+    # for the senders in the timeline into the state block returned by /sync,
+    # as we may not have sent them to the client before.  We find these membership
+    # events by filtering them out of timeline_start, which has already been filtered
+    # to only include membership events for the senders in the timeline.
+    # In practice, we can do this by removing them from the p_ids list,
+    # which is the list of relevant state we know we have already sent to the client.
+    # see https://github.com/matrix-org/synapse/pull/2970
+    #            /files/efcdacad7d1b7f52f879179701c7e0d9b763511f#r204732809
+
+    if lazy_load_members:
+        p_ids.difference_update(
+            e for t, e in timeline_start.iteritems()
+            if t[0] == EventTypes.Member
+        )
 
     state_ids = ((c_ids | ts_ids) - p_ids) - tc_ids
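A worked example of that final set expression, with hypothetical event ids:

    c_ids = {"$a", "$b"}    # current state
    ts_ids = {"$a", "$m"}   # state at timeline start
    p_ids = {"$a"}          # state already sent at the previous sync
    tc_ids = {"$b"}         # state events inside the timeline itself
    ((c_ids | ts_ids) - p_ids) - tc_ids  # -> {'$m'}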
 
diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py
index 5d9736e88f..2d2d3d5a0d 100644
--- a/synapse/handlers/typing.py
+++ b/synapse/handlers/typing.py
@@ -13,17 +13,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import logging
+from collections import namedtuple
+
 from twisted.internet import defer
 
-from synapse.api.errors import SynapseError, AuthError
+from synapse.api.errors import AuthError, SynapseError
+from synapse.types import UserID, get_domain_from_id
 from synapse.util.logcontext import run_in_background
 from synapse.util.metrics import Measure
 from synapse.util.wheel_timer import WheelTimer
-from synapse.types import UserID, get_domain_from_id
-
-import logging
-
-from collections import namedtuple
 
 logger = logging.getLogger(__name__)
 
diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py
index a39f0f7343..d8413d6aa7 100644
--- a/synapse/handlers/user_directory.py
+++ b/synapse/handlers/user_directory.py
@@ -14,15 +14,15 @@
 # limitations under the License.
 
 import logging
+
+from six import iteritems
+
 from twisted.internet import defer
 
 from synapse.api.constants import EventTypes, JoinRules, Membership
 from synapse.storage.roommember import ProfileInfo
-from synapse.util.metrics import Measure
-from synapse.util.async import sleep
 from synapse.types import get_localpart_from_id
-
-from six import iteritems
+from synapse.util.metrics import Measure
 
 logger = logging.getLogger(__name__)
 
@@ -119,6 +119,8 @@ class UserDirectoryHandler(object):
         """Called to update index of our local user profiles when they change
         irrespective of any rooms the user may be in.
         """
+        # FIXME(#3714): We should probably do this in the same worker as all
+        # the other changes.
         yield self.store.update_profile_in_user_dir(
             user_id, profile.display_name, profile.avatar_url, None,
         )
@@ -127,6 +129,8 @@ class UserDirectoryHandler(object):
     def handle_user_deactivated(self, user_id):
         """Called when a user ID is deactivated
         """
+        # FIXME(#3714): We should probably do this in the same worker as all
+        # the other changes.
         yield self.store.remove_from_user_dir(user_id)
         yield self.store.remove_from_user_in_public_room(user_id)
 
@@ -174,7 +178,7 @@ class UserDirectoryHandler(object):
             logger.info("Handling room %d/%d", num_processed_rooms + 1, len(room_ids))
             yield self._handle_initial_room(room_id)
             num_processed_rooms += 1
-            yield sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.)
+            yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.)
 
         logger.info("Processed all rooms.")
 
@@ -188,7 +192,7 @@ class UserDirectoryHandler(object):
                 logger.info("Handling user %d/%d", num_processed_users + 1, len(user_ids))
                 yield self._handle_local_user(user_id)
                 num_processed_users += 1
-                yield sleep(self.INITIAL_USER_SLEEP_MS / 1000.)
+                yield self.clock.sleep(self.INITIAL_USER_SLEEP_MS / 1000.)
 
             logger.info("Processed all users")
 
@@ -236,7 +240,7 @@ class UserDirectoryHandler(object):
         count = 0
         for user_id in user_ids:
             if count % self.INITIAL_ROOM_SLEEP_COUNT == 0:
-                yield sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.)
+                yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.)
 
             if not self.is_mine_id(user_id):
                 count += 1
@@ -251,7 +255,7 @@ class UserDirectoryHandler(object):
                     continue
 
                 if count % self.INITIAL_ROOM_SLEEP_COUNT == 0:
-                    yield sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.)
+                    yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.)
                 count += 1
 
                 user_set = (user_id, other_user_id)
diff --git a/synapse/http/additional_resource.py b/synapse/http/additional_resource.py
index a797396ade..0e10e3f8f7 100644
--- a/synapse/http/additional_resource.py
+++ b/synapse/http/additional_resource.py
@@ -13,10 +13,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.http.server import wrap_json_request_handler
 from twisted.web.resource import Resource
 from twisted.web.server import NOT_DONE_YET
 
+from synapse.http.server import wrap_json_request_handler
+
 
 class AdditionalResource(Resource):
     """Resource wrapper for additional_resources
diff --git a/synapse/http/client.py b/synapse/http/client.py
index 8064a84c5c..ab4fbf59b2 100644
--- a/synapse/http/client.py
+++ b/synapse/http/client.py
@@ -13,39 +13,38 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from OpenSSL import SSL
-from OpenSSL.SSL import VERIFY_NONE
+import logging
+import urllib
 
-from synapse.api.errors import (
-    CodeMessageException, MatrixCodeMessageException, SynapseError, Codes,
-)
-from synapse.http import cancelled_to_request_timed_out_error, redact_uri
-from synapse.util.async import add_timeout_to_deferred
-from synapse.util.caches import CACHE_SIZE_FACTOR
-from synapse.util.logcontext import make_deferred_yieldable
-from synapse.http.endpoint import SpiderEndpoint
+from six import StringIO
 
-from canonicaljson import encode_canonical_json
+from canonicaljson import encode_canonical_json, json
+from prometheus_client import Counter
 
-from twisted.internet import defer, reactor, ssl, protocol, task
+from OpenSSL import SSL
+from OpenSSL.SSL import VERIFY_NONE
+from twisted.internet import defer, protocol, reactor, ssl, task
 from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
+from twisted.web._newclient import ResponseDone
 from twisted.web.client import (
-    BrowserLikeRedirectAgent, ContentDecoderAgent, GzipDecoder, Agent,
-    readBody, PartialDownloadError,
+    Agent,
+    BrowserLikeRedirectAgent,
+    ContentDecoderAgent,
+    FileBodyProducer as TwistedFileBodyProducer,
+    GzipDecoder,
     HTTPConnectionPool,
+    PartialDownloadError,
+    readBody,
 )
-from twisted.web.client import FileBodyProducer as TwistedFileBodyProducer
 from twisted.web.http import PotentialDataLoss
 from twisted.web.http_headers import Headers
-from twisted.web._newclient import ResponseDone
-
-from six import StringIO
-
-from prometheus_client import Counter
-import simplejson as json
-import logging
-import urllib
 
+from synapse.api.errors import Codes, HttpResponseException, SynapseError
+from synapse.http import cancelled_to_request_timed_out_error, redact_uri
+from synapse.http.endpoint import SpiderEndpoint
+from synapse.util.async_helpers import add_timeout_to_deferred
+from synapse.util.caches import CACHE_SIZE_FACTOR
+from synapse.util.logcontext import make_deferred_yieldable
 
 logger = logging.getLogger(__name__)
 
@@ -98,8 +97,8 @@ class SimpleHttpClient(object):
                 method, uri, *args, **kwargs
             )
             add_timeout_to_deferred(
-                request_deferred,
-                60, cancelled_to_request_timed_out_error,
+                request_deferred, 60, self.hs.get_reactor(),
+                cancelled_to_request_timed_out_error,
             )
             response = yield make_deferred_yieldable(request_deferred)
 
@@ -115,7 +114,7 @@ class SimpleHttpClient(object):
                 "Error sending request to  %s %s: %s %s",
                 method, redact_uri(uri), type(e).__name__, e.message
             )
-            raise e
+            raise
 
     @defer.inlineCallbacks
     def post_urlencoded_get_json(self, uri, args={}, headers=None):
@@ -128,6 +127,11 @@ class SimpleHttpClient(object):
 
         Returns:
             Deferred[object]: parsed json
+
+        Raises:
+            HttpResponseException: On a non-2xx HTTP response.
+
+            ValueError: if the response was not JSON
         """
 
         # TODO: Do we ever want to log message contents?
@@ -151,7 +155,10 @@ class SimpleHttpClient(object):
 
         body = yield make_deferred_yieldable(readBody(response))
 
-        defer.returnValue(json.loads(body))
+        if 200 <= response.code < 300:
+            defer.returnValue(json.loads(body))
+        else:
+            raise HttpResponseException(response.code, response.phrase, body)
 
     @defer.inlineCallbacks
     def post_json_get_json(self, uri, post_json, headers=None):
@@ -165,6 +172,11 @@ class SimpleHttpClient(object):
 
         Returns:
             Deferred[object]: parsed json
+
+        Raises:
+            HttpResponseException: On a non-2xx HTTP response.
+
+            ValueError: if the response was not JSON
         """
         json_str = encode_canonical_json(post_json)
 
@@ -189,9 +201,7 @@ class SimpleHttpClient(object):
         if 200 <= response.code < 300:
             defer.returnValue(json.loads(body))
         else:
-            raise self._exceptionFromFailedRequest(response, body)
-
-        defer.returnValue(json.loads(body))
+            raise HttpResponseException(response.code, response.phrase, body)
 
     @defer.inlineCallbacks
     def get_json(self, uri, args={}, headers=None):
@@ -209,14 +219,12 @@ class SimpleHttpClient(object):
             Deferred: Succeeds when we get *any* 2xx HTTP response, with the
             HTTP body as JSON.
         Raises:
-            On a non-2xx HTTP response. The response body will be used as the
-            error message.
+            HttpResponseException: On a non-2xx HTTP response.
+
+            ValueError: if the response was not JSON
         """
-        try:
-            body = yield self.get_raw(uri, args, headers=headers)
-            defer.returnValue(json.loads(body))
-        except CodeMessageException as e:
-            raise self._exceptionFromFailedRequest(e.code, e.msg)
+        body = yield self.get_raw(uri, args, headers=headers)
+        defer.returnValue(json.loads(body))
 
     @defer.inlineCallbacks
     def put_json(self, uri, json_body, args={}, headers=None):
@@ -235,7 +243,9 @@ class SimpleHttpClient(object):
             Deferred: Succeeds when we get *any* 2xx HTTP response, with the
             HTTP body as JSON.
         Raises:
-            On a non-2xx HTTP response.
+            HttpResponseException: On a non-2xx HTTP response.
+
+            ValueError: if the response was not JSON
         """
         if len(args):
             query_bytes = urllib.urlencode(args, True)
@@ -262,10 +272,7 @@ class SimpleHttpClient(object):
         if 200 <= response.code < 300:
             defer.returnValue(json.loads(body))
         else:
-            # NB: This is explicitly not json.loads(body)'d because the contract
-            # of CodeMessageException is a *string* message. Callers can always
-            # load it into JSON if they want.
-            raise CodeMessageException(response.code, body)
+            raise HttpResponseException(response.code, response.phrase, body)
 
     @defer.inlineCallbacks
     def get_raw(self, uri, args={}, headers=None):
@@ -283,8 +290,7 @@ class SimpleHttpClient(object):
             Deferred: Succeeds when we get *any* 2xx HTTP response, with the
             HTTP body as text.
         Raises:
-            On a non-2xx HTTP response. The response body will be used as the
-            error message.
+            HttpResponseException on a non-2xx HTTP response.
         """
         if len(args):
             query_bytes = urllib.urlencode(args, True)
@@ -307,16 +313,7 @@ class SimpleHttpClient(object):
         if 200 <= response.code < 300:
             defer.returnValue(body)
         else:
-            raise CodeMessageException(response.code, body)
-
-    def _exceptionFromFailedRequest(self, response, body):
-        try:
-            jsonBody = json.loads(body)
-            errcode = jsonBody['errcode']
-            error = jsonBody['error']
-            return MatrixCodeMessageException(response.code, error, errcode)
-        except (ValueError, KeyError):
-            return CodeMessageException(response.code, body)
+            raise HttpResponseException(response.code, response.phrase, body)
 
     # XXX: FIXME: This is horribly copy-pasted from matrixfederationclient.
     # The two should be factored out.
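A hedged sketch of the caller-side contract after this change: non-2xx responses now surface uniformly as HttpResponseException (the URL and logger here are illustrative):

    from twisted.internet import defer
    from synapse.api.errors import HttpResponseException

    @defer.inlineCallbacks
    def fetch_thing(client):
        try:
            body = yield client.get_json("https://example.org/thing.json")
        except HttpResponseException as e:
            logger.warning("request failed with HTTP %s", e.code)
        else:
            defer.returnValue(body)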
diff --git a/synapse/http/endpoint.py b/synapse/http/endpoint.py
index 87a482650d..b0c9369519 100644
--- a/synapse/http/endpoint.py
+++ b/synapse/http/endpoint.py
@@ -12,21 +12,20 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
-from twisted.internet import defer, reactor
-from twisted.internet.error import ConnectError
-from twisted.names import client, dns
-from twisted.names.error import DNSNameError, DomainError
-
 import collections
 import logging
 import random
+import re
 import time
 
+from twisted.internet import defer
+from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
+from twisted.internet.error import ConnectError
+from twisted.names import client, dns
+from twisted.names.error import DNSNameError, DomainError
 
 logger = logging.getLogger(__name__)
 
-
 SERVER_CACHE = {}
 
 # our record of an individual server which can be tried to reach a destination.
@@ -38,34 +37,98 @@ _Server = collections.namedtuple(
 )
 
 
-def matrix_federation_endpoint(reactor, destination, ssl_context_factory=None,
+def parse_server_name(server_name):
+    """Split a server name into host/port parts.
+
+    Args:
+        server_name (str): server name to parse
+
+    Returns:
+        Tuple[str, int|None]: host/port parts.
+
+    Raises:
+        ValueError if the server name could not be parsed.
+    """
+    try:
+        if server_name[-1] == ']':
+            # ipv6 literal, hopefully
+            return server_name, None
+
+        domain_port = server_name.rsplit(":", 1)
+        domain = domain_port[0]
+        port = int(domain_port[1]) if domain_port[1:] else None
+        return domain, port
+    except Exception:
+        raise ValueError("Invalid server name '%s'" % server_name)
+
+
+VALID_HOST_REGEX = re.compile(
+    "\\A[0-9a-zA-Z.-]+\\Z",
+)
+
+
+def parse_and_validate_server_name(server_name):
+    """Split a server name into host/port parts and do some basic validation.
+
+    Args:
+        server_name (str): server name to parse
+
+    Returns:
+        Tuple[str, int|None]: host/port parts.
+
+    Raises:
+        ValueError if the server name could not be parsed.
+    """
+    host, port = parse_server_name(server_name)
+
+    # these tests don't need to be bulletproof as we'll find out soon enough
+    # if somebody is giving us invalid data. What we *do* need is to be sure
+    # that nobody is sneaking IP literals in that look like hostnames, etc.
+
+    # look for ipv6 literals
+    if host[0] == '[':
+        if host[-1] != ']':
+            raise ValueError("Mismatched [...] in server name '%s'" % (
+                server_name,
+            ))
+        return host, port
+
+    # otherwise it should only be alphanumerics, dots and dashes.
+    if not VALID_HOST_REGEX.match(host):
+        raise ValueError("Server name '%s' contains invalid characters" % (
+            server_name,
+        ))
+
+    return host, port
+
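Expected behaviour of the two parsers above, illustratively:

    parse_server_name("matrix.org")              # -> ("matrix.org", None)
    parse_server_name("matrix.org:8448")         # -> ("matrix.org", 8448)
    parse_server_name("[2001:db8::1]")           # -> ("[2001:db8::1]", None)
    parse_and_validate_server_name("bad host!")  # raises ValueError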
+
+def matrix_federation_endpoint(reactor, destination, tls_client_options_factory=None,
                                timeout=None):
     """Construct an endpoint for the given matrix destination.
 
     Args:
         reactor: Twisted reactor.
         destination (bytes): The name of the server to connect to.
-        ssl_context_factory (twisted.internet.ssl.ContextFactory): Factory
-            which generates SSL contexts to use for TLS.
+        tls_client_options_factory
+            (synapse.crypto.context_factory.ClientTLSOptionsFactory):
+            Factory which generates TLS options for client connections.
         timeout (int): connection timeout in seconds
     """
 
-    domain_port = destination.split(":")
-    domain = domain_port[0]
-    port = int(domain_port[1]) if domain_port[1:] else None
+    domain, port = parse_server_name(destination)
 
     endpoint_kw_args = {}
 
     if timeout is not None:
         endpoint_kw_args.update(timeout=timeout)
 
-    if ssl_context_factory is None:
+    if tls_client_options_factory is None:
         transport_endpoint = HostnameEndpoint
         default_port = 8008
     else:
         def transport_endpoint(reactor, host, port, timeout):
             return wrapClientTLS(
-                ssl_context_factory,
+                tls_client_options_factory.get_options(host),
                 HostnameEndpoint(reactor, host, port, timeout=timeout))
         default_port = 8448
 
@@ -74,21 +137,22 @@ def matrix_federation_endpoint(reactor, destination, ssl_context_factory=None,
             reactor, "matrix", domain, protocol="tcp",
             default_port=default_port, endpoint=transport_endpoint,
             endpoint_kw_args=endpoint_kw_args
-        ))
+        ), reactor)
     else:
         return _WrappingEndpointFac(transport_endpoint(
             reactor, domain, port, **endpoint_kw_args
-        ))
+        ), reactor)
 
 
 class _WrappingEndpointFac(object):
-    def __init__(self, endpoint_fac):
+    def __init__(self, endpoint_fac, reactor):
         self.endpoint_fac = endpoint_fac
+        self.reactor = reactor
 
     @defer.inlineCallbacks
     def connect(self, protocolFactory):
         conn = yield self.endpoint_fac.connect(protocolFactory)
-        conn = _WrappedConnection(conn)
+        conn = _WrappedConnection(conn, self.reactor)
         defer.returnValue(conn)
 
 
@@ -98,9 +162,10 @@ class _WrappedConnection(object):
     """
     __slots__ = ["conn", "last_request"]
 
-    def __init__(self, conn):
+    def __init__(self, conn, reactor):
         object.__setattr__(self, "conn", conn)
         object.__setattr__(self, "last_request", time.time())
+        self._reactor = reactor
 
     def __getattr__(self, name):
         return getattr(self.conn, name)
@@ -131,14 +196,14 @@ class _WrappedConnection(object):
         # Time this connection out if we haven't send a request in the last
         # N minutes
         # TODO: Cancel the previous callLater?
-        reactor.callLater(3 * 60, self._time_things_out_maybe)
+        self._reactor.callLater(3 * 60, self._time_things_out_maybe)
 
         d = self.conn.request(request)
 
         def update_request_time(res):
             self.last_request = time.time()
             # TODO: Cancel the previous callLater?
-            reactor.callLater(3 * 60, self._time_things_out_maybe)
+            self._reactor.callLater(3 * 60, self._time_things_out_maybe)
             return res
 
         d.addCallback(update_request_time)
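The underlying pattern is dependency injection of the reactor: take it as a constructor argument rather than importing the global, so tests can substitute a fake clock. A minimal sketch with hypothetical names:

    class IdleTimeoutConnection(object):
        def __init__(self, conn, reactor):
            self._conn = conn
            self._reactor = reactor

        def _time_out_maybe(self):
            pass  # close self._conn if it has been idle too long

        def request(self, request):
            # schedule against the injected reactor, not twisted.internet.reactor
            self._reactor.callLater(3 * 60, self._time_out_maybe)
            return self._conn.request(request)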
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index 821aed362b..b34bb8e31a 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -13,39 +13,38 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from twisted.internet import defer, reactor, protocol
-from twisted.internet.error import DNSLookupError
-from twisted.web.client import readBody, HTTPConnectionPool, Agent
-from twisted.web.http_headers import Headers
-from twisted.web._newclient import ResponseDone
-
-from synapse.http import cancelled_to_request_timed_out_error
-from synapse.http.endpoint import matrix_federation_endpoint
-import synapse.metrics
-from synapse.util.async import sleep, add_timeout_to_deferred
-from synapse.util import logcontext
-from synapse.util.logcontext import make_deferred_yieldable
-import synapse.util.retryutils
-
-from canonicaljson import encode_canonical_json
-
-from synapse.api.errors import (
-    SynapseError, Codes, HttpResponseException, FederationDeniedError,
-)
-
-from signedjson.sign import sign_json
-
 import cgi
-import simplejson as json
 import logging
 import random
 import sys
 import urllib
-from six.moves.urllib import parse as urlparse
-from six import string_types
 
+from six import string_types
+from six.moves.urllib import parse as urlparse
 
+from canonicaljson import encode_canonical_json, json
 from prometheus_client import Counter
+from signedjson.sign import sign_json
+
+from twisted.internet import defer, protocol, reactor
+from twisted.internet.error import DNSLookupError
+from twisted.web._newclient import ResponseDone
+from twisted.web.client import Agent, HTTPConnectionPool, readBody
+from twisted.web.http_headers import Headers
+
+import synapse.metrics
+import synapse.util.retryutils
+from synapse.api.errors import (
+    Codes,
+    FederationDeniedError,
+    HttpResponseException,
+    SynapseError,
+)
+from synapse.http import cancelled_to_request_timed_out_error
+from synapse.http.endpoint import matrix_federation_endpoint
+from synapse.util import logcontext
+from synapse.util.async_helpers import add_timeout_to_deferred
+from synapse.util.logcontext import make_deferred_yieldable
 
 logger = logging.getLogger(__name__)
 outbound_logger = logging.getLogger("synapse.http.outbound")
@@ -62,14 +61,14 @@ MAX_SHORT_RETRIES = 3
 
 class MatrixFederationEndpointFactory(object):
     def __init__(self, hs):
-        self.tls_server_context_factory = hs.tls_server_context_factory
+        self.tls_client_options_factory = hs.tls_client_options_factory
 
     def endpointForURI(self, uri):
         destination = uri.netloc
 
         return matrix_federation_endpoint(
             reactor, destination, timeout=10,
-            ssl_context_factory=self.tls_server_context_factory
+            tls_client_options_factory=self.tls_client_options_factory
         )
 
 
@@ -134,7 +133,7 @@ class MatrixFederationHttpClient(object):
                 failures, connection failures, SSL failures.)
         """
         if (
-            self.hs.config.federation_domain_whitelist and
+            self.hs.config.federation_domain_whitelist is not None and
             destination not in self.hs.config.federation_domain_whitelist
         ):
             raise FederationDeniedError(destination)
@@ -193,6 +192,7 @@ class MatrixFederationHttpClient(object):
                         add_timeout_to_deferred(
                             request_deferred,
                             timeout / 1000. if timeout else 60,
+                            self.hs.get_reactor(),
                             cancelled_to_request_timed_out_error,
                         )
                         response = yield make_deferred_yieldable(
@@ -234,7 +234,7 @@ class MatrixFederationHttpClient(object):
                                 delay = min(delay, 2)
                                 delay *= random.uniform(0.8, 1.4)
 
-                            yield sleep(delay)
+                            yield self.clock.sleep(delay)
                             retries_left -= 1
                         else:
                             raise
@@ -260,14 +260,35 @@ class MatrixFederationHttpClient(object):
             defer.returnValue(response)
 
     def sign_request(self, destination, method, url_bytes, headers_dict,
-                     content=None):
+                     content=None, destination_is=None):
+        """
+        Signs a request by adding an Authorization header to headers_dict.
+        Args:
+            destination (bytes|None): The destination homeserver of the request.
+                May be None if the destination is an identity server, in which case
+                destination_is must be non-None.
+            method (bytes): The HTTP method of the request
+            url_bytes (bytes): The URI path of the request
+            headers_dict (dict): Dictionary of request headers to append to
+            content (bytes): The body of the request
+            destination_is (bytes): As 'destination', but if the destination is an
+                identity server
+
+        Returns:
+            None
+        """
         request = {
             "method": method,
             "uri": url_bytes,
             "origin": self.server_name,
-            "destination": destination,
         }
 
+        if destination is not None:
+            request["destination"] = destination
+
+        if destination_is is not None:
+            request["destination_is"] = destination_is
+
         if content is not None:
             request["content"] = content
 
@@ -418,7 +439,7 @@ class MatrixFederationHttpClient(object):
         defer.returnValue(json.loads(body))
 
     @defer.inlineCallbacks
-    def get_json(self, destination, path, args={}, retry_on_dns_fail=True,
+    def get_json(self, destination, path, args=None, retry_on_dns_fail=True,
                  timeout=None, ignore_backoff=False):
         """ GETs some json from the given host homeserver and path
 
@@ -426,7 +447,7 @@ class MatrixFederationHttpClient(object):
             destination (str): The remote server to send the HTTP request
                 to.
             path (str): The HTTP path.
-            args (dict): A dictionary used to create query strings, defaults to
+            args (dict|None): A dictionary used to create query strings, defaults to
                 None.
             timeout (int): How long to try (in ms) the destination for before
                 giving up. None indicates no timeout and that the request will
@@ -681,6 +702,9 @@ def check_content_type_is_json(headers):
 
 
 def encode_query_args(args):
+    if args is None:
+        return b""
+
     encoded_args = {}
     for k, vs in args.items():
         if isinstance(vs, string_types):
diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py
index dc06f6c443..72c2654678 100644
--- a/synapse/http/request_metrics.py
+++ b/synapse/http/request_metrics.py
@@ -15,10 +15,11 @@
 # limitations under the License.
 
 import logging
+import threading
 
 from prometheus_client.core import Counter, Histogram
-from synapse.metrics import LaterGauge
 
+from synapse.metrics import LaterGauge
 from synapse.util.logcontext import LoggingContext
 
 logger = logging.getLogger(__name__)
@@ -38,7 +39,8 @@ outgoing_responses_counter = Counter(
 )
 
 response_timer = Histogram(
-    "synapse_http_server_response_time_seconds", "sec", ["method", "servlet", "tag"]
+    "synapse_http_server_response_time_seconds", "sec",
+    ["method", "servlet", "tag", "code"],
 )
 
 response_ru_utime = Counter(
@@ -110,6 +112,9 @@ in_flight_requests_db_sched_duration = Counter(
 # The set of all in flight requests, set[RequestMetrics]
 _in_flight_requests = set()
 
+# Protects the _in_flight_requests set from concurrent access
+_in_flight_requests_lock = threading.Lock()
+
 
 def _get_in_flight_counts():
     """Returns a count of all in flight requests by (method, server_name)
@@ -117,13 +122,18 @@ def _get_in_flight_counts():
     Returns:
         dict[tuple[str, str], int]
     """
-    for rm in _in_flight_requests:
+    # Copy to a list to prevent it from changing while the Prometheus
+    # thread is collecting metrics
+    with _in_flight_requests_lock:
+        reqs = list(_in_flight_requests)
+
+    for rm in reqs:
         rm.update_metrics()
 
     # Map from (method, name) -> int, the number of in flight requests of that
     # type
     counts = {}
-    for rm in _in_flight_requests:
+    for rm in reqs:
         key = (rm.method, rm.name,)
         counts[key] = counts.get(key, 0) + 1
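The snapshot-under-lock pattern introduced here, reduced to its essentials:

    import threading

    _items = set()
    _items_lock = threading.Lock()

    def snapshot_items():
        # copy while holding the lock, so the caller iterates over a stable
        # list even if other threads add/discard concurrently
        with _items_lock:
            return list(_items)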
 
@@ -131,7 +141,7 @@ def _get_in_flight_counts():
 
 
 LaterGauge(
-    "synapse_http_request_metrics_in_flight_requests_count",
+    "synapse_http_server_in_flight_requests_count",
     "",
     ["method", "servlet"],
     _get_in_flight_counts,
@@ -145,12 +155,16 @@ class RequestMetrics(object):
         self.name = name
         self.method = method
 
-        self._request_stats = _RequestStats.from_context(self.start_context)
+        # _request_stats records resource usage that we have already added
+        # to the "in flight" metrics.
+        self._request_stats = self.start_context.get_resource_usage()
 
-        _in_flight_requests.add(self)
+        with _in_flight_requests_lock:
+            _in_flight_requests.add(self)
 
     def stop(self, time_sec, request):
-        _in_flight_requests.discard(self)
+        with _in_flight_requests_lock:
+            _in_flight_requests.discard(self)
 
         context = LoggingContext.current_context()
 
@@ -165,26 +179,32 @@ class RequestMetrics(object):
                 )
                 return
 
-        outgoing_responses_counter.labels(request.method, str(request.code)).inc()
+        response_code = str(request.code)
+
+        outgoing_responses_counter.labels(request.method, response_code).inc()
 
         response_count.labels(request.method, self.name, tag).inc()
 
-        response_timer.labels(request.method, self.name, tag).observe(
+        response_timer.labels(request.method, self.name, tag, response_code).observe(
             time_sec - self.start
         )
 
-        ru_utime, ru_stime = context.get_resource_usage()
+        resource_usage = context.get_resource_usage()
 
-        response_ru_utime.labels(request.method, self.name, tag).inc(ru_utime)
-        response_ru_stime.labels(request.method, self.name, tag).inc(ru_stime)
+        response_ru_utime.labels(request.method, self.name, tag).inc(
+            resource_usage.ru_utime,
+        )
+        response_ru_stime.labels(request.method, self.name, tag).inc(
+            resource_usage.ru_stime,
+        )
         response_db_txn_count.labels(request.method, self.name, tag).inc(
-            context.db_txn_count
+            resource_usage.db_txn_count
         )
         response_db_txn_duration.labels(request.method, self.name, tag).inc(
-            context.db_txn_duration_sec
+            resource_usage.db_txn_duration_sec
         )
         response_db_sched_duration.labels(request.method, self.name, tag).inc(
-            context.db_sched_duration_sec
+            resource_usage.db_sched_duration_sec
         )
 
         response_size.labels(request.method, self.name, tag).inc(request.sentLength)
@@ -197,7 +217,10 @@ class RequestMetrics(object):
     def update_metrics(self):
         """Updates the in flight metrics with values from this request.
         """
-        diff = self._request_stats.update(self.start_context)
+        new_stats = self.start_context.get_resource_usage()
+
+        diff = new_stats - self._request_stats
+        self._request_stats = new_stats
 
         in_flight_requests_ru_utime.labels(self.method, self.name).inc(diff.ru_utime)
         in_flight_requests_ru_stime.labels(self.method, self.name).inc(diff.ru_stime)
@@ -213,61 +236,3 @@ class RequestMetrics(object):
         in_flight_requests_db_sched_duration.labels(self.method, self.name).inc(
             diff.db_sched_duration_sec
         )
-
-
-class _RequestStats(object):
-    """Keeps tracks of various metrics for an in flight request.
-    """
-
-    __slots__ = [
-        "ru_utime",
-        "ru_stime",
-        "db_txn_count",
-        "db_txn_duration_sec",
-        "db_sched_duration_sec",
-    ]
-
-    def __init__(
-        self, ru_utime, ru_stime, db_txn_count, db_txn_duration_sec, db_sched_duration_sec
-    ):
-        self.ru_utime = ru_utime
-        self.ru_stime = ru_stime
-        self.db_txn_count = db_txn_count
-        self.db_txn_duration_sec = db_txn_duration_sec
-        self.db_sched_duration_sec = db_sched_duration_sec
-
-    @staticmethod
-    def from_context(context):
-        ru_utime, ru_stime = context.get_resource_usage()
-
-        return _RequestStats(
-            ru_utime, ru_stime,
-            context.db_txn_count,
-            context.db_txn_duration_sec,
-            context.db_sched_duration_sec,
-        )
-
-    def update(self, context):
-        """Updates the current values and returns the difference between the
-        old and new values.
-
-        Returns:
-            _RequestStats: The difference between the old and new values
-        """
-        new = _RequestStats.from_context(context)
-
-        diff = _RequestStats(
-            new.ru_utime - self.ru_utime,
-            new.ru_stime - self.ru_stime,
-            new.db_txn_count - self.db_txn_count,
-            new.db_txn_duration_sec - self.db_txn_duration_sec,
-            new.db_sched_duration_sec - self.db_sched_duration_sec,
-        )
-
-        self.ru_utime = new.ru_utime
-        self.ru_stime = new.ru_stime
-        self.db_txn_count = new.db_txn_count
-        self.db_txn_duration_sec = new.db_txn_duration_sec
-        self.db_sched_duration_sec = new.db_sched_duration_sec
-
-        return diff
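The class can go because get_resource_usage() now returns an object that supports subtraction, so the bookkeeping collapses to snapshot-and-diff. In miniature (Usage is a stand-in class):

    class Usage(object):
        def __init__(self, ru_utime=0.0, ru_stime=0.0):
            self.ru_utime = ru_utime
            self.ru_stime = ru_stime

        def __sub__(self, other):
            return Usage(self.ru_utime - other.ru_utime,
                         self.ru_stime - other.ru_stime)

    prev = Usage(1.0, 0.2)  # usage already reported to the gauges
    new = Usage(1.5, 0.3)   # cumulative usage now
    diff = new - prev       # delta to add to the in-flight metrics
    prev = new              # snapshot for the next poll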
diff --git a/synapse/http/server.py b/synapse/http/server.py
index bc09b8b2be..2d5c23e673 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -13,35 +13,38 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
 import cgi
-from six.moves import http_client
+import collections
+import logging
 
-from synapse.api.errors import (
-    cs_exception, SynapseError, CodeMessageException, UnrecognizedRequestError, Codes
-)
-from synapse.http.request_metrics import (
-    requests_counter,
-)
-from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
-from synapse.util.caches import intern_dict
-from synapse.util.metrics import Measure
-import synapse.metrics
-import synapse.events
+from six import PY3
+from six.moves import http_client, urllib
 
-from canonicaljson import (
-    encode_canonical_json, encode_pretty_printed_json
-)
+from canonicaljson import encode_canonical_json, encode_pretty_printed_json, json
 
 from twisted.internet import defer
 from twisted.python import failure
-from twisted.web import server, resource
+from twisted.web import resource
 from twisted.web.server import NOT_DONE_YET
+from twisted.web.static import NoRangeStaticProducer
 from twisted.web.util import redirectTo
 
-import collections
-import logging
-import urllib
-import simplejson
+import synapse.events
+import synapse.metrics
+from synapse.api.errors import (
+    CodeMessageException,
+    Codes,
+    SynapseError,
+    UnrecognizedRequestError,
+)
+from synapse.util.caches import intern_dict
+from synapse.util.logcontext import preserve_fn
+
+if PY3:
+    from io import BytesIO
+else:
+    from cStringIO import StringIO as BytesIO
 
 logger = logging.getLogger(__name__)
 
@@ -61,11 +64,10 @@ HTML_ERROR_TEMPLATE = """<!DOCTYPE html>
 def wrap_json_request_handler(h):
     """Wraps a request handler method with exception handling.
 
-    Also adds logging as per wrap_request_handler_with_logging.
+    Also does the wrapping with request.processing as per wrap_async_request_handler.
 
     The handler method must have a signature of "handle_foo(self, request)",
-    where "self" must have a "clock" attribute (and "request" must be a
-    SynapseRequest).
+    where "request" must be a SynapseRequest.
 
     The handler must return a deferred. If the deferred succeeds we assume that
     a response has been sent. If the deferred fails with a SynapseError we use
@@ -77,16 +79,13 @@ def wrap_json_request_handler(h):
     def wrapped_request_handler(self, request):
         try:
             yield h(self, request)
-        except CodeMessageException as e:
+        except SynapseError as e:
             code = e.code
-            if isinstance(e, SynapseError):
-                logger.info(
-                    "%s SynapseError: %s - %s", request, code, e.msg
-                )
-            else:
-                logger.exception(e)
+            logger.info(
+                "%s SynapseError: %s - %s", request, code, e.msg
+            )
             respond_with_json(
-                request, code, cs_exception(e), send_cors=True,
+                request, code, e.error_dict(), send_cors=True,
                 pretty_print=_request_user_agent_is_curl(request),
             )
 
@@ -112,24 +111,23 @@ def wrap_json_request_handler(h):
                 pretty_print=_request_user_agent_is_curl(request),
             )
 
-    return wrap_request_handler_with_logging(wrapped_request_handler)
+    return wrap_async_request_handler(wrapped_request_handler)
 
 
 def wrap_html_request_handler(h):
     """Wraps a request handler method with exception handling.
 
-    Also adds logging as per wrap_request_handler_with_logging.
+    Also does the wrapping with request.processing as per wrap_async_request_handler.
 
     The handler method must have a signature of "handle_foo(self, request)",
-    where "self" must have a "clock" attribute (and "request" must be a
-    SynapseRequest).
+    where "request" must be a SynapseRequest.
     """
     def wrapped_request_handler(self, request):
         d = defer.maybeDeferred(h, self, request)
         d.addErrback(_return_html_error, request)
         return d
 
-    return wrap_request_handler_with_logging(wrapped_request_handler)
+    return wrap_async_request_handler(wrapped_request_handler)
 
 
 def _return_html_error(f, request):
@@ -174,46 +172,26 @@ def _return_html_error(f, request):
     finish_request(request)
 
 
-def wrap_request_handler_with_logging(h):
-    """Wraps a request handler to provide logging and metrics
+def wrap_async_request_handler(h):
+    """Wraps an async request handler so that it calls request.processing.
+
+    This helps ensure that work done by the request handler after the request is completed
+    is correctly recorded against the request metrics/logs.
 
     The handler method must have a signature of "handle_foo(self, request)",
-    where "self" must have a "clock" attribute (and "request" must be a
-    SynapseRequest).
+    where "request" must be a SynapseRequest.
 
-    As well as calling `request.processing` (which will log the response and
-    duration for this request), the wrapped request handler will insert the
-    request id into the logging context.
+    The handler may return a deferred, in which case the completion of the request isn't
+    logged until the deferred completes.
     """
     @defer.inlineCallbacks
-    def wrapped_request_handler(self, request):
-        """
-        Args:
-            self:
-            request (synapse.http.site.SynapseRequest):
-        """
+    def wrapped_async_request_handler(self, request):
+        with request.processing():
+            yield h(self, request)
 
-        request_id = request.get_request_id()
-        with LoggingContext(request_id) as request_context:
-            request_context.request = request_id
-            with Measure(self.clock, "wrapped_request_handler"):
-                # we start the request metrics timer here with an initial stab
-                # at the servlet name. For most requests that name will be
-                # JsonResource (or a subclass), and JsonResource._async_render
-                # will update it once it picks a servlet.
-                servlet_name = self.__class__.__name__
-                with request.processing(servlet_name):
-                    with PreserveLoggingContext(request_context):
-                        d = defer.maybeDeferred(h, self, request)
-
-                        # record the arrival of the request *after*
-                        # dispatching to the handler, so that the handler
-                        # can update the servlet name in the request
-                        # metrics
-                        requests_counter.labels(request.method,
-                                                request.request_metrics.name).inc()
-                        yield d
-    return wrapped_request_handler
+    # we need to preserve_fn here, because the synchronous render method won't yield for
+    # us (obviously)
+    return preserve_fn(wrapped_async_request_handler)
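Usage is unchanged from the caller's point of view: the decorator is applied to an async render method, and any deferred work is recorded against the request. A hypothetical resource:

    class PingResource(resource.Resource):
        @wrap_json_request_handler
        @defer.inlineCallbacks
        def _async_render_GET(self, request):
            data = yield self._lookup()  # hypothetical deferred work
            respond_with_json(request, 200, data, send_cors=True)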
 
 
 class HttpServer(object):
@@ -265,6 +243,7 @@ class JsonResource(HttpServer, resource.Resource):
         self.hs = hs
 
     def register_paths(self, method, path_patterns, callback):
+        method = method.encode("utf-8")  # method is bytes on py3
         for path_pattern in path_patterns:
             logger.debug("Registering for %s %s", method, path_pattern.pattern)
             self.path_regexs.setdefault(method, []).append(
@@ -275,7 +254,7 @@ class JsonResource(HttpServer, resource.Resource):
         """ This gets called by twisted every time someone sends us a request.
         """
         self._async_render(request)
-        return server.NOT_DONE_YET
+        return NOT_DONE_YET
 
     @wrap_json_request_handler
     @defer.inlineCallbacks
@@ -297,8 +276,19 @@ class JsonResource(HttpServer, resource.Resource):
         # here. If it throws an exception, that is handled by the wrapper
         # installed by @request_handler.
 
+        def _unquote(s):
+            if PY3:
+                # On Python 3, unquote is unicode -> unicode
+                return urllib.parse.unquote(s)
+            else:
+                # On Python 2, unquote is bytes -> bytes. We need to encode the
+                # URL again (as it was decoded by _get_handler_for request), as
+                # ASCII because it's a URL, and then decode it to get the UTF-8
+                # characters that were quoted.
+                return urllib.parse.unquote(s.encode('ascii')).decode('utf8')
+
         kwargs = intern_dict({
-            name: urllib.unquote(value).decode("UTF-8") if value else value
+            name: _unquote(value) if value else value
             for name, value in group_dict.items()
         })
 
@@ -314,9 +304,9 @@ class JsonResource(HttpServer, resource.Resource):
             request (twisted.web.http.Request):
 
         Returns:
-            Tuple[Callable, dict[str, str]]: callback method, and the dict
-                mapping keys to path components as specified in the handler's
-                path match regexp.
+            Tuple[Callable, dict[unicode, unicode]]: callback method, and the
+                dict mapping keys to path components as specified in the
+                handler's path match regexp.
 
                 The callback will normally be a method registered via
                 register_paths, so will return (possibly via Deferred) either
@@ -328,7 +318,7 @@ class JsonResource(HttpServer, resource.Resource):
         # Loop through all the registered callbacks to check if the method
         # and path regex match
         for path_entry in self.path_regexs.get(request.method, []):
-            m = path_entry.pattern.match(request.path)
+            m = path_entry.pattern.match(request.path.decode('ascii'))
             if m:
                 # We found a match!
                 return path_entry.callback, m.groupdict()
@@ -384,7 +374,7 @@ class RootRedirect(resource.Resource):
         self.url = path
 
     def render_GET(self, request):
-        return redirectTo(self.url, request)
+        return redirectTo(self.url.encode('ascii'), request)
 
     def getChild(self, name, request):
         if len(name) == 0:
@@ -405,12 +395,13 @@ def respond_with_json(request, code, json_object, send_cors=False,
         return
 
     if pretty_print:
-        json_bytes = encode_pretty_printed_json(json_object) + "\n"
+        json_bytes = encode_pretty_printed_json(json_object) + b"\n"
     else:
         if canonical_json or synapse.events.USE_FROZEN_DICTS:
+            # canonicaljson already encodes to bytes
             json_bytes = encode_canonical_json(json_object)
         else:
-            json_bytes = simplejson.dumps(json_object)
+            json_bytes = json.dumps(json_object).encode("utf-8")
 
     return respond_with_json_bytes(
         request, code, json_bytes,
@@ -440,8 +431,12 @@ def respond_with_json_bytes(request, code, json_bytes, send_cors=False,
     if send_cors:
         set_cors_headers(request)
 
-    request.write(json_bytes)
-    finish_request(request)
+    # todo: we can almost certainly avoid this copy and encode the json straight into
+    # the bytesIO, but it would involve faffing around with string->bytes wrappers.
+    bytes_io = BytesIO(json_bytes)
+
+    producer = NoRangeStaticProducer(request, bytes_io)
+    producer.start()
     return NOT_DONE_YET
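
A rough sketch of the producer pattern that NoRangeStaticProducer relies
on; this is not the patch's implementation, just a minimal twisted pull
producer that streams a file-like object to a Request:

    from twisted.internet.interfaces import IPullProducer
    from zope.interface import implementer

    @implementer(IPullProducer)
    class SketchStaticProducer(object):
        # twisted calls resumeProducing() each time the transport is
        # ready for another chunk (streaming=False marks a pull producer)
        CHUNK_SIZE = 2 ** 16

        def __init__(self, request, file_like):
            self.request = request
            self.file_like = file_like

        def start(self):
            self.request.registerProducer(self, False)

        def resumeProducing(self):
            chunk = self.file_like.read(self.CHUNK_SIZE)
            if not chunk:
                self.request.unregisterProducer()
                self.request.finish()
                return
            self.request.write(chunk)

        def stopProducing(self):
            self.file_like.close()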
 
 
diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py
index ef8e62901b..a1e4b88e6d 100644
--- a/synapse/http/servlet.py
+++ b/synapse/http/servlet.py
@@ -15,10 +15,11 @@
 
 """ This module contains base REST classes for constructing REST servlets. """
 
-from synapse.api.errors import SynapseError, Codes
-
 import logging
-import simplejson
+
+from canonicaljson import json
+
+from synapse.api.errors import Codes, SynapseError
 
 logger = logging.getLogger(__name__)
 
@@ -28,7 +29,7 @@ def parse_integer(request, name, default=None, required=False):
 
     Args:
         request: the twisted HTTP request.
-        name (str): the name of the query parameter.
+        name (bytes/unicode): the name of the query parameter.
         default (int|None): value to use if the parameter is absent, defaults
             to None.
         required (bool): whether to raise a 400 SynapseError if the
@@ -45,6 +46,10 @@ def parse_integer(request, name, default=None, required=False):
 
 
 def parse_integer_from_args(args, name, default=None, required=False):
+
+    if not isinstance(name, bytes):
+        name = name.encode('ascii')
+
     if name in args:
         try:
             return int(args[name][0])
@@ -64,7 +69,7 @@ def parse_boolean(request, name, default=None, required=False):
 
     Args:
         request: the twisted HTTP request.
-        name (str): the name of the query parameter.
+        name (bytes/unicode): the name of the query parameter.
         default (bool|None): value to use if the parameter is absent, defaults
             to None.
         required (bool): whether to raise a 400 SynapseError if the
@@ -82,11 +87,15 @@ def parse_boolean(request, name, default=None, required=False):
 
 
 def parse_boolean_from_args(args, name, default=None, required=False):
+
+    if not isinstance(name, bytes):
+        name = name.encode('ascii')
+
     if name in args:
         try:
             return {
-                "true": True,
-                "false": False,
+                b"true": True,
+                b"false": False,
             }[args[name][0]]
         except Exception:
             message = (
@@ -103,21 +112,29 @@ def parse_boolean_from_args(args, name, default=None, required=False):
 
 
 def parse_string(request, name, default=None, required=False,
-                 allowed_values=None, param_type="string"):
-    """Parse a string parameter from the request query string.
+                 allowed_values=None, param_type="string", encoding='ascii'):
+    """
+    Parse a string parameter from the request query string.
+
+    If encoding is not None, the content of the query param will be
+    decoded to Unicode using the encoding; otherwise the raw bytes will
+    be returned.
 
     Args:
         request: the twisted HTTP request.
-        name (str): the name of the query parameter.
-        default (str|None): value to use if the parameter is absent, defaults
-            to None.
+        name (bytes/unicode): the name of the query parameter.
+        default (bytes/unicode|None): value to use if the parameter is absent,
+            defaults to None. Must be bytes if encoding is None.
         required (bool): whether to raise a 400 SynapseError if the
             parameter is absent, defaults to False.
-        allowed_values (list[str]): List of allowed values for the string,
-            or None if any value is allowed, defaults to None
+        allowed_values (list[bytes/unicode]): List of allowed values for the
+            string, or None if any value is allowed, defaults to None. Must be
+            the same type as name, if given.
+        encoding (str|None): The encoding to decode the string content
+            with. If None, the raw bytes are returned undecoded.
 
     Returns:
-        str|None: A string value or the default.
+        bytes/unicode|None: A string value or the default. Unicode if encoding
+        was given, bytes otherwise.
 
     Raises:
         SynapseError if the parameter is absent and required, or if the
@@ -125,14 +142,22 @@ def parse_string(request, name, default=None, required=False,
             is not one of those allowed values.
     """
     return parse_string_from_args(
-        request.args, name, default, required, allowed_values, param_type,
+        request.args, name, default, required, allowed_values, param_type, encoding
     )
 
 
 def parse_string_from_args(args, name, default=None, required=False,
-                           allowed_values=None, param_type="string"):
+                           allowed_values=None, param_type="string", encoding='ascii'):
+
+    if not isinstance(name, bytes):
+        name = name.encode('ascii')
+
     if name in args:
         value = args[name][0]
+
+        if encoding:
+            value = value.decode(encoding)
+
         if allowed_values is not None and value not in allowed_values:
             message = "Query parameter %r must be one of [%s]" % (
                 name, ", ".join(repr(v) for v in allowed_values)
@@ -145,6 +170,10 @@ def parse_string_from_args(args, name, default=None, required=False,
             message = "Missing %s query parameter %r" % (param_type, name)
             raise SynapseError(400, message, errcode=Codes.MISSING_PARAM)
         else:
+
+            if encoding and isinstance(default, bytes):
+                return default.decode(encoding)
+
             return default
 
 
@@ -170,8 +199,16 @@ def parse_json_value_from_request(request, allow_empty_body=False):
     if not content_bytes and allow_empty_body:
         return None
 
+    # Decode to Unicode so that simplejson will return Unicode strings on
+    # Python 2
+    try:
+        content_unicode = content_bytes.decode('utf8')
+    except UnicodeDecodeError:
+        logger.warn("Unable to decode UTF-8")
+        raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON)
+
     try:
-        content = simplejson.loads(content_bytes)
+        content = json.loads(content_unicode)
     except Exception as e:
         logger.warn("Unable to parse JSON: %s", e)
         raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON)
@@ -205,7 +242,7 @@ def parse_json_object_from_request(request, allow_empty_body=False):
     return content
 
 
-def assert_params_in_request(body, required):
+def assert_params_in_dict(body, required):
     absent = []
     for k in required:
         if k not in body:
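
A hedged usage sketch of the updated helpers; the servlet method,
parameter names and defaults are illustrative:

    from synapse.http.servlet import (
        parse_boolean, parse_integer, parse_json_object_from_request,
        parse_string,
    )

    def on_POST(self, request):
        # decoded to unicode using the default encoding ('ascii')
        kind = parse_string(request, "kind", default="user",
                            allowed_values=("user", "guest"))
        # pass encoding=None to get the raw bytes of the query param
        sig = parse_string(request, "sig", encoding=None)
        limit = parse_integer(request, "limit", default=10)
        dry_run = parse_boolean(request, "dry_run", default=False)
        body = parse_json_object_from_request(request)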
diff --git a/synapse/http/site.py b/synapse/http/site.py
index 2664006f8c..88ed3714f9 100644
--- a/synapse/http/site.py
+++ b/synapse/http/site.py
@@ -11,16 +11,15 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 import contextlib
 import logging
 import time
 
-from twisted.web.server import Site, Request
+from twisted.web.server import Request, Site
 
 from synapse.http import redact_uri
-from synapse.http.request_metrics import RequestMetrics
-from synapse.util.logcontext import LoggingContext
+from synapse.http.request_metrics import RequestMetrics, requests_counter
+from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
 
 logger = logging.getLogger(__name__)
 
@@ -34,24 +33,43 @@ class SynapseRequest(Request):
 
     It extends twisted's twisted.web.server.Request, and adds:
      * Unique request ID
+     * A log context associated with the request
      * Redaction of access_token query-params in __repr__
      * Logging at start and end
      * Metrics to record CPU, wallclock and DB time by endpoint.
 
-    It provides a method `processing` which should be called by the Resource
-    which is handling the request, and returns a context manager.
+    It also provides a method `processing`, which returns a context manager. If this
+    method is called, the request won't be logged until the context manager is closed;
+    this is useful for asynchronous request handlers which may go on processing the
+    request even after the client has disconnected.
 
+    Attributes:
+        logcontext (LoggingContext): the log context for this request
     """
-    def __init__(self, site, *args, **kw):
-        Request.__init__(self, *args, **kw)
+    def __init__(self, site, channel, *args, **kw):
+        Request.__init__(self, channel, *args, **kw)
         self.site = site
+        self._channel = channel     # this is used by the tests
         self.authenticated_entity = None
         self.start_time = 0
 
+        # we can't yet create the logcontext, as we don't know the method.
+        self.logcontext = None
+
         global _next_request_seq
         self.request_seq = _next_request_seq
         _next_request_seq += 1
 
+        # whether an asynchronous request handler has called processing()
+        self._is_processing = False
+
+        # the time when the asynchronous request handler completed its processing
+        self._processing_finished_time = None
+
+        # what time we finished sending the response to the client (or the connection
+        # dropped)
+        self.finish_time = None
+
     def __repr__(self):
         # We overwrite this so that we don't log ``access_token``
         return '<%s at 0x%x method=%r uri=%r clientproto=%r site=%r>' % (
@@ -73,11 +91,116 @@ class SynapseRequest(Request):
         return self.requestHeaders.getRawHeaders(b"User-Agent", [None])[-1]
 
     def render(self, resrc):
+        # this is called once a Resource has been found to serve the request; in our
+        # case the Resource in question will normally be a JsonResource.
+
+        # create a LogContext for this request
+        request_id = self.get_request_id()
+        logcontext = self.logcontext = LoggingContext(request_id)
+        logcontext.request = request_id
+
         # override the Server header which is set by twisted
         self.setHeader("Server", self.site.server_version_string)
-        return Request.render(self, resrc)
+
+        with PreserveLoggingContext(self.logcontext):
+            # we start the request metrics timer here with an initial stab
+            # at the servlet name. For most requests that name will be
+            # JsonResource (or a subclass), and JsonResource._async_render
+            # will update it once it picks a servlet.
+            servlet_name = resrc.__class__.__name__
+            self._started_processing(servlet_name)
+
+            Request.render(self, resrc)
+
+            # record the arrival of the request *after*
+            # dispatching to the handler, so that the handler
+            # can update the servlet name in the request
+            # metrics
+            requests_counter.labels(self.method,
+                                    self.request_metrics.name).inc()
+
+    @contextlib.contextmanager
+    def processing(self):
+        """Record the fact that we are processing this request.
+
+        Returns a context manager; the correct way to use this is:
+
+        @defer.inlineCallbacks
+        def handle_request(request):
+            with request.processing("FooServlet"):
+                yield really_handle_the_request()
+
+        Once the context manager is closed, the completion of the request will be logged,
+        and the various metrics will be updated.
+        """
+        if self._is_processing:
+            raise RuntimeError("Request is already processing")
+        self._is_processing = True
+
+        try:
+            yield
+        except Exception:
+            # this should already have been caught, and sent back to the client as a 500.
+            logger.exception("Asynchronous messge handler raised an uncaught exception")
+        finally:
+            # the request handler has finished its work and either sent the whole response
+            # back, or handed over responsibility to a Producer.
+
+            self._processing_finished_time = time.time()
+            self._is_processing = False
+
+            # if we've already sent the response, log it now; otherwise, we wait for the
+            # response to be sent.
+            if self.finish_time is not None:
+                self._finished_processing()
+
+    def finish(self):
+        """Called when all response data has been written to this Request.
+
+        Overrides twisted.web.server.Request.finish to record the finish time and do
+        logging.
+        """
+        self.finish_time = time.time()
+        Request.finish(self)
+        if not self._is_processing:
+            with PreserveLoggingContext(self.logcontext):
+                self._finished_processing()
+
+    def connectionLost(self, reason):
+        """Called when the client connection is closed before the response is written.
+
+        Overrides twisted.web.server.Request.connectionLost to record the finish time and
+        do logging.
+        """
+        self.finish_time = time.time()
+        Request.connectionLost(self, reason)
+
+        # we only get here if the connection to the client drops before we send
+        # the response.
+        #
+        # It's useful to log it here so that we can get an idea of when
+        # the client disconnects.
+        with PreserveLoggingContext(self.logcontext):
+            logger.warn(
+                "Error processing request %r: %s %s", self, reason.type, reason.value,
+            )
+
+            if not self._is_processing:
+                self._finished_processing()
 
     def _started_processing(self, servlet_name):
+        """Record the fact that we are processing this request.
+
+        This will log the request's arrival. Once the request completes,
+        be sure to call finished_processing.
+
+        Args:
+            servlet_name (str): the name of the servlet which will be
+                processing this request. This is used in the metrics.
+
+                It is possible to update this afterwards by updating
+                self.request_metrics.name.
+        """
         self.start_time = time.time()
         self.request_metrics = RequestMetrics()
         self.request_metrics.start(
@@ -93,72 +216,79 @@ class SynapseRequest(Request):
         )
 
     def _finished_processing(self):
-        try:
-            context = LoggingContext.current_context()
-            ru_utime, ru_stime = context.get_resource_usage()
-            db_txn_count = context.db_txn_count
-            db_txn_duration_sec = context.db_txn_duration_sec
-            db_sched_duration_sec = context.db_sched_duration_sec
-        except Exception:
-            ru_utime, ru_stime = (0, 0)
-            db_txn_count, db_txn_duration_sec = (0, 0)
+        """Log the completion of this request and update the metrics
+        """
 
-        end_time = time.time()
+        if self.logcontext is None:
+            # this can happen if the connection closed before we read the
+            # headers (so render was never called). In that case we'll already
+            # have logged a warning, so just bail out.
+            return
+
+        usage = self.logcontext.get_resource_usage()
+
+        if self._processing_finished_time is None:
+            # we completed the request without anything calling processing()
+            self._processing_finished_time = time.time()
+
+        # the time between receiving the request and the request handler finishing
+        processing_time = self._processing_finished_time - self.start_time
+
+        # the time between the request handler finishing and the response being sent
+        # to the client (nb may be negative)
+        response_send_time = self.finish_time - self._processing_finished_time
+
+        # need to decode as it could be raw utf-8 bytes
+        # from a IDN servname in an auth header
+        authenticated_entity = self.authenticated_entity
+        if authenticated_entity is not None and isinstance(authenticated_entity, bytes):
+            authenticated_entity = authenticated_entity.decode("utf-8", "replace")
+
+        # ...or could be raw utf-8 bytes in the User-Agent header.
+        # N.B. if you don't do this, the logger explodes cryptically
+        # with maximum recursion trying to log errors about
+        # the charset problem.
+        # c.f. https://github.com/matrix-org/synapse/issues/3471
+        user_agent = self.get_user_agent()
+        if user_agent is not None:
+            user_agent = user_agent.decode("utf-8", "replace")
+        else:
+            user_agent = "-"
+
+        code = str(self.code)
+        if not self.finished:
+            # we didn't send the full response before we gave up (presumably because
+            # the connection dropped)
+            code += "!"
 
         self.site.access_logger.info(
             "%s - %s - {%s}"
-            " Processed request: %.3fsec (%.3fsec, %.3fsec) (%.3fsec/%.3fsec/%d)"
-            " %sB %s \"%s %s %s\" \"%s\"",
+            " Processed request: %.3fsec/%.3fsec (%.3fsec, %.3fsec) (%.3fsec/%.3fsec/%d)"
+            " %sB %s \"%s %s %s\" \"%s\" [%d dbevts]",
             self.getClientIP(),
             self.site.site_tag,
-            self.authenticated_entity,
-            end_time - self.start_time,
-            ru_utime,
-            ru_stime,
-            db_sched_duration_sec,
-            db_txn_duration_sec,
-            int(db_txn_count),
+            authenticated_entity,
+            processing_time,
+            response_send_time,
+            usage.ru_utime,
+            usage.ru_stime,
+            usage.db_sched_duration_sec,
+            usage.db_txn_duration_sec,
+            int(usage.db_txn_count),
             self.sentLength,
-            self.code,
+            code,
             self.method,
             self.get_redacted_uri(),
             self.clientproto,
-            self.get_user_agent(),
+            user_agent,
+            usage.evt_db_fetch_count,
         )
 
         try:
-            self.request_metrics.stop(end_time, self)
+            self.request_metrics.stop(self.finish_time, self)
         except Exception as e:
             logger.warn("Failed to stop metrics: %r", e)
 
-    @contextlib.contextmanager
-    def processing(self, servlet_name):
-        """Record the fact that we are processing this request.
-
-        Returns a context manager; the correct way to use this is:
-
-        @defer.inlineCallbacks
-        def handle_request(request):
-            with request.processing("FooServlet"):
-                yield really_handle_the_request()
-
-        This will log the request's arrival. Once the context manager is
-        closed, the completion of the request will be logged, and the various
-        metrics will be updated.
-
-        Args:
-            servlet_name (str): the name of the servlet which will be
-                processing this request. This is used in the metrics.
-
-                It is possible to update this afterwards by updating
-                self.request_metrics.servlet_name.
-        """
-        # TODO: we should probably just move this into render() and finish(),
-        # to save having to call a separate method.
-        self._started_processing(servlet_name)
-        yield
-        self._finished_processing()
-
 
 class XForwardedForRequest(SynapseRequest):
     def __init__(self, *args, **kw):
@@ -204,7 +334,7 @@ class SynapseSite(Site):
         proxied = config.get("x_forwarded", False)
         self.requestFactory = SynapseRequestFactory(self, proxied)
         self.access_logger = logging.getLogger(logger_name)
-        self.server_version_string = server_version_string
+        self.server_version_string = server_version_string.encode('ascii')
 
     def log(self, request):
         pass
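
For reference, the access log line written by the new
_finished_processing looks roughly like this (values illustrative):

    10.1.2.3 - 8008 - {@alice:example.com} Processed request: 0.015sec/0.001sec (0.008sec, 0.002sec) (0.001sec/0.005sec/3) 512B 200 "GET /_matrix/client/r0/sync HTTP/1.1" "Mozilla/5.0" [2 dbevts]

i.e. processing time / response-send time, (user CPU, system CPU),
(DB scheduling / DB txn time / txn count), bytes sent, then the status
code (suffixed with "!" if the connection dropped before the full
response was sent), the request line, the user agent, and the number of
events fetched from the DB.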
diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py
index 429e79c472..550f8443f7 100644
--- a/synapse/metrics/__init__.py
+++ b/synapse/metrics/__init__.py
@@ -13,20 +13,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import logging
 import functools
-import time
 import gc
+import logging
 import os
 import platform
-import attr
+import time
 
-from prometheus_client import Gauge, Histogram, Counter
-from prometheus_client.core import GaugeMetricFamily, REGISTRY
+import attr
+from prometheus_client import Counter, Gauge, Histogram
+from prometheus_client.core import REGISTRY, GaugeMetricFamily
 
 from twisted.internet import reactor
 
-
 logger = logging.getLogger(__name__)
 
 running_on_pypy = platform.python_implementation() == "PyPy"
@@ -62,7 +61,7 @@ class LaterGauge(object):
             calls = self.caller()
         except Exception:
             logger.exception(
-                "Exception running callback for LaterGuage(%s)",
+                "Exception running callback for LaterGauge(%s)",
                 self.name,
             )
             yield g
@@ -140,14 +139,15 @@ gc_time = Histogram(
 class GCCounts(object):
 
     def collect(self):
-        cm = GaugeMetricFamily("python_gc_counts", "GC cycle counts", labels=["gen"])
+        cm = GaugeMetricFamily("python_gc_counts", "GC object counts", labels=["gen"])
         for n, m in enumerate(gc.get_count()):
             cm.add_metric([str(n)], m)
 
         yield cm
 
 
-REGISTRY.register(GCCounts())
+if not running_on_pypy:
+    REGISTRY.register(GCCounts())
 
 #
 # Twisted reactor metrics
@@ -174,6 +174,19 @@ sent_transactions_counter = Counter("synapse_federation_client_sent_transactions
 
 events_processed_counter = Counter("synapse_federation_client_events_processed", "")
 
+event_processing_loop_counter = Counter(
+    "synapse_event_processing_loop_count",
+    "Event processing loop iterations",
+    ["name"],
+)
+
+event_processing_loop_room_count = Counter(
+    "synapse_event_processing_loop_room_count",
+    "Rooms seen per event processing loop iteration",
+    ["name"],
+)
+
+
 # Used to track where various components have processed in the event stream,
 # e.g. federation sending, appservice sending, etc.
 event_processing_positions = Gauge("synapse_event_processing_positions", "", ["name"])
@@ -190,6 +203,22 @@ event_processing_last_ts = Gauge("synapse_event_processing_last_ts", "", ["name"
 # finished being processed.
 event_processing_lag = Gauge("synapse_event_processing_lag", "", ["name"])
 
+last_ticked = time.time()
+
+
+class ReactorLastSeenMetric(object):
+
+    def collect(self):
+        cm = GaugeMetricFamily(
+            "python_twisted_reactor_last_seen",
+            "Seconds since the Twisted reactor was last seen",
+        )
+        cm.add_metric([], time.time() - last_ticked)
+        yield cm
+
+
+REGISTRY.register(ReactorLastSeenMetric())
+
 
 def runUntilCurrentTimer(func):
 
@@ -222,6 +251,11 @@ def runUntilCurrentTimer(func):
         tick_time.observe(end - start)
         pending_calls_metric.observe(num_pending)
 
+        # Update the time we last ticked, for the metric to test whether
+        # Synapse's reactor has frozen
+        global last_ticked
+        last_ticked = end
+
         if running_on_pypy:
             return ret
 
diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py
new file mode 100644
index 0000000000..167167be0a
--- /dev/null
+++ b/synapse/metrics/background_process_metrics.py
@@ -0,0 +1,201 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import threading
+
+import six
+
+from prometheus_client.core import REGISTRY, Counter, GaugeMetricFamily
+
+from twisted.internet import defer
+
+from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
+
+_background_process_start_count = Counter(
+    "synapse_background_process_start_count",
+    "Number of background processes started",
+    ["name"],
+)
+
+# we set registry=None in all of these to stop them getting registered with
+# the default registry. Instead we collect them all via the CustomCollector,
+# which ensures that we can update them before they are collected.
+#
+_background_process_ru_utime = Counter(
+    "synapse_background_process_ru_utime_seconds",
+    "User CPU time used by background processes, in seconds",
+    ["name"],
+    registry=None,
+)
+
+_background_process_ru_stime = Counter(
+    "synapse_background_process_ru_stime_seconds",
+    "System CPU time used by background processes, in seconds",
+    ["name"],
+    registry=None,
+)
+
+_background_process_db_txn_count = Counter(
+    "synapse_background_process_db_txn_count",
+    "Number of database transactions done by background processes",
+    ["name"],
+    registry=None,
+)
+
+_background_process_db_txn_duration = Counter(
+    "synapse_background_process_db_txn_duration_seconds",
+    ("Seconds spent by background processes waiting for database "
+     "transactions, excluding scheduling time"),
+    ["name"],
+    registry=None,
+)
+
+_background_process_db_sched_duration = Counter(
+    "synapse_background_process_db_sched_duration_seconds",
+    "Seconds spent by background processes waiting for database connections",
+    ["name"],
+    registry=None,
+)
+
+# map from description to a counter, so that we can name our logcontexts
+# incrementally. (It actually duplicates _background_process_start_count, but
+# it's much simpler to do so than to try to combine them.)
+_background_process_counts = dict()  # type: dict[str, int]
+
+# map from description to the currently running background processes.
+#
+# it's kept as a dict of sets rather than a big set so that we can keep track
+# of process descriptions that no longer have any active processes.
+_background_processes = dict()  # type: dict[str, set[_BackgroundProcess]]
+
+# A lock that covers the above dicts
+_bg_metrics_lock = threading.Lock()
+
+
+class _Collector(object):
+    """A custom metrics collector for the background process metrics.
+
+    Ensures that all of the metrics are up-to-date with any in-flight processes
+    before they are returned.
+    """
+    def collect(self):
+        background_process_in_flight_count = GaugeMetricFamily(
+            "synapse_background_process_in_flight_count",
+            "Number of background processes in flight",
+            labels=["name"],
+        )
+
+        # We copy the dict so that it doesn't change from underneath us
+        with _bg_metrics_lock:
+            _background_processes_copy = dict(_background_processes)
+
+        for desc, processes in six.iteritems(_background_processes_copy):
+            background_process_in_flight_count.add_metric(
+                (desc,), len(processes),
+            )
+            for process in processes:
+                process.update_metrics()
+
+        yield background_process_in_flight_count
+
+        # now we need to run collect() over each of the static Counters, and
+        # yield each metric they return.
+        for m in (
+                _background_process_ru_utime,
+                _background_process_ru_stime,
+                _background_process_db_txn_count,
+                _background_process_db_txn_duration,
+                _background_process_db_sched_duration,
+        ):
+            for r in m.collect():
+                yield r
+
+
+REGISTRY.register(_Collector())
+
+
+class _BackgroundProcess(object):
+    def __init__(self, desc, ctx):
+        self.desc = desc
+        self._context = ctx
+        self._reported_stats = None
+
+    def update_metrics(self):
+        """Updates the metrics with values from this process."""
+        new_stats = self._context.get_resource_usage()
+        if self._reported_stats is None:
+            diff = new_stats
+        else:
+            diff = new_stats - self._reported_stats
+        self._reported_stats = new_stats
+
+        _background_process_ru_utime.labels(self.desc).inc(diff.ru_utime)
+        _background_process_ru_stime.labels(self.desc).inc(diff.ru_stime)
+        _background_process_db_txn_count.labels(self.desc).inc(
+            diff.db_txn_count,
+        )
+        _background_process_db_txn_duration.labels(self.desc).inc(
+            diff.db_txn_duration_sec,
+        )
+        _background_process_db_sched_duration.labels(self.desc).inc(
+            diff.db_sched_duration_sec,
+        )
+
+
+def run_as_background_process(desc, func, *args, **kwargs):
+    """Run the given function in its own logcontext, with resource metrics
+
+    This should be used to wrap processes which are fired off to run in the
+    background, instead of being associated with a particular request.
+
+    It returns a Deferred which completes when the function completes, but it doesn't
+    follow the synapse logcontext rules, which makes it appropriate for passing to
+    clock.looping_call and friends (or for firing-and-forgetting in the middle of a
+    normal synapse inlineCallbacks function).
+
+    Args:
+        desc (str): a description for this background process type
+        func: a function, which may return a Deferred
+        args: positional args for func
+        kwargs: keyword args for func
+
+    Returns: Deferred which returns the result of func, but note that it does not
+        follow the synapse logcontext rules.
+    """
+    @defer.inlineCallbacks
+    def run():
+        with _bg_metrics_lock:
+            count = _background_process_counts.get(desc, 0)
+            _background_process_counts[desc] = count + 1
+
+        _background_process_start_count.labels(desc).inc()
+
+        with LoggingContext(desc) as context:
+            context.request = "%s-%i" % (desc, count)
+            proc = _BackgroundProcess(desc, context)
+
+            with _bg_metrics_lock:
+                _background_processes.setdefault(desc, set()).add(proc)
+
+            try:
+                yield func(*args, **kwargs)
+            finally:
+                proc.update_metrics()
+
+                with _bg_metrics_lock:
+                    _background_processes[desc].remove(proc)
+
+    with PreserveLoggingContext():
+        return run()
diff --git a/synapse/notifier.py b/synapse/notifier.py
index 6dce20a284..82f391481c 100644
--- a/synapse/notifier.py
+++ b/synapse/notifier.py
@@ -13,28 +13,27 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import logging
+from collections import namedtuple
+
+from prometheus_client import Counter
+
 from twisted.internet import defer
 
 from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import AuthError
 from synapse.handlers.presence import format_user_presence_state
-
-from synapse.util.logutils import log_function
-from synapse.util.async import (
-    ObservableDeferred, add_timeout_to_deferred,
+from synapse.metrics import LaterGauge
+from synapse.types import StreamToken
+from synapse.util.async_helpers import (
     DeferredTimeoutError,
+    ObservableDeferred,
+    add_timeout_to_deferred,
 )
 from synapse.util.logcontext import PreserveLoggingContext, run_in_background
+from synapse.util.logutils import log_function
 from synapse.util.metrics import Measure
-from synapse.types import StreamToken
 from synapse.visibility import filter_events_for_client
-from synapse.metrics import LaterGauge
-
-from collections import namedtuple
-from prometheus_client import Counter
-
-import logging
-
 
 logger = logging.getLogger(__name__)
 
@@ -161,6 +160,7 @@ class Notifier(object):
         self.user_to_user_stream = {}
         self.room_to_user_streams = {}
 
+        self.hs = hs
         self.event_sources = hs.get_event_sources()
         self.store = hs.get_datastore()
         self.pending_new_room_events = []
@@ -274,7 +274,7 @@ class Notifier(object):
             logger.exception("Error notifying application services of event")
 
     def on_new_event(self, stream_key, new_token, users=[], rooms=[]):
-        """ Used to inform listeners that something has happend event wise.
+        """ Used to inform listeners that something has happened event wise.
 
         Will wake up all listeners for the given users and rooms.
         """
@@ -340,6 +340,7 @@ class Notifier(object):
                     add_timeout_to_deferred(
                         listener.deferred,
                         (end_time - now) / 1000.,
+                        self.hs.get_reactor(),
                     )
                     with PreserveLoggingContext():
                         yield listener.deferred
@@ -561,6 +562,7 @@ class Notifier(object):
             add_timeout_to_deferred(
                 listener.deferred,
                 (end_time - now) / 1000.,
+                self.hs.get_reactor(),
             )
             try:
                 with PreserveLoggingContext():
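
A hedged sketch of the add_timeout_to_deferred pattern with the
now-explicit reactor; the 30-second timeout is illustrative:

    from twisted.internet import defer

    from synapse.util.async_helpers import (
        DeferredTimeoutError,
        add_timeout_to_deferred,
    )

    @defer.inlineCallbacks
    def wait_for_event(hs, listener_deferred):
        add_timeout_to_deferred(
            listener_deferred, 30., hs.get_reactor(),
        )
        try:
            result = yield listener_deferred
        except DeferredTimeoutError:
            result = None
        defer.returnValue(result)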
diff --git a/synapse/push/action_generator.py b/synapse/push/action_generator.py
index 8f619a7a1b..a5de75c48a 100644
--- a/synapse/push/action_generator.py
+++ b/synapse/push/action_generator.py
@@ -13,13 +13,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from twisted.internet import defer
+import logging
 
-from .bulk_push_rule_evaluator import BulkPushRuleEvaluator
+from twisted.internet import defer
 
 from synapse.util.metrics import Measure
 
-import logging
+from .bulk_push_rule_evaluator import BulkPushRuleEvaluator
 
 logger = logging.getLogger(__name__)
 
diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py
index a8ae7bcd6c..8f0682c948 100644
--- a/synapse/push/baserules.py
+++ b/synapse/push/baserules.py
@@ -13,9 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.push.rulekinds import PRIORITY_CLASS_MAP, PRIORITY_CLASS_INVERSE_MAP
 import copy
 
+from synapse.push.rulekinds import PRIORITY_CLASS_INVERSE_MAP, PRIORITY_CLASS_MAP
+
 
 def list_with_base_rules(rawrules):
     """Combine the list of rules set by the user with the default push rules
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index a5cab1f043..8f9a76147f 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -15,21 +15,22 @@
 # limitations under the License.
 
 import logging
+from collections import namedtuple
 
-from twisted.internet import defer
+from six import iteritems, itervalues
 
-from .push_rule_evaluator import PushRuleEvaluatorForEvent
+from prometheus_client import Counter
+
+from twisted.internet import defer
 
-from synapse.event_auth import get_user_power_level
 from synapse.api.constants import EventTypes, Membership
+from synapse.event_auth import get_user_power_level
+from synapse.state import POWER_KEY
+from synapse.util.async_helpers import Linearizer
 from synapse.util.caches import register_cache
 from synapse.util.caches.descriptors import cached
-from synapse.util.async import Linearizer
-from synapse.state import POWER_KEY
 
-from collections import namedtuple
-from prometheus_client import Counter
-from six import itervalues, iteritems
+from .push_rule_evaluator import PushRuleEvaluatorForEvent
 
 logger = logging.getLogger(__name__)
 
@@ -111,7 +112,8 @@ class BulkPushRuleEvaluator(object):
 
     @defer.inlineCallbacks
     def _get_power_levels_and_sender_level(self, event, context):
-        pl_event_id = context.prev_state_ids.get(POWER_KEY)
+        prev_state_ids = yield context.get_prev_state_ids(self.store)
+        pl_event_id = prev_state_ids.get(POWER_KEY)
         if pl_event_id:
             # fastpath: if there's a power level event, that's all we need, and
             # not having a power level event is an extreme edge case
@@ -119,7 +121,7 @@ class BulkPushRuleEvaluator(object):
             auth_events = {POWER_KEY: pl_event}
         else:
             auth_events_ids = yield self.auth.compute_auth_events(
-                event, context.prev_state_ids, for_verification=False,
+                event, prev_state_ids, for_verification=False,
             )
             auth_events = yield self.store.get_events(auth_events_ids)
             auth_events = {
@@ -303,7 +305,7 @@ class RulesForRoom(object):
 
                 push_rules_delta_state_cache_metric.inc_hits()
             else:
-                current_state_ids = context.current_state_ids
+                current_state_ids = yield context.get_current_state_ids(self.store)
                 push_rules_delta_state_cache_metric.inc_misses()
 
             push_rules_state_size_counter.inc(len(current_state_ids))
diff --git a/synapse/push/clientformat.py b/synapse/push/clientformat.py
index e0331b2d2d..ecbf364a5e 100644
--- a/synapse/push/clientformat.py
+++ b/synapse/push/clientformat.py
@@ -13,12 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.push.rulekinds import (
-    PRIORITY_CLASS_MAP, PRIORITY_CLASS_INVERSE_MAP
-)
-
 import copy
 
+from synapse.push.rulekinds import PRIORITY_CLASS_INVERSE_MAP, PRIORITY_CLASS_MAP
+
 
 def format_push_rules_for_user(user, ruleslist):
     """Converts a list of rawrules and a enabled map into nested dictionaries
diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py
index ba7286cb72..d746371420 100644
--- a/synapse/push/emailpusher.py
+++ b/synapse/push/emailpusher.py
@@ -13,14 +13,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from twisted.internet import defer, reactor
-from twisted.internet.error import AlreadyCalled, AlreadyCancelled
-
 import logging
 
-from synapse.util.metrics import Measure
-from synapse.util.logcontext import LoggingContext
+from twisted.internet import defer
+from twisted.internet.error import AlreadyCalled, AlreadyCancelled
 
+from synapse.util.logcontext import LoggingContext
+from synapse.util.metrics import Measure
 
 logger = logging.getLogger(__name__)
 
@@ -199,7 +198,7 @@ class EmailPusher(object):
                     self.timed_call = None
 
         if soonest_due_at is not None:
-            self.timed_call = reactor.callLater(
+            self.timed_call = self.hs.get_reactor().callLater(
                 self.seconds_until(soonest_due_at), self.on_timer
             )
 
diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py
index bf7ff74a1a..81e18bcf7d 100644
--- a/synapse/push/httppusher.py
+++ b/synapse/push/httppusher.py
@@ -15,16 +15,16 @@
 # limitations under the License.
 import logging
 
-from twisted.internet import defer, reactor
+from prometheus_client import Counter
+
+from twisted.internet import defer
 from twisted.internet.error import AlreadyCalled, AlreadyCancelled
 
-from . import push_rule_evaluator
-from . import push_tools
 from synapse.push import PusherConfigException
 from synapse.util.logcontext import LoggingContext
 from synapse.util.metrics import Measure
 
-from prometheus_client import Counter
+from . import push_rule_evaluator, push_tools
 
 logger = logging.getLogger(__name__)
 
@@ -220,7 +220,9 @@ class HttpPusher(object):
                     )
                 else:
                     logger.info("Push failed: delaying for %ds", self.backoff_delay)
-                    self.timed_call = reactor.callLater(self.backoff_delay, self.on_timer)
+                    self.timed_call = self.hs.get_reactor().callLater(
+                        self.backoff_delay, self.on_timer
+                    )
                     self.backoff_delay = min(self.backoff_delay * 2, self.MAX_BACKOFF_SEC)
                     break
 
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index d4be800e5e..bfa6df7b68 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -13,30 +13,31 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from twisted.internet import defer
-from twisted.mail.smtp import sendmail
-
-import email.utils
 import email.mime.multipart
-from email.mime.text import MIMEText
+import email.utils
+import logging
+import time
+import urllib
 from email.mime.multipart import MIMEMultipart
+from email.mime.text import MIMEText
 
-from synapse.util.async import concurrently_execute
+import bleach
+import jinja2
+
+from twisted.internet import defer
+from twisted.mail.smtp import sendmail
+
+from synapse.api.constants import EventTypes
+from synapse.api.errors import StoreError
 from synapse.push.presentable_names import (
-    calculate_room_name, name_from_member_event, descriptor_from_member_events
+    calculate_room_name,
+    descriptor_from_member_events,
+    name_from_member_event,
 )
 from synapse.types import UserID
-from synapse.api.errors import StoreError
-from synapse.api.constants import EventTypes
+from synapse.util.async_helpers import concurrently_execute
 from synapse.visibility import filter_events_for_client
 
-import jinja2
-import bleach
-
-import time
-import urllib
-
-import logging
 logger = logging.getLogger(__name__)
 
 
diff --git a/synapse/push/presentable_names.py b/synapse/push/presentable_names.py
index 43f0c74ff3..eef6e18c2e 100644
--- a/synapse/push/presentable_names.py
+++ b/synapse/push/presentable_names.py
@@ -13,10 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from twisted.internet import defer
-
-import re
 import logging
+import re
+
+from twisted.internet import defer
 
 logger = logging.getLogger(__name__)
 
diff --git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py
index cf735f7468..2bd321d530 100644
--- a/synapse/push/push_rule_evaluator.py
+++ b/synapse/push/push_rule_evaluator.py
@@ -17,12 +17,12 @@
 import logging
 import re
 
+from six import string_types
+
 from synapse.types import UserID
 from synapse.util.caches import CACHE_SIZE_FACTOR, register_cache
 from synapse.util.caches.lrucache import LruCache
 
-from six import string_types
-
 logger = logging.getLogger(__name__)
 
 
diff --git a/synapse/push/push_tools.py b/synapse/push/push_tools.py
index 6835f54e97..8049c298c2 100644
--- a/synapse/push/push_tools.py
+++ b/synapse/push/push_tools.py
@@ -14,9 +14,8 @@
 # limitations under the License.
 
 from twisted.internet import defer
-from synapse.push.presentable_names import (
-    calculate_room_name, name_from_member_event
-)
+
+from synapse.push.presentable_names import calculate_room_name, name_from_member_event
 
 
 @defer.inlineCallbacks
diff --git a/synapse/push/pusher.py b/synapse/push/pusher.py
index 5aa6667e91..fcee6d9d7e 100644
--- a/synapse/push/pusher.py
+++ b/synapse/push/pusher.py
@@ -13,9 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import logging
+
 from .httppusher import HttpPusher
 
-import logging
 logger = logging.getLogger(__name__)
 
 # We try importing this if we can (it will fail if we don't
diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py
index 750d11ca38..9f7d5ef217 100644
--- a/synapse/push/pusherpool.py
+++ b/synapse/push/pusherpool.py
@@ -18,8 +18,8 @@ import logging
 
 from twisted.internet import defer
 
+from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.push.pusher import PusherFactory
-from synapse.util.async import run_on_reactor
 from synapse.util.logcontext import make_deferred_yieldable, run_in_background
 
 logger = logging.getLogger(__name__)
@@ -123,9 +123,14 @@ class PusherPool:
                     p['app_id'], p['pushkey'], p['user_name'],
                 )
 
-    @defer.inlineCallbacks
     def on_new_notifications(self, min_stream_id, max_stream_id):
-        yield run_on_reactor()
+        run_as_background_process(
+            "on_new_notifications",
+            self._on_new_notifications, min_stream_id, max_stream_id,
+        )
+
+    @defer.inlineCallbacks
+    def _on_new_notifications(self, min_stream_id, max_stream_id):
         try:
             users_affected = yield self.store.get_push_action_users_in_range(
                 min_stream_id, max_stream_id
@@ -149,9 +154,14 @@ class PusherPool:
         except Exception:
             logger.exception("Exception in pusher on_new_notifications")
 
-    @defer.inlineCallbacks
     def on_new_receipts(self, min_stream_id, max_stream_id, affected_room_ids):
-        yield run_on_reactor()
+        run_as_background_process(
+            "on_new_receipts",
+            self._on_new_receipts, min_stream_id, max_stream_id, affected_room_ids,
+        )
+
+    @defer.inlineCallbacks
+    def _on_new_receipts(self, min_stream_id, max_stream_id, affected_room_ids):
         try:
             # Need to subtract 1 from the minimum because the lower bound here
             # is not inclusive
diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py
index faf6dfdb8d..942d7c721f 100644
--- a/synapse/python_dependencies.py
+++ b/synapse/python_dependencies.py
@@ -39,7 +39,7 @@ REQUIREMENTS = {
     "signedjson>=1.0.0": ["signedjson>=1.0.0"],
     "pynacl>=1.2.1": ["nacl>=1.2.1", "nacl.bindings"],
     "service_identity>=1.0.0": ["service_identity>=1.0.0"],
-    "Twisted>=16.0.0": ["twisted>=16.0.0"],
+    "Twisted>=17.1.0": ["twisted>=17.1.0"],
 
     # We use crypto.get_elliptic_curve which is only supported in >=0.15
     "pyopenssl>=0.15": ["OpenSSL>=0.15"],
@@ -57,16 +57,14 @@ REQUIREMENTS = {
     "phonenumbers>=8.2.0": ["phonenumbers"],
     "six": ["six"],
     "prometheus_client": ["prometheus_client"],
-    "attr": ["attr"],
+    "attrs": ["attr"],
+    "netaddr>=0.7.18": ["netaddr"],
 }
 
 CONDITIONAL_REQUIREMENTS = {
     "web_client": {
         "matrix_angular_sdk>=0.6.8": ["syweb>=0.6.8"],
     },
-    "preview_url": {
-        "netaddr>=0.7.18": ["netaddr"],
-    },
     "email.enable_notifs": {
         "Jinja2>=2.8": ["Jinja2>=2.8"],
         "bleach>=1.4.2": ["bleach>=1.4.2"],
@@ -80,6 +78,9 @@ CONDITIONAL_REQUIREMENTS = {
     "affinity": {
         "affinity": ["affinity"],
     },
+    "postgres": {
+        "psycopg2>=2.6": ["psycopg2"]
+    }
 }
 
 
diff --git a/synapse/replication/http/__init__.py b/synapse/replication/http/__init__.py
index 1d7a607529..19f214281e 100644
--- a/synapse/replication/http/__init__.py
+++ b/synapse/replication/http/__init__.py
@@ -14,8 +14,7 @@
 # limitations under the License.
 
 from synapse.http.server import JsonResource
-from synapse.replication.http import membership, send_event
-
+from synapse.replication.http import federation, membership, send_event
 
 REPLICATION_PREFIX = "/_synapse/replication"
 
@@ -28,3 +27,4 @@ class ReplicationRestResource(JsonResource):
     def register_servlets(self, hs):
         send_event.register_servlets(hs, self)
         membership.register_servlets(hs, self)
+        federation.register_servlets(hs, self)
diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py
new file mode 100644
index 0000000000..5e5376cf58
--- /dev/null
+++ b/synapse/replication/http/_base.py
@@ -0,0 +1,215 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import abc
+import logging
+import re
+
+from six.moves import urllib
+
+from twisted.internet import defer
+
+from synapse.api.errors import CodeMessageException, HttpResponseException
+from synapse.util.caches.response_cache import ResponseCache
+from synapse.util.stringutils import random_string
+
+logger = logging.getLogger(__name__)
+
+
+class ReplicationEndpoint(object):
+    """Helper base class for defining new replication HTTP endpoints.
+
+    This creates an endpoint under `/_synapse/replication/:NAME/:PATH_ARGS..`
+    (with a `/:txn_id` suffix for cached requests), where NAME is a name,
+    PATH_ARGS are a tuple of parameters to be encoded in the URL.
+
+    For example, if `NAME` is "send_event" and `PATH_ARGS` is `("event_id",)`,
+    with `CACHE` set to true then this generates an endpoint:
+
+        /_synapse/replication/send_event/:event_id/:txn_id
+
+    For POST/PUT requests the payload is serialized to json and sent as the
+    body, while for GET requests the payload is added as query parameters. See
+    `_serialize_payload` for details.
+
+    Incoming requests are handled by overriding `_handle_request`. Servers
+    must call `register` to register the path with the HTTP server.
+
+    Requests can be sent by calling the client returned by `make_client`.
+
+    Attributes:
+        NAME (str): A name for the endpoint, added to the path as well as used
+            in logging and metrics.
+        PATH_ARGS (tuple[str]): A list of parameters to be added to the path.
+            Adding parameters to the path (rather than payload) can make it
+            easier to follow along in the log files.
+        METHOD (str): The method of the HTTP request, defaults to POST. Can be
+            one of POST, PUT or GET. If GET then the payload is sent as query
+            parameters rather than a JSON body.
+        CACHE (bool): Whether the server should cache the result of the request.
+            If true then transparently adds a txn_id to all requests, and
+            `_handle_request` must return a Deferred.
+        RETRY_ON_TIMEOUT(bool): Whether or not to retry the request when a 504
+            is received.
+    """
+
+    __metaclass__ = abc.ABCMeta
+
+    NAME = abc.abstractproperty()
+    PATH_ARGS = abc.abstractproperty()
+
+    METHOD = "POST"
+    CACHE = True
+    RETRY_ON_TIMEOUT = True
+
+    def __init__(self, hs):
+        if self.CACHE:
+            self.response_cache = ResponseCache(
+                hs, "repl." + self.NAME,
+                timeout_ms=30 * 60 * 1000,
+            )
+
+        assert self.METHOD in ("PUT", "POST", "GET")
+
+    @abc.abstractmethod
+    def _serialize_payload(**kwargs):
+        """Static method that is called when creating a request.
+
+        Concrete implementations should have explicit parameters (rather than
+        kwargs) so that an appropriate exception is raised if the client is
+        called with unexpected parameters. All PATH_ARGS must appear in
+        the argument list.
+
+        Returns:
+            Deferred[dict]|dict: If POST/PUT request then dictionary must be
+            JSON serialisable, otherwise must be appropriate for adding as
+            query args.
+        """
+        return {}
+
+    @abc.abstractmethod
+    def _handle_request(self, request, **kwargs):
+        """Handle incoming request.
+
+        This is called with the request object and PATH_ARGS.
+
+        Returns:
+            Deferred[dict]: A JSON serialisable dict to be used as response
+            body of request.
+        """
+        pass
+
+    @classmethod
+    def make_client(cls, hs):
+        """Create a client that makes requests.
+
+        Returns a callable that accepts the same parameters as `_serialize_payload`.
+        """
+        clock = hs.get_clock()
+        host = hs.config.worker_replication_host
+        port = hs.config.worker_replication_http_port
+
+        client = hs.get_simple_http_client()
+
+        @defer.inlineCallbacks
+        def send_request(**kwargs):
+            data = yield cls._serialize_payload(**kwargs)
+
+            url_args = [urllib.parse.quote(kwargs[name]) for name in cls.PATH_ARGS]
+
+            if cls.CACHE:
+                txn_id = random_string(10)
+                url_args.append(txn_id)
+
+            if cls.METHOD == "POST":
+                request_func = client.post_json_get_json
+            elif cls.METHOD == "PUT":
+                request_func = client.put_json
+            elif cls.METHOD == "GET":
+                request_func = client.get_json
+            else:
+                # We have already asserted in the constructor that a
+                # compatible method was picked, but let's be paranoid.
+                raise Exception(
+                    "Unknown METHOD on %s replication endpoint" % (cls.NAME,)
+                )
+
+            uri = "http://%s:%s/_synapse/replication/%s/%s" % (
+                host, port, cls.NAME, "/".join(url_args)
+            )
+
+            try:
+                # We keep retrying the same request for timeouts. This is so that we
+                # have a good idea that the request has either succeeded or failed on
+                # the master, and so whether we should clean up or not.
+                while True:
+                    try:
+                        result = yield request_func(uri, data)
+                        break
+                    except CodeMessageException as e:
+                        if e.code != 504 or not cls.RETRY_ON_TIMEOUT:
+                            raise
+
+                    logger.warn("%s request timed out", cls.NAME)
+
+                    # If we timed out we probably don't need to worry about backing
+                    # off too much, but let's just wait a little anyway.
+                    yield clock.sleep(1)
+            except HttpResponseException as e:
+                # We convert to SynapseError as we know that it was a SynapseError
+                # on the master process that we should send to the client. (And
+                # importantly, not stack traces everywhere)
+                raise e.to_synapse_error()
+
+            defer.returnValue(result)
+
+        return send_request
+
+    def register(self, http_server):
+        """Called by the server to register this as a handler to the
+        appropriate path.
+        """
+
+        url_args = list(self.PATH_ARGS)
+        handler = self._handle_request
+        method = self.METHOD
+
+        if self.CACHE:
+            handler = self._cached_handler
+            url_args.append("txn_id")
+
+        args = "/".join("(?P<%s>[^/]+)" % (arg,) for arg in url_args)
+        pattern = re.compile("^/_synapse/replication/%s/%s$" % (
+            self.NAME,
+            args
+        ))
+
+        http_server.register_paths(method, [pattern], handler)
+
+    def _cached_handler(self, request, txn_id, **kwargs):
+        """Called on new incoming requests when caching is enabled. Checks
+        if there is a cached response for the request and returns that,
+        otherwise calls `_handle_request` and caches its response.
+        """
+        # We just use the txn_id here, but we probably also want to use the
+        # other PATH_ARGS as well.
+
+        assert self.CACHE
+
+        return self.response_cache.wrap(
+            txn_id,
+            self._handle_request,
+            request, **kwargs
+        )
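+
+
+# A minimal illustrative subclass (hypothetical, for exposition only; the
+# real endpoints live in the sibling modules):
+#
+#     class ReplicationExampleRestServlet(ReplicationEndpoint):
+#         NAME = "example"
+#         PATH_ARGS = ("user_id",)
+#
+#         @staticmethod
+#         def _serialize_payload(user_id, reason):
+#             # anything not in PATH_ARGS travels in the JSON body
+#             return {"reason": reason}
+#
+#         @defer.inlineCallbacks
+#         def _handle_request(self, request, user_id):
+#             content = parse_json_object_from_request(request)
+#             # ... do the work on the master ...
+#             defer.returnValue((200, {}))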
diff --git a/synapse/replication/http/federation.py b/synapse/replication/http/federation.py
new file mode 100644
index 0000000000..64a79da162
--- /dev/null
+++ b/synapse/replication/http/federation.py
@@ -0,0 +1,259 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from twisted.internet import defer
+
+from synapse.events import FrozenEvent
+from synapse.events.snapshot import EventContext
+from synapse.http.servlet import parse_json_object_from_request
+from synapse.replication.http._base import ReplicationEndpoint
+from synapse.util.metrics import Measure
+
+logger = logging.getLogger(__name__)
+
+
+class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint):
+    """Handles events newly received from federation, including persisting and
+    notifying.
+
+    The API looks like:
+
+        POST /_synapse/replication/fed_send_events/:txn_id
+
+        {
+            "events": [{
+                "event": { .. serialized event .. },
+                "internal_metadata": { .. serialized internal_metadata .. },
+                "rejected_reason": ..,   // The event.rejected_reason field
+                "context": { .. serialized event context .. },
+            }],
+            "backfilled": false
+    """
+
+    NAME = "fed_send_events"
+    PATH_ARGS = ()
+
+    def __init__(self, hs):
+        super(ReplicationFederationSendEventsRestServlet, self).__init__(hs)
+
+        self.store = hs.get_datastore()
+        self.clock = hs.get_clock()
+        self.federation_handler = hs.get_handlers().federation_handler
+
+    @staticmethod
+    @defer.inlineCallbacks
+    def _serialize_payload(store, event_and_contexts, backfilled):
+        """
+        Args:
+            store (DataStore)
+            event_and_contexts (list[tuple[FrozenEvent, EventContext]])
+            backfilled (bool): Whether or not the events are the result of
+                backfilling
+        """
+        event_payloads = []
+        for event, context in event_and_contexts:
+            serialized_context = yield context.serialize(event, store)
+
+            event_payloads.append({
+                "event": event.get_pdu_json(),
+                "internal_metadata": event.internal_metadata.get_dict(),
+                "rejected_reason": event.rejected_reason,
+                "context": serialized_context,
+            })
+
+        payload = {
+            "events": event_payloads,
+            "backfilled": backfilled,
+        }
+
+        defer.returnValue(payload)
+
+    @defer.inlineCallbacks
+    def _handle_request(self, request):
+        with Measure(self.clock, "repl_fed_send_events_parse"):
+            content = parse_json_object_from_request(request)
+
+            backfilled = content["backfilled"]
+
+            event_payloads = content["events"]
+
+            event_and_contexts = []
+            for event_payload in event_payloads:
+                event_dict = event_payload["event"]
+                internal_metadata = event_payload["internal_metadata"]
+                rejected_reason = event_payload["rejected_reason"]
+                event = FrozenEvent(event_dict, internal_metadata, rejected_reason)
+
+                context = yield EventContext.deserialize(
+                    self.store, event_payload["context"],
+                )
+
+                event_and_contexts.append((event, context))
+
+        logger.info(
+            "Got %d events from federation",
+            len(event_and_contexts),
+        )
+
+        yield self.federation_handler.persist_events_and_notify(
+            event_and_contexts, backfilled,
+        )
+
+        defer.returnValue((200, {}))
+
+
+class ReplicationFederationSendEduRestServlet(ReplicationEndpoint):
+    """Handles EDUs newly received from federation, including persisting and
+    notifying.
+
+    Request format:
+
+        POST /_synapse/replication/fed_send_edu/:edu_type/:txn_id
+
+        {
+            "origin": ...,
+            "content: { ... }
+        }
+    """
+
+    NAME = "fed_send_edu"
+    PATH_ARGS = ("edu_type",)
+
+    def __init__(self, hs):
+        super(ReplicationFederationSendEduRestServlet, self).__init__(hs)
+
+        self.store = hs.get_datastore()
+        self.clock = hs.get_clock()
+        self.registry = hs.get_federation_registry()
+
+    @staticmethod
+    def _serialize_payload(edu_type, origin, content):
+        return {
+            "origin": origin,
+            "content": content,
+        }
+
+    @defer.inlineCallbacks
+    def _handle_request(self, request, edu_type):
+        with Measure(self.clock, "repl_fed_send_edu_parse"):
+            content = parse_json_object_from_request(request)
+
+            origin = content["origin"]
+            edu_content = content["content"]
+
+        logger.info(
+            "Got %r edu from %s",
+            edu_type, origin,
+        )
+
+        result = yield self.registry.on_edu(edu_type, origin, edu_content)
+
+        defer.returnValue((200, result))
+
+
+class ReplicationGetQueryRestServlet(ReplicationEndpoint):
+    """Handle responding to queries from federation.
+
+    Request format:
+
+        POST /_synapse/replication/fed_query/:query_type
+
+        {
+            "args": { ... }
+        }
+    """
+
+    NAME = "fed_query"
+    PATH_ARGS = ("query_type",)
+
+    # This is a query, so let's not bother caching
+    CACHE = False
+
+    def __init__(self, hs):
+        super(ReplicationGetQueryRestServlet, self).__init__(hs)
+
+        self.store = hs.get_datastore()
+        self.clock = hs.get_clock()
+        self.registry = hs.get_federation_registry()
+
+    @staticmethod
+    def _serialize_payload(query_type, args):
+        """
+        Args:
+            query_type (str)
+            args (dict): The arguments received for the given query type
+        """
+        return {
+            "args": args,
+        }
+
+    @defer.inlineCallbacks
+    def _handle_request(self, request, query_type):
+        with Measure(self.clock, "repl_fed_query_parse"):
+            content = parse_json_object_from_request(request)
+
+            args = content["args"]
+
+        logger.info(
+            "Got %r query",
+            query_type,
+        )
+
+        result = yield self.registry.on_query(query_type, args)
+
+        defer.returnValue((200, result))
+
+
+class ReplicationCleanRoomRestServlet(ReplicationEndpoint):
+    """Called to clean up any data in DB for a given room, ready for the
+    server to join the room.
+
+    Request format:
+
+        POST /_synapse/replication/fed_cleanup_room/:room_id/:txn_id
+
+        {}
+    """
+
+    NAME = "fed_cleanup_room"
+    PATH_ARGS = ("room_id",)
+
+    def __init__(self, hs):
+        super(ReplicationCleanRoomRestServlet, self).__init__(hs)
+
+        self.store = hs.get_datastore()
+
+    @staticmethod
+    def _serialize_payload(room_id):
+        """
+        Args:
+            room_id (str)
+        """
+        return {}
+
+    @defer.inlineCallbacks
+    def _handle_request(self, request, room_id):
+        yield self.store.clean_room_for_join(room_id)
+
+        defer.returnValue((200, {}))
+
+
+def register_servlets(hs, http_server):
+    ReplicationFederationSendEventsRestServlet(hs).register(http_server)
+    ReplicationFederationSendEduRestServlet(hs).register(http_server)
+    ReplicationGetQueryRestServlet(hs).register(http_server)
+    ReplicationCleanRoomRestServlet(hs).register(http_server)
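+
+# Hedged usage sketch (attribute names assumed): a worker-side handler would
+# typically build a client once and then call it with the same arguments as
+# the servlet's _serialize_payload, e.g.:
+#
+#     self._send_events = \
+#         ReplicationFederationSendEventsRestServlet.make_client(hs)
+#     ...
+#     yield self._send_events(
+#         store=self.store,
+#         event_and_contexts=event_and_contexts,
+#         backfilled=backfilled,
+#     )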
diff --git a/synapse/replication/http/membership.py b/synapse/replication/http/membership.py
index e66c4e881f..e58bebf12a 100644
--- a/synapse/replication/http/membership.py
+++ b/synapse/replication/http/membership.py
@@ -14,182 +14,63 @@
 # limitations under the License.
 
 import logging
-import re
 
 from twisted.internet import defer
 
-from synapse.api.errors import SynapseError, MatrixCodeMessageException
-from synapse.http.servlet import RestServlet, parse_json_object_from_request
+from synapse.http.servlet import parse_json_object_from_request
+from synapse.replication.http._base import ReplicationEndpoint
 from synapse.types import Requester, UserID
-from synapse.util.distributor import user_left_room, user_joined_room
+from synapse.util.distributor import user_joined_room, user_left_room
 
 logger = logging.getLogger(__name__)
 
 
-@defer.inlineCallbacks
-def remote_join(client, host, port, requester, remote_room_hosts,
-                room_id, user_id, content):
-    """Ask the master to do a remote join for the given user to the given room
+class ReplicationRemoteJoinRestServlet(ReplicationEndpoint):
+    """Does a remote join for the given user to the given room
 
-    Args:
-        client (SimpleHttpClient)
-        host (str): host of master
-        port (int): port on master listening for HTTP replication
-        requester (Requester)
-        remote_room_hosts (list[str]): Servers to try and join via
-        room_id (str)
-        user_id (str)
-        content (dict): The event content to use for the join event
+    Request format:
 
-    Returns:
-        Deferred
-    """
-    uri = "http://%s:%s/_synapse/replication/remote_join" % (host, port)
-
-    payload = {
-        "requester": requester.serialize(),
-        "remote_room_hosts": remote_room_hosts,
-        "room_id": room_id,
-        "user_id": user_id,
-        "content": content,
-    }
-
-    try:
-        result = yield client.post_json_get_json(uri, payload)
-    except MatrixCodeMessageException as e:
-        # We convert to SynapseError as we know that it was a SynapseError
-        # on the master process that we should send to the client. (And
-        # importantly, not stack traces everywhere)
-        raise SynapseError(e.code, e.msg, e.errcode)
-    defer.returnValue(result)
-
-
-@defer.inlineCallbacks
-def remote_reject_invite(client, host, port, requester, remote_room_hosts,
-                         room_id, user_id):
-    """Ask master to reject the invite for the user and room.
-
-    Args:
-        client (SimpleHttpClient)
-        host (str): host of master
-        port (int): port on master listening for HTTP replication
-        requester (Requester)
-        remote_room_hosts (list[str]): Servers to try and reject via
-        room_id (str)
-        user_id (str)
-
-    Returns:
-        Deferred
-    """
-    uri = "http://%s:%s/_synapse/replication/remote_reject_invite" % (host, port)
-
-    payload = {
-        "requester": requester.serialize(),
-        "remote_room_hosts": remote_room_hosts,
-        "room_id": room_id,
-        "user_id": user_id,
-    }
-
-    try:
-        result = yield client.post_json_get_json(uri, payload)
-    except MatrixCodeMessageException as e:
-        # We convert to SynapseError as we know that it was a SynapseError
-        # on the master process that we should send to the client. (And
-        # importantly, not stack traces everywhere)
-        raise SynapseError(e.code, e.msg, e.errcode)
-    defer.returnValue(result)
-
-
-@defer.inlineCallbacks
-def get_or_register_3pid_guest(client, host, port, requester,
-                               medium, address, inviter_user_id):
-    """Ask the master to get/create a guest account for given 3PID.
-
-    Args:
-        client (SimpleHttpClient)
-        host (str): host of master
-        port (int): port on master listening for HTTP replication
-        requester (Requester)
-        medium (str)
-        address (str)
-        inviter_user_id (str): The user ID who is trying to invite the
-            3PID
-
-    Returns:
-        Deferred[(str, str)]: A 2-tuple of `(user_id, access_token)` of the
-        3PID guest account.
-    """
+        POST /_synapse/replication/remote_join/:room_id/:user_id/:txn_id
 
-    uri = "http://%s:%s/_synapse/replication/get_or_register_3pid_guest" % (host, port)
-
-    payload = {
-        "requester": requester.serialize(),
-        "medium": medium,
-        "address": address,
-        "inviter_user_id": inviter_user_id,
-    }
-
-    try:
-        result = yield client.post_json_get_json(uri, payload)
-    except MatrixCodeMessageException as e:
-        # We convert to SynapseError as we know that it was a SynapseError
-        # on the master process that we should send to the client. (And
-        # importantly, not stack traces everywhere)
-        raise SynapseError(e.code, e.msg, e.errcode)
-    defer.returnValue(result)
-
-
-@defer.inlineCallbacks
-def notify_user_membership_change(client, host, port, user_id, room_id, change):
-    """Notify master that a user has joined or left the room
-
-    Args:
-        client (SimpleHttpClient)
-        host (str): host of master
-        port (int): port on master listening for HTTP replication.
-        user_id (str)
-        room_id (str)
-        change (str): Either "join" or "left"
-
-    Returns:
-        Deferred
+        {
+            "requester": ...,
+            "remote_room_hosts": [...],
+            "content": { ... }
+        }
     """
-    assert change in ("joined", "left")
-
-    uri = "http://%s:%s/_synapse/replication/user_%s_room" % (host, port, change)
-
-    payload = {
-        "user_id": user_id,
-        "room_id": room_id,
-    }
-
-    try:
-        result = yield client.post_json_get_json(uri, payload)
-    except MatrixCodeMessageException as e:
-        # We convert to SynapseError as we know that it was a SynapseError
-        # on the master process that we should send to the client. (And
-        # importantly, not stack traces everywhere)
-        raise SynapseError(e.code, e.msg, e.errcode)
-    defer.returnValue(result)
-
 
-class ReplicationRemoteJoinRestServlet(RestServlet):
-    PATTERNS = [re.compile("^/_synapse/replication/remote_join$")]
+    NAME = "remote_join"
+    PATH_ARGS = ("room_id", "user_id",)
 
     def __init__(self, hs):
-        super(ReplicationRemoteJoinRestServlet, self).__init__()
+        super(ReplicationRemoteJoinRestServlet, self).__init__(hs)
 
         self.federation_handler = hs.get_handlers().federation_handler
         self.store = hs.get_datastore()
         self.clock = hs.get_clock()
 
+    @staticmethod
+    def _serialize_payload(requester, room_id, user_id, remote_room_hosts,
+                           content):
+        """
+        Args:
+            requester (Requester)
+            room_id (str)
+            user_id (str)
+            remote_room_hosts (list[str]): Servers to try and join via
+            content (dict): The event content to use for the join event
+        """
+        return {
+            "requester": requester.serialize(),
+            "remote_room_hosts": remote_room_hosts,
+            "content": content,
+        }
+
     @defer.inlineCallbacks
-    def on_POST(self, request):
+    def _handle_request(self, request, room_id, user_id):
         content = parse_json_object_from_request(request)
 
         remote_room_hosts = content["remote_room_hosts"]
-        room_id = content["room_id"]
-        user_id = content["user_id"]
         event_content = content["content"]
 
         requester = Requester.deserialize(self.store, content["requester"])
@@ -212,23 +93,48 @@ class ReplicationRemoteJoinRestServlet(RestServlet):
         defer.returnValue((200, {}))
 
 
-class ReplicationRemoteRejectInviteRestServlet(RestServlet):
-    PATTERNS = [re.compile("^/_synapse/replication/remote_reject_invite$")]
+class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint):
+    """Rejects the invite for the user and room.
+
+    Request format:
+
+        POST /_synapse/replication/remote_reject_invite/:room_id/:user_id/:txn_id
+
+        {
+            "requester": ...,
+            "remote_room_hosts": [...],
+        }
+    """
+
+    NAME = "remote_reject_invite"
+    PATH_ARGS = ("room_id", "user_id",)
 
     def __init__(self, hs):
-        super(ReplicationRemoteRejectInviteRestServlet, self).__init__()
+        super(ReplicationRemoteRejectInviteRestServlet, self).__init__(hs)
 
         self.federation_handler = hs.get_handlers().federation_handler
         self.store = hs.get_datastore()
         self.clock = hs.get_clock()
 
+    @staticmethod
+    def _serialize_payload(requester, room_id, user_id, remote_room_hosts):
+        """
+        Args:
+            requester (Requester)
+            room_id (str)
+            user_id (str)
+            remote_room_hosts (list[str]): Servers to try and reject via
+        """
+        return {
+            "requester": requester.serialize(),
+            "remote_room_hosts": remote_room_hosts,
+        }
+
     @defer.inlineCallbacks
-    def on_POST(self, request):
+    def _handle_request(self, request, room_id, user_id):
         content = parse_json_object_from_request(request)
 
         remote_room_hosts = content["remote_room_hosts"]
-        room_id = content["room_id"]
-        user_id = content["user_id"]
 
         requester = Requester.deserialize(self.store, content["requester"])
 
@@ -264,18 +170,50 @@ class ReplicationRemoteRejectInviteRestServlet(RestServlet):
         defer.returnValue((200, ret))
 
 
-class ReplicationRegister3PIDGuestRestServlet(RestServlet):
-    PATTERNS = [re.compile("^/_synapse/replication/get_or_register_3pid_guest$")]
+class ReplicationRegister3PIDGuestRestServlet(ReplicationEndpoint):
+    """Gets/creates a guest account for given 3PID.
+
+    Request format:
+
+        POST /_synapse/replication/get_or_register_3pid_guest/:txn_id
+
+        {
+            "requester": ...,
+            "medium": ...,
+            "address": ...,
+            "inviter_user_id": ...
+        }
+    """
+
+    NAME = "get_or_register_3pid_guest"
+    PATH_ARGS = ()
 
     def __init__(self, hs):
-        super(ReplicationRegister3PIDGuestRestServlet, self).__init__()
+        super(ReplicationRegister3PIDGuestRestServlet, self).__init__(hs)
 
         self.registeration_handler = hs.get_handlers().registration_handler
         self.store = hs.get_datastore()
         self.clock = hs.get_clock()
 
+    @staticmethod
+    def _serialize_payload(requester, medium, address, inviter_user_id):
+        """
+        Args:
+            requester (Requester)
+            medium (str)
+            address (str)
+            inviter_user_id (str): The user ID who is trying to invite the
+                3PID
+        """
+        return {
+            "requester": requester.serialize(),
+            "medium": medium,
+            "address": address,
+            "inviter_user_id": inviter_user_id,
+        }
+
     @defer.inlineCallbacks
-    def on_POST(self, request):
+    def _handle_request(self, request):
         content = parse_json_object_from_request(request)
 
         medium = content["medium"]
@@ -296,23 +234,41 @@ class ReplicationRegister3PIDGuestRestServlet(RestServlet):
         defer.returnValue((200, ret))
 
 
-class ReplicationUserJoinedLeftRoomRestServlet(RestServlet):
-    PATTERNS = [re.compile("^/_synapse/replication/user_(?P<change>joined|left)_room$")]
+class ReplicationUserJoinedLeftRoomRestServlet(ReplicationEndpoint):
+    """Notifies that a user has joined or left the room
+
+    Request format:
+
+        POST /_synapse/replication/membership_change/:room_id/:user_id/:change
+
+        {}
+    """
+
+    NAME = "membership_change"
+    PATH_ARGS = ("room_id", "user_id", "change")
+    CACHE = False  # No point caching as it should return instantly.
 
     def __init__(self, hs):
-        super(ReplicationUserJoinedLeftRoomRestServlet, self).__init__()
+        super(ReplicationUserJoinedLeftRoomRestServlet, self).__init__(hs)
 
         self.registeration_handler = hs.get_handlers().registration_handler
         self.store = hs.get_datastore()
         self.clock = hs.get_clock()
         self.distributor = hs.get_distributor()
 
-    def on_POST(self, request, change):
-        content = parse_json_object_from_request(request)
+    @staticmethod
+    def _serialize_payload(room_id, user_id, change):
+        """
+        Args:
+            room_id (str)
+            user_id (str)
+            change (str): Either "joined" or "left"
+        """
+        assert change in ("joined", "left",)
 
-        user_id = content["user_id"]
-        room_id = content["room_id"]
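+        # room_id, user_id and change are all carried in the URL path, so
+        # the request body is empty.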
+        return {}
 
+    def _handle_request(self, request, room_id, user_id, change):
         logger.info("user membership change: %s in %s", user_id, room_id)
 
         user = UserID.from_string(user_id)
diff --git a/synapse/replication/http/send_event.py b/synapse/replication/http/send_event.py
index a9baa2c1c3..5b52c91650 100644
--- a/synapse/replication/http/send_event.py
+++ b/synapse/replication/http/send_event.py
@@ -13,86 +13,27 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import logging
+
 from twisted.internet import defer
 
-from synapse.api.errors import (
-    SynapseError, MatrixCodeMessageException, CodeMessageException,
-)
 from synapse.events import FrozenEvent
 from synapse.events.snapshot import EventContext
-from synapse.http.servlet import RestServlet, parse_json_object_from_request
-from synapse.util.async import sleep
-from synapse.util.caches.response_cache import ResponseCache
-from synapse.util.metrics import Measure
+from synapse.http.servlet import parse_json_object_from_request
+from synapse.replication.http._base import ReplicationEndpoint
 from synapse.types import Requester, UserID
-
-import logging
-import re
+from synapse.util.metrics import Measure
 
 logger = logging.getLogger(__name__)
 
 
-@defer.inlineCallbacks
-def send_event_to_master(client, host, port, requester, event, context,
-                         ratelimit, extra_users):
-    """Send event to be handled on the master
-
-    Args:
-        client (SimpleHttpClient)
-        host (str): host of master
-        port (int): port on master listening for HTTP replication
-        requester (Requester)
-        event (FrozenEvent)
-        context (EventContext)
-        ratelimit (bool)
-        extra_users (list(UserID)): Any extra users to notify about event
-    """
-    uri = "http://%s:%s/_synapse/replication/send_event/%s" % (
-        host, port, event.event_id,
-    )
-
-    payload = {
-        "event": event.get_pdu_json(),
-        "internal_metadata": event.internal_metadata.get_dict(),
-        "rejected_reason": event.rejected_reason,
-        "context": context.serialize(event),
-        "requester": requester.serialize(),
-        "ratelimit": ratelimit,
-        "extra_users": [u.to_string() for u in extra_users],
-    }
-
-    try:
-        # We keep retrying the same request for timeouts. This is so that we
-        # have a good idea that the request has either succeeded or failed on
-        # the master, and so whether we should clean up or not.
-        while True:
-            try:
-                result = yield client.put_json(uri, payload)
-                break
-            except CodeMessageException as e:
-                if e.code != 504:
-                    raise
-
-            logger.warn("send_event request timed out")
-
-            # If we timed out we probably don't need to worry about backing
-            # off too much, but lets just wait a little anyway.
-            yield sleep(1)
-    except MatrixCodeMessageException as e:
-        # We convert to SynapseError as we know that it was a SynapseError
-        # on the master process that we should send to the client. (And
-        # importantly, not stack traces everywhere)
-        raise SynapseError(e.code, e.msg, e.errcode)
-    defer.returnValue(result)
-
-
-class ReplicationSendEventRestServlet(RestServlet):
+class ReplicationSendEventRestServlet(ReplicationEndpoint):
     """Handles events newly created on workers, including persisting and
     notifying.
 
     The API looks like:
 
-        POST /_synapse/replication/send_event/:event_id
+        POST /_synapse/replication/send_event/:event_id/:txn_id
 
         {
             "event": { .. serialized event .. },
@@ -104,27 +45,47 @@ class ReplicationSendEventRestServlet(RestServlet):
             "extra_users": [],
         }
     """
-    PATTERNS = [re.compile("^/_synapse/replication/send_event/(?P<event_id>[^/]+)$")]
+    NAME = "send_event"
+    PATH_ARGS = ("event_id",)
 
     def __init__(self, hs):
-        super(ReplicationSendEventRestServlet, self).__init__()
+        super(ReplicationSendEventRestServlet, self).__init__(hs)
 
         self.event_creation_handler = hs.get_event_creation_handler()
         self.store = hs.get_datastore()
         self.clock = hs.get_clock()
 
-        # The responses are tiny, so we may as well cache them for a while
-        self.response_cache = ResponseCache(hs, "send_event", timeout_ms=30 * 60 * 1000)
+    @staticmethod
+    @defer.inlineCallbacks
+    def _serialize_payload(event_id, store, event, context, requester,
+                           ratelimit, extra_users):
+        """
+        Args:
+            event_id (str)
+            store (DataStore)
+            requester (Requester)
+            event (FrozenEvent)
+            context (EventContext)
+            ratelimit (bool)
+            extra_users (list(UserID)): Any extra users to notify about event
+        """
+
+        serialized_context = yield context.serialize(event, store)
+
+        payload = {
+            "event": event.get_pdu_json(),
+            "internal_metadata": event.internal_metadata.get_dict(),
+            "rejected_reason": event.rejected_reason,
+            "context": serialized_context,
+            "requester": requester.serialize(),
+            "ratelimit": ratelimit,
+            "extra_users": [u.to_string() for u in extra_users],
+        }
 
-    def on_PUT(self, request, event_id):
-        return self.response_cache.wrap(
-            event_id,
-            self._handle_request,
-            request
-        )
+        defer.returnValue(payload)
 
     @defer.inlineCallbacks
-    def _handle_request(self, request):
+    def _handle_request(self, request, event_id):
         with Measure(self.clock, "repl_send_event_parse"):
             content = parse_json_object_from_request(request)
 
diff --git a/synapse/replication/slave/storage/_base.py b/synapse/replication/slave/storage/_base.py
index 61f5590c53..3f7be74e02 100644
--- a/synapse/replication/slave/storage/_base.py
+++ b/synapse/replication/slave/storage/_base.py
@@ -13,13 +13,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import logging
+
 from synapse.storage._base import SQLBaseStore
 from synapse.storage.engines import PostgresEngine
 
 from ._slaved_id_tracker import SlavedIdTracker
 
-import logging
-
 logger = logging.getLogger(__name__)
 
 
diff --git a/synapse/replication/slave/storage/appservice.py b/synapse/replication/slave/storage/appservice.py
index 8cae3076f4..b53a4c6bd1 100644
--- a/synapse/replication/slave/storage/appservice.py
+++ b/synapse/replication/slave/storage/appservice.py
@@ -15,7 +15,8 @@
 # limitations under the License.
 
 from synapse.storage.appservice import (
-    ApplicationServiceWorkerStore, ApplicationServiceTransactionWorkerStore,
+    ApplicationServiceTransactionWorkerStore,
+    ApplicationServiceWorkerStore,
 )
 
 
diff --git a/synapse/replication/slave/storage/client_ips.py b/synapse/replication/slave/storage/client_ips.py
index 352c9a2aa8..60641f1a49 100644
--- a/synapse/replication/slave/storage/client_ips.py
+++ b/synapse/replication/slave/storage/client_ips.py
@@ -13,11 +13,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import BaseSlavedStore
 from synapse.storage.client_ips import LAST_SEEN_GRANULARITY
 from synapse.util.caches import CACHE_SIZE_FACTOR
 from synapse.util.caches.descriptors import Cache
 
+from ._base import BaseSlavedStore
+
 
 class SlavedClientIpStore(BaseSlavedStore):
     def __init__(self, db_conn, hs):
diff --git a/synapse/replication/slave/storage/deviceinbox.py b/synapse/replication/slave/storage/deviceinbox.py
index 6f3fb64770..87eaa53004 100644
--- a/synapse/replication/slave/storage/deviceinbox.py
+++ b/synapse/replication/slave/storage/deviceinbox.py
@@ -13,11 +13,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import BaseSlavedStore
-from ._slaved_id_tracker import SlavedIdTracker
 from synapse.storage import DataStore
-from synapse.util.caches.stream_change_cache import StreamChangeCache
 from synapse.util.caches.expiringcache import ExpiringCache
+from synapse.util.caches.stream_change_cache import StreamChangeCache
+
+from ._base import BaseSlavedStore
+from ._slaved_id_tracker import SlavedIdTracker
 
 
 class SlavedDeviceInboxStore(BaseSlavedStore):
diff --git a/synapse/replication/slave/storage/devices.py b/synapse/replication/slave/storage/devices.py
index 7687867aee..8206a988f7 100644
--- a/synapse/replication/slave/storage/devices.py
+++ b/synapse/replication/slave/storage/devices.py
@@ -13,12 +13,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import BaseSlavedStore
-from ._slaved_id_tracker import SlavedIdTracker
 from synapse.storage import DataStore
 from synapse.storage.end_to_end_keys import EndToEndKeyStore
 from synapse.util.caches.stream_change_cache import StreamChangeCache
 
+from ._base import BaseSlavedStore
+from ._slaved_id_tracker import SlavedIdTracker
+
 
 class SlavedDeviceStore(BaseSlavedStore):
     def __init__(self, db_conn, hs):
diff --git a/synapse/replication/slave/storage/directory.py b/synapse/replication/slave/storage/directory.py
index 6deecd3963..1d1d48709a 100644
--- a/synapse/replication/slave/storage/directory.py
+++ b/synapse/replication/slave/storage/directory.py
@@ -13,9 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import BaseSlavedStore
 from synapse.storage.directory import DirectoryWorkerStore
 
+from ._base import BaseSlavedStore
+
 
 class DirectoryStore(DirectoryWorkerStore, BaseSlavedStore):
     pass
diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py
index b1f64ef0d8..4830c68f35 100644
--- a/synapse/replication/slave/storage/events.py
+++ b/synapse/replication/slave/storage/events.py
@@ -20,9 +20,11 @@ from synapse.storage.event_federation import EventFederationWorkerStore
 from synapse.storage.event_push_actions import EventPushActionsWorkerStore
 from synapse.storage.events_worker import EventsWorkerStore
 from synapse.storage.roommember import RoomMemberWorkerStore
+from synapse.storage.signatures import SignatureWorkerStore
 from synapse.storage.state import StateGroupWorkerStore
 from synapse.storage.stream import StreamWorkerStore
-from synapse.storage.signatures import SignatureWorkerStore
+from synapse.storage.user_erasure_store import UserErasureWorkerStore
+
 from ._base import BaseSlavedStore
 from ._slaved_id_tracker import SlavedIdTracker
 
@@ -42,9 +44,10 @@ class SlavedEventStore(EventFederationWorkerStore,
                        RoomMemberWorkerStore,
                        EventPushActionsWorkerStore,
                        StreamWorkerStore,
-                       EventsWorkerStore,
                        StateGroupWorkerStore,
+                       EventsWorkerStore,
                        SignatureWorkerStore,
+                       UserErasureWorkerStore,
                        BaseSlavedStore):
 
     def __init__(self, db_conn, hs):
diff --git a/synapse/replication/slave/storage/filtering.py b/synapse/replication/slave/storage/filtering.py
index 819ed62881..456a14cd5c 100644
--- a/synapse/replication/slave/storage/filtering.py
+++ b/synapse/replication/slave/storage/filtering.py
@@ -13,9 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import BaseSlavedStore
 from synapse.storage.filtering import FilteringStore
 
+from ._base import BaseSlavedStore
+
 
 class SlavedFilteringStore(BaseSlavedStore):
     def __init__(self, db_conn, hs):
diff --git a/synapse/replication/slave/storage/groups.py b/synapse/replication/slave/storage/groups.py
index 0bc4bce5b0..5777f07c8d 100644
--- a/synapse/replication/slave/storage/groups.py
+++ b/synapse/replication/slave/storage/groups.py
@@ -13,11 +13,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import BaseSlavedStore
-from ._slaved_id_tracker import SlavedIdTracker
 from synapse.storage import DataStore
 from synapse.util.caches.stream_change_cache import StreamChangeCache
 
+from ._base import BaseSlavedStore
+from ._slaved_id_tracker import SlavedIdTracker
+
 
 class SlavedGroupServerStore(BaseSlavedStore):
     def __init__(self, db_conn, hs):
diff --git a/synapse/replication/slave/storage/keys.py b/synapse/replication/slave/storage/keys.py
index dd2ae49e48..05ed168463 100644
--- a/synapse/replication/slave/storage/keys.py
+++ b/synapse/replication/slave/storage/keys.py
@@ -13,10 +13,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import BaseSlavedStore
 from synapse.storage import DataStore
 from synapse.storage.keys import KeyStore
 
+from ._base import BaseSlavedStore
+
 
 class SlavedKeyStore(BaseSlavedStore):
     _get_server_verify_key = KeyStore.__dict__[
diff --git a/synapse/replication/slave/storage/presence.py b/synapse/replication/slave/storage/presence.py
index cfb9280181..80b744082a 100644
--- a/synapse/replication/slave/storage/presence.py
+++ b/synapse/replication/slave/storage/presence.py
@@ -13,12 +13,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import BaseSlavedStore
-from ._slaved_id_tracker import SlavedIdTracker
-
-from synapse.util.caches.stream_change_cache import StreamChangeCache
 from synapse.storage import DataStore
 from synapse.storage.presence import PresenceStore
+from synapse.util.caches.stream_change_cache import StreamChangeCache
+
+from ._base import BaseSlavedStore
+from ._slaved_id_tracker import SlavedIdTracker
 
 
 class SlavedPresenceStore(BaseSlavedStore):
diff --git a/synapse/replication/slave/storage/push_rule.py b/synapse/replication/slave/storage/push_rule.py
index bb2c40b6e3..f0200c1e98 100644
--- a/synapse/replication/slave/storage/push_rule.py
+++ b/synapse/replication/slave/storage/push_rule.py
@@ -14,10 +14,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from .events import SlavedEventStore
-from ._slaved_id_tracker import SlavedIdTracker
 from synapse.storage.push_rule import PushRulesWorkerStore
 
+from ._slaved_id_tracker import SlavedIdTracker
+from .events import SlavedEventStore
+
 
 class SlavedPushRuleStore(PushRulesWorkerStore, SlavedEventStore):
     def __init__(self, db_conn, hs):
diff --git a/synapse/replication/slave/storage/pushers.py b/synapse/replication/slave/storage/pushers.py
index a7cd5a7291..3b2213c0d4 100644
--- a/synapse/replication/slave/storage/pushers.py
+++ b/synapse/replication/slave/storage/pushers.py
@@ -14,11 +14,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from synapse.storage.pusher import PusherWorkerStore
+
 from ._base import BaseSlavedStore
 from ._slaved_id_tracker import SlavedIdTracker
 
-from synapse.storage.pusher import PusherWorkerStore
-
 
 class SlavedPusherStore(PusherWorkerStore, BaseSlavedStore):
 
diff --git a/synapse/replication/slave/storage/receipts.py b/synapse/replication/slave/storage/receipts.py
index 1647072f65..ed12342f40 100644
--- a/synapse/replication/slave/storage/receipts.py
+++ b/synapse/replication/slave/storage/receipts.py
@@ -14,11 +14,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from synapse.storage.receipts import ReceiptsWorkerStore
+
 from ._base import BaseSlavedStore
 from ._slaved_id_tracker import SlavedIdTracker
 
-from synapse.storage.receipts import ReceiptsWorkerStore
-
 # So, um, we want to borrow a load of functions intended for reading from
 # a DataStore, but we don't want to take functions that either write to the
 # DataStore or are cached and don't have cache invalidation logic.
@@ -49,7 +49,7 @@ class SlavedReceiptsStore(ReceiptsWorkerStore, BaseSlavedStore):
 
     def invalidate_caches_for_receipt(self, room_id, receipt_type, user_id):
         self.get_receipts_for_user.invalidate((user_id, receipt_type))
-        self.get_linearized_receipts_for_room.invalidate_many((room_id,))
+        self._get_linearized_receipts_for_room.invalidate_many((room_id,))
         self.get_last_receipt_event_id_for_user.invalidate(
             (user_id, room_id, receipt_type)
         )
diff --git a/synapse/replication/slave/storage/registration.py b/synapse/replication/slave/storage/registration.py
index 7323bf0f1e..408d91df1c 100644
--- a/synapse/replication/slave/storage/registration.py
+++ b/synapse/replication/slave/storage/registration.py
@@ -13,9 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import BaseSlavedStore
 from synapse.storage.registration import RegistrationWorkerStore
 
+from ._base import BaseSlavedStore
+
 
 class SlavedRegistrationStore(RegistrationWorkerStore, BaseSlavedStore):
     pass
diff --git a/synapse/replication/slave/storage/room.py b/synapse/replication/slave/storage/room.py
index 5ae1670157..0cb474928c 100644
--- a/synapse/replication/slave/storage/room.py
+++ b/synapse/replication/slave/storage/room.py
@@ -13,8 +13,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import BaseSlavedStore
 from synapse.storage.room import RoomWorkerStore
+
+from ._base import BaseSlavedStore
 from ._slaved_id_tracker import SlavedIdTracker
 
 
diff --git a/synapse/replication/slave/storage/transactions.py b/synapse/replication/slave/storage/transactions.py
index fbb58f35da..3527beb3c9 100644
--- a/synapse/replication/slave/storage/transactions.py
+++ b/synapse/replication/slave/storage/transactions.py
@@ -13,18 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import BaseSlavedStore
-from synapse.storage import DataStore
 from synapse.storage.transactions import TransactionStore
 
+from ._base import BaseSlavedStore
 
-class TransactionStore(BaseSlavedStore):
-    get_destination_retry_timings = TransactionStore.__dict__[
-        "get_destination_retry_timings"
-    ]
-    _get_destination_retry_timings = DataStore._get_destination_retry_timings.__func__
-    set_destination_retry_timings = DataStore.set_destination_retry_timings.__func__
-    _set_destination_retry_timings = DataStore._set_destination_retry_timings.__func__
 
-    prep_send_transaction = DataStore.prep_send_transaction.__func__
-    delivered_txn = DataStore.delivered_txn.__func__
+class SlavedTransactionStore(TransactionStore, BaseSlavedStore):
+    pass
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index 6d2513c4e2..cbe9645817 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -15,17 +15,20 @@
 """A replication client for use by synapse workers.
 """
 
-from twisted.internet import reactor, defer
+import logging
+
+from twisted.internet import defer
 from twisted.internet.protocol import ReconnectingClientFactory
 
 from .commands import (
-    FederationAckCommand, UserSyncCommand, RemovePusherCommand, InvalidateCacheCommand,
+    FederationAckCommand,
+    InvalidateCacheCommand,
+    RemovePusherCommand,
     UserIpCommand,
+    UserSyncCommand,
 )
 from .protocol import ClientReplicationStreamProtocol
 
-import logging
-
 logger = logging.getLogger(__name__)
 
 
@@ -44,7 +47,7 @@ class ReplicationClientFactory(ReconnectingClientFactory):
         self.server_name = hs.config.server_name
         self._clock = hs.get_clock()  # As self.clock is defined in super class
 
-        reactor.addSystemEventTrigger("before", "shutdown", self.stopTrying)
+        hs.get_reactor().addSystemEventTrigger("before", "shutdown", self.stopTrying)
 
     def startedConnecting(self, connector):
         logger.info("Connecting to replication: %r", connector.getDestination())
@@ -95,7 +98,7 @@ class ReplicationClientHandler(object):
         factory = ReplicationClientFactory(hs, client_name, self)
         host = hs.config.worker_replication_host
         port = hs.config.worker_replication_port
-        reactor.connectTCP(host, port, factory)
+        hs.get_reactor().connectTCP(host, port, factory)
 
     def on_rdata(self, stream_name, token, rows):
         """Called when we get new replication data. By default this just pokes
@@ -104,7 +107,7 @@ class ReplicationClientHandler(object):
         Can be overriden in subclasses to handle more.
         """
         logger.info("Received rdata %s -> %s", stream_name, token)
-        self.store.process_replication_rows(stream_name, token, rows)
+        return self.store.process_replication_rows(stream_name, token, rows)
 
     def on_position(self, stream_name, token):
         """Called when we get new position data. By default this just pokes
@@ -112,7 +115,7 @@ class ReplicationClientHandler(object):
 
         Can be overriden in subclasses to handle more.
         """
-        self.store.process_replication_rows(stream_name, token, [])
+        return self.store.process_replication_rows(stream_name, token, [])
 
     def on_sync(self, data):
         """When we received a SYNC we wake up any deferreds that were waiting
@@ -189,7 +192,7 @@ class ReplicationClientHandler(object):
         """Returns a deferred that is resolved when we receive a SYNC command
         with given data.
 
-        Used by tests.
+        [Not currently] used by tests.
         """
         return self.awaiting_syncs.setdefault(data, defer.Deferred())
 
diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py
index 12aac3cc6b..327556f6a1 100644
--- a/synapse/replication/tcp/commands.py
+++ b/synapse/replication/tcp/commands.py
@@ -19,13 +19,17 @@ allowed to be sent by which side.
 """
 
 import logging
-import simplejson
+import platform
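+
+# simplejson is faster than the stdlib json on CPython thanks to its C
+# speedups, but those do not help under PyPy, so pick the implementation per
+# platform. (The stdlib JSONEncoder has no namedtuple_as_object option.)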
 
+if platform.python_implementation() == "PyPy":
+    import json
+    _json_encoder = json.JSONEncoder()
+else:
+    import simplejson as json
+    _json_encoder = json.JSONEncoder(namedtuple_as_object=False)
 
 logger = logging.getLogger(__name__)
 
-_json_encoder = simplejson.JSONEncoder(namedtuple_as_object=False)
-
 
 class Command(object):
     """The base command class.
@@ -55,6 +59,12 @@ class Command(object):
         """
         return self.data
 
+    def get_logcontext_id(self):
+        """Get a suitable string for the logcontext when processing this command"""
+
+        # by default, we just use the command name.
+        return self.NAME
+
 
 class ServerCommand(Command):
     """Sent by the server on new connection and includes the server_name.
@@ -102,7 +112,7 @@ class RdataCommand(Command):
         return cls(
             stream_name,
             None if token == "batch" else int(token),
-            simplejson.loads(row_json)
+            json.loads(row_json)
         )
 
     def to_line(self):
@@ -112,6 +122,9 @@ class RdataCommand(Command):
             _json_encoder.encode(self.row),
         ))
 
+    def get_logcontext_id(self):
+        return "RDATA-" + self.stream_name
+
 
 class PositionCommand(Command):
     """Sent by the client to tell the client the stream postition without
@@ -186,6 +199,9 @@ class ReplicateCommand(Command):
     def to_line(self):
         return " ".join((self.stream_name, str(self.token),))
 
+    def get_logcontext_id(self):
+        return "REPLICATE-" + self.stream_name
+
 
 class UserSyncCommand(Command):
     """Sent by the client to inform the server that a user has started or
@@ -300,7 +316,7 @@ class InvalidateCacheCommand(Command):
     def from_line(cls, line):
         cache_func, keys_json = line.split(" ", 1)
 
-        return cls(cache_func, simplejson.loads(keys_json))
+        return cls(cache_func, json.loads(keys_json))
 
     def to_line(self):
         return " ".join((
@@ -329,7 +345,7 @@ class UserIpCommand(Command):
     def from_line(cls, line):
         user_id, jsn = line.split(" ", 1)
 
-        access_token, ip, user_agent, device_id, last_seen = simplejson.loads(jsn)
+        access_token, ip, user_agent, device_id, last_seen = json.loads(jsn)
 
         return cls(
             user_id, access_token, ip, user_agent, device_id, last_seen
diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py
index c870475cd1..74e892c104 100644
--- a/synapse/replication/tcp/protocol.py
+++ b/synapse/replication/tcp/protocol.py
@@ -49,29 +49,39 @@ indicate which side is sending, these are *not* included on the wire::
     * connection closed by server *
 """
 
+import fcntl
+import logging
+import struct
+from collections import defaultdict
+
+from six import iteritems, iterkeys
+
+from prometheus_client import Counter
+
 from twisted.internet import defer
 from twisted.protocols.basic import LineOnlyReceiver
 from twisted.python.failure import Failure
 
-from .commands import (
-    COMMAND_MAP, VALID_CLIENT_COMMANDS, VALID_SERVER_COMMANDS,
-    ErrorCommand, ServerCommand, RdataCommand, PositionCommand, PingCommand,
-    NameCommand, ReplicateCommand, UserSyncCommand, SyncCommand,
-)
-from .streams import STREAMS_MAP
-
 from synapse.metrics import LaterGauge
+from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.util.logcontext import make_deferred_yieldable, run_in_background
 from synapse.util.stringutils import random_string
 
-from prometheus_client import Counter
-
-from collections import defaultdict
-
-from six import iterkeys, iteritems
-
-import logging
-import struct
-import fcntl
+from .commands import (
+    COMMAND_MAP,
+    VALID_CLIENT_COMMANDS,
+    VALID_SERVER_COMMANDS,
+    ErrorCommand,
+    NameCommand,
+    PingCommand,
+    PositionCommand,
+    RdataCommand,
+    ReplicateCommand,
+    ServerCommand,
+    SyncCommand,
+    UserSyncCommand,
+)
+from .streams import STREAMS_MAP
 
 connection_close_counter = Counter(
     "synapse_replication_tcp_protocol_close_reason", "", ["reason_type"])
@@ -214,7 +224,11 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
 
         # Now lets try and call on_<CMD_NAME> function
         try:
-            getattr(self, "on_%s" % (cmd_name,))(cmd)
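+            # the on_<CMD_NAME> handler may return a Deferred; run it as a
+            # named background process so it gets a log context and shows up
+            # in the background-process metrics.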
+            run_as_background_process(
+                "replication-" + cmd.get_logcontext_id(),
+                getattr(self, "on_%s" % (cmd_name,)),
+                cmd,
+            )
         except Exception:
             logger.exception("[%s] Failed to handle line: %r", self.id(), line)
 
@@ -379,7 +393,7 @@ class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol):
         self.name = cmd.data
 
     def on_USER_SYNC(self, cmd):
-        self.streamer.on_user_sync(
+        return self.streamer.on_user_sync(
             self.conn_id, cmd.user_id, cmd.is_syncing, cmd.last_sync_ms,
         )
 
@@ -389,22 +403,33 @@ class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol):
 
         if stream_name == "ALL":
             # Subscribe to all streams we're publishing to.
-            for stream in iterkeys(self.streamer.streams_by_name):
-                self.subscribe_to_stream(stream, token)
+            deferreds = [
+                run_in_background(
+                    self.subscribe_to_stream,
+                    stream, token,
+                )
+                for stream in iterkeys(self.streamer.streams_by_name)
+            ]
+
+            return make_deferred_yieldable(
+                defer.gatherResults(deferreds, consumeErrors=True)
+            )
         else:
-            self.subscribe_to_stream(stream_name, token)
+            return self.subscribe_to_stream(stream_name, token)
 
     def on_FEDERATION_ACK(self, cmd):
-        self.streamer.federation_ack(cmd.token)
+        return self.streamer.federation_ack(cmd.token)
 
     def on_REMOVE_PUSHER(self, cmd):
-        self.streamer.on_remove_pusher(cmd.app_id, cmd.push_key, cmd.user_id)
+        return self.streamer.on_remove_pusher(
+            cmd.app_id, cmd.push_key, cmd.user_id,
+        )
 
     def on_INVALIDATE_CACHE(self, cmd):
-        self.streamer.on_invalidate_cache(cmd.cache_func, cmd.keys)
+        return self.streamer.on_invalidate_cache(cmd.cache_func, cmd.keys)
 
     def on_USER_IP(self, cmd):
-        self.streamer.on_user_ip(
+        return self.streamer.on_user_ip(
             cmd.user_id, cmd.access_token, cmd.ip, cmd.user_agent, cmd.device_id,
             cmd.last_seen,
         )
@@ -534,14 +559,13 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
             # Check if this is the last of a batch of updates
             rows = self.pending_batches.pop(stream_name, [])
             rows.append(row)
-
-            self.handler.on_rdata(stream_name, cmd.token, rows)
+            return self.handler.on_rdata(stream_name, cmd.token, rows)
 
     def on_POSITION(self, cmd):
-        self.handler.on_position(cmd.stream_name, cmd.token)
+        return self.handler.on_position(cmd.stream_name, cmd.token)
 
     def on_SYNC(self, cmd):
-        self.handler.on_sync(cmd.data)
+        return self.handler.on_sync(cmd.data)
 
     def replicate(self, stream_name, token):
         """Send the subscription request to the server
@@ -564,11 +588,13 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
 # The following simply registers metrics for the replication connections
 
 pending_commands = LaterGauge(
-    "pending_commands", "", ["name", "conn_id"],
+    "synapse_replication_tcp_protocol_pending_commands",
+    "",
+    ["name", "conn_id"],
     lambda: {
-        (p.name, p.conn_id): len(p.pending_commands)
-        for p in connected_connections
-    })
+        (p.name, p.conn_id): len(p.pending_commands) for p in connected_connections
+    },
+)
 
 
 def transport_buffer_size(protocol):
@@ -579,11 +605,13 @@ def transport_buffer_size(protocol):
 
 
 transport_send_buffer = LaterGauge(
-    "synapse_replication_tcp_transport_send_buffer", "", ["name", "conn_id"],
+    "synapse_replication_tcp_protocol_transport_send_buffer",
+    "",
+    ["name", "conn_id"],
     lambda: {
-        (p.name, p.conn_id): transport_buffer_size(p)
-        for p in connected_connections
-    })
+        (p.name, p.conn_id): transport_buffer_size(p) for p in connected_connections
+    },
+)
 
 
 def transport_kernel_read_buffer_size(protocol, read=True):
@@ -602,37 +630,50 @@ def transport_kernel_read_buffer_size(protocol, read=True):
 
 
 tcp_transport_kernel_send_buffer = LaterGauge(
-    "synapse_replication_tcp_transport_kernel_send_buffer", "", ["name", "conn_id"],
+    "synapse_replication_tcp_protocol_transport_kernel_send_buffer",
+    "",
+    ["name", "conn_id"],
     lambda: {
         (p.name, p.conn_id): transport_kernel_read_buffer_size(p, False)
         for p in connected_connections
-    })
+    },
+)
 
 
 tcp_transport_kernel_read_buffer = LaterGauge(
-    "synapse_replication_tcp_transport_kernel_read_buffer", "", ["name", "conn_id"],
+    "synapse_replication_tcp_protocol_transport_kernel_read_buffer",
+    "",
+    ["name", "conn_id"],
     lambda: {
         (p.name, p.conn_id): transport_kernel_read_buffer_size(p, True)
         for p in connected_connections
-    })
+    },
+)
 
 
 tcp_inbound_commands = LaterGauge(
-    "synapse_replication_tcp_inbound_commands", "", ["command", "name", "conn_id"],
+    "synapse_replication_tcp_protocol_inbound_commands",
+    "",
+    ["command", "name", "conn_id"],
     lambda: {
         (k[0], p.name, p.conn_id): count
         for p in connected_connections
         for k, count in iteritems(p.inbound_commands_counter)
-    })
+    },
+)
 
 tcp_outbound_commands = LaterGauge(
-    "synapse_replication_tcp_outbound_commands", "", ["command", "name", "conn_id"],
+    "synapse_replication_tcp_protocol_outbound_commands",
+    "",
+    ["command", "name", "conn_id"],
     lambda: {
         (k[0], p.name, p.conn_id): count
         for p in connected_connections
         for k, count in iteritems(p.outbound_commands_counter)
-    })
+    },
+)
 
 # number of updates received for each RDATA stream
-inbound_rdata_count = Counter("synapse_replication_tcp_inbound_rdata_count", "",
-                              ["stream_name"])
+inbound_rdata_count = Counter(
+    "synapse_replication_tcp_protocol_inbound_rdata_count", "", ["stream_name"]
+)
diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py
index 63bd6d2652..fd59f1595f 100644
--- a/synapse/replication/tcp/resource.py
+++ b/synapse/replication/tcp/resource.py
@@ -15,19 +15,21 @@
 """The server side of the replication stream.
 """
 
-from twisted.internet import defer, reactor
-from twisted.internet.protocol import Factory
+import logging
 
-from .streams import STREAMS_MAP, FederationStream
-from .protocol import ServerReplicationStreamProtocol
+from six import itervalues
 
-from synapse.util.metrics import Measure, measure_func
-from synapse.metrics import LaterGauge
+from prometheus_client import Counter
 
-import logging
+from twisted.internet import defer
+from twisted.internet.protocol import Factory
 
-from prometheus_client import Counter
-from six import itervalues
+from synapse.metrics import LaterGauge
+from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.util.metrics import Measure, measure_func
+
+from .protocol import ServerReplicationStreamProtocol
+from .streams import STREAMS_MAP, FederationStream
 
 stream_updates_counter = Counter("synapse_replication_tcp_resource_stream_updates",
                                  "", ["stream_name"])
@@ -109,14 +111,13 @@ class ReplicationStreamer(object):
         self.is_looping = False
         self.pending_updates = False
 
-        reactor.addSystemEventTrigger("before", "shutdown", self.on_shutdown)
+        hs.get_reactor().addSystemEventTrigger("before", "shutdown", self.on_shutdown)
 
     def on_shutdown(self):
         # close all connections on shutdown
         for conn in self.connections:
             conn.send_error("server shutting down")
 
-    @defer.inlineCallbacks
     def on_notifier_poke(self):
         """Checks if there is actually any new data and sends it to the
         connections if there are.
@@ -131,14 +132,16 @@ class ReplicationStreamer(object):
                 stream.discard_updates_and_advance()
             return
 
-        # If we're in the process of checking for new updates, mark that fact
-        # and return
+        self.pending_updates = True
+
         if self.is_looping:
-            logger.debug("Noitifier poke loop already running")
-            self.pending_updates = True
+            logger.debug("Notifier poke loop already running")
             return
 
-        self.pending_updates = True
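+        # Fire off the notifier loop in the background; this function now
+        # returns immediately and the loop drains pending_updates itself.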
+        run_as_background_process("replication_notifier", self._run_notifier_loop)
+
+    @defer.inlineCallbacks
+    def _run_notifier_loop(self):
         self.is_looping = True
 
         try:
diff --git a/synapse/replication/tcp/streams.py b/synapse/replication/tcp/streams.py
index 4c60bf79f9..55fe701c5c 100644
--- a/synapse/replication/tcp/streams.py
+++ b/synapse/replication/tcp/streams.py
@@ -24,11 +24,10 @@ Each stream is defined by the following information:
     update_function:    The function that returns a list of updates between two tokens
 """
 
-from twisted.internet import defer
-from collections import namedtuple
-
 import logging
+from collections import namedtuple
 
+from twisted.internet import defer
 
 logger = logging.getLogger(__name__)
 
diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py
index 16f5a73b95..3418f06fd6 100644
--- a/synapse/rest/__init__.py
+++ b/synapse/rest/__init__.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,49 +14,50 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.rest.client import (
-    versions,
-)
+from six import PY3
 
+from synapse.http.server import JsonResource
+from synapse.rest.client import versions
 from synapse.rest.client.v1 import (
-    room,
+    admin,
+    directory,
     events,
-    profile,
-    presence,
     initial_sync,
-    directory,
-    voip,
-    admin,
-    pusher,
-    push_rule,
-    register as v1_register,
     login as v1_login,
     logout,
+    presence,
+    profile,
+    push_rule,
+    pusher,
+    room,
+    voip,
 )
-
 from synapse.rest.client.v2_alpha import (
-    sync,
-    filter,
     account,
-    register,
+    account_data,
     auth,
-    receipts,
-    read_marker,
+    devices,
+    filter,
+    groups,
     keys,
-    tokenrefresh,
-    tags,
-    account_data,
-    report_event,
-    openid,
     notifications,
-    devices,
-    thirdparty,
+    openid,
+    read_marker,
+    receipts,
+    register,
+    report_event,
     sendtodevice,
+    sync,
+    tags,
+    thirdparty,
+    tokenrefresh,
     user_directory,
-    groups,
 )
 
-from synapse.http.server import JsonResource
+if not PY3:
+    from synapse.rest.client.v1_only import (
+        register as v1_register,
+    )
 
 
 class ClientRestResource(JsonResource):
@@ -69,14 +71,22 @@ class ClientRestResource(JsonResource):
     def register_servlets(client_resource, hs):
         versions.register_servlets(client_resource)
 
-        # "v1"
-        room.register_servlets(hs, client_resource)
+        if not PY3:
+            # "v1" (Python 2 only)
+            v1_register.register_servlets(hs, client_resource)
+
+        # Deprecated in r0
+        initial_sync.register_servlets(hs, client_resource)
+        room.register_deprecated_servlets(hs, client_resource)
+
+        # Partially deprecated in r0
         events.register_servlets(hs, client_resource)
-        v1_register.register_servlets(hs, client_resource)
+
+        # "v1" + "r0"
+        room.register_servlets(hs, client_resource)
         v1_login.register_servlets(hs, client_resource)
         profile.register_servlets(hs, client_resource)
         presence.register_servlets(hs, client_resource)
-        initial_sync.register_servlets(hs, client_resource)
         directory.register_servlets(hs, client_resource)
         voip.register_servlets(hs, client_resource)
         admin.register_servlets(hs, client_resource)
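The `six.PY3` guard keeps the legacy v1-only registration servlets off the resource tree entirely on Python 3, rather than importing them and failing later. A hedged sketch of the same gating idea, using a stand-in for the real module:

```python
from six import PY3

class LegacyRegisterModule(object):
    """Stand-in for synapse.rest.client.v1_only.register (illustrative)."""
    @staticmethod
    def register_servlets(hs, resource):
        resource.append("legacy /register servlet")

resource = []
if not PY3:
    # Python 2 only: the pre-r0 registration API is still wired up.
    LegacyRegisterModule.register_servlets(None, resource)

print(resource)  # [] on Python 3, one legacy entry on Python 2
```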
diff --git a/synapse/rest/client/transactions.py b/synapse/rest/client/transactions.py
index 7c01b438cb..48c17f1b6d 100644
--- a/synapse/rest/client/transactions.py
+++ b/synapse/rest/client/transactions.py
@@ -17,38 +17,20 @@
 to ensure idempotency when performing PUTs using the REST API."""
 import logging
 
-from synapse.api.auth import get_access_token_from_request
-from synapse.util.async import ObservableDeferred
+from synapse.util.async_helpers import ObservableDeferred
 from synapse.util.logcontext import make_deferred_yieldable, run_in_background
 
 logger = logging.getLogger(__name__)
 
-
-def get_transaction_key(request):
-    """A helper function which returns a transaction key that can be used
-    with TransactionCache for idempotent requests.
-
-    Idempotency is based on the returned key being the same for separate
-    requests to the same endpoint. The key is formed from the HTTP request
-    path and the access_token for the requesting user.
-
-    Args:
-        request (twisted.web.http.Request): The incoming request. Must
-        contain an access_token.
-    Returns:
-        str: A transaction key
-    """
-    token = get_access_token_from_request(request)
-    return request.path + "/" + token
-
-
 CLEANUP_PERIOD_MS = 1000 * 60 * 30  # 30 mins
 
 
 class HttpTransactionCache(object):
 
-    def __init__(self, clock):
-        self.clock = clock
+    def __init__(self, hs):
+        self.hs = hs
+        self.auth = self.hs.get_auth()
+        self.clock = self.hs.get_clock()
         self.transactions = {
             # $txn_key: (ObservableDeferred<(res_code, res_json_body)>, timestamp)
         }
@@ -56,6 +38,23 @@ class HttpTransactionCache(object):
         # for at *LEAST* 30 mins, and at *MOST* 60 mins.
         self.cleaner = self.clock.looping_call(self._cleanup, CLEANUP_PERIOD_MS)
 
+    def _get_transaction_key(self, request):
+        """A helper function which returns a transaction key that can be used
+        with TransactionCache for idempotent requests.
+
+        Idempotency is based on the returned key being the same for separate
+        requests to the same endpoint. The key is formed from the HTTP request
+        path and the access_token for the requesting user.
+
+        Args:
+            request (twisted.web.http.Request): The incoming request. Must
+            contain an access_token.
+        Returns:
+            str: A transaction key
+        """
+        token = self.auth.get_access_token_from_request(request)
+        return request.path.decode('utf8') + "/" + token
+
     def fetch_or_execute_request(self, request, fn, *args, **kwargs):
         """A helper function for fetch_or_execute which extracts
         a transaction key from the given request.
@@ -64,7 +63,7 @@ class HttpTransactionCache(object):
             fetch_or_execute
         """
         return self.fetch_or_execute(
-            get_transaction_key(request), fn, *args, **kwargs
+            self._get_transaction_key(request), fn, *args, **kwargs
         )
 
     def fetch_or_execute(self, txn_key, fn, *args, **kwargs):
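Keying the cache on HTTP path plus access token means a client that retries a `PUT` with the same transaction ID gets the original response back instead of causing a duplicate side effect. A toy sketch of that idempotency contract (no Twisted, names illustrative):

```python
class ToyTransactionCache(object):
    def __init__(self):
        self.transactions = {}

    def fetch_or_execute(self, txn_key, fn, *args, **kwargs):
        # The first caller executes fn; replays of the same key reuse
        # the cached result instead of running fn again.
        if txn_key not in self.transactions:
            self.transactions[txn_key] = fn(*args, **kwargs)
        return self.transactions[txn_key]

cache = ToyTransactionCache()
key = "/rooms/!a:hs/send/m.room.message/txn1" + "/" + "ACCESS_TOKEN"
first = cache.fetch_or_execute(key, lambda: (200, {"event_id": "$abc"}))
retry = cache.fetch_or_execute(key, lambda: (200, {"event_id": "$def"}))
assert first == retry  # the retry sees the original response
```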
diff --git a/synapse/rest/client/v1/admin.py b/synapse/rest/client/v1/admin.py
index b8665a45eb..ad536ab570 100644
--- a/synapse/rest/client/v1/admin.py
+++ b/synapse/rest/client/v1/admin.py
@@ -14,17 +14,27 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import hashlib
+import hmac
+import logging
+
+from six import text_type
+from six.moves import http_client
+
 from twisted.internet import defer
 
 from synapse.api.constants import Membership
-from synapse.api.errors import AuthError, SynapseError, Codes, NotFoundError
+from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError
+from synapse.http.servlet import (
+    assert_params_in_dict,
+    parse_integer,
+    parse_json_object_from_request,
+    parse_string,
+)
 from synapse.types import UserID, create_requester
-from synapse.http.servlet import parse_json_object_from_request
 
 from .base import ClientV1RestServlet, client_path_patterns
 
-import logging
-
 logger = logging.getLogger(__name__)
 
 
@@ -56,6 +66,132 @@ class UsersRestServlet(ClientV1RestServlet):
         defer.returnValue((200, ret))
 
 
+class UserRegisterServlet(ClientV1RestServlet):
+    """
+    Attributes:
+         NONCE_TIMEOUT (int): Seconds until a generated nonce won't be accepted
+         nonces (dict[str, int]): The nonces that we will accept. A dict of
+             nonce to the time it was generated, in int seconds.
+    """
+    PATTERNS = client_path_patterns("/admin/register")
+    NONCE_TIMEOUT = 60
+
+    def __init__(self, hs):
+        super(UserRegisterServlet, self).__init__(hs)
+        self.handlers = hs.get_handlers()
+        self.reactor = hs.get_reactor()
+        self.nonces = {}
+        self.hs = hs
+
+    def _clear_old_nonces(self):
+        """
+        Clear out old nonces that are older than NONCE_TIMEOUT.
+        """
+        now = int(self.reactor.seconds())
+
+        for k, v in list(self.nonces.items()):
+            if now - v > self.NONCE_TIMEOUT:
+                del self.nonces[k]
+
+    def on_GET(self, request):
+        """
+        Generate a new nonce.
+        """
+        self._clear_old_nonces()
+
+        nonce = self.hs.get_secrets().token_hex(64)
+        self.nonces[nonce] = int(self.reactor.seconds())
+        return (200, {"nonce": nonce.encode('ascii')})
+
+    @defer.inlineCallbacks
+    def on_POST(self, request):
+        self._clear_old_nonces()
+
+        if not self.hs.config.registration_shared_secret:
+            raise SynapseError(400, "Shared secret registration is not enabled")
+
+        body = parse_json_object_from_request(request)
+
+        if "nonce" not in body:
+            raise SynapseError(
+                400, "nonce must be specified", errcode=Codes.BAD_JSON,
+            )
+
+        nonce = body["nonce"]
+
+        if nonce not in self.nonces:
+            raise SynapseError(
+                400, "unrecognised nonce",
+            )
+
+        # Delete the nonce, so it can't be reused, even if it's invalid
+        del self.nonces[nonce]
+
+        if "username" not in body:
+            raise SynapseError(
+                400, "username must be specified", errcode=Codes.BAD_JSON,
+            )
+        else:
+            if (
+                not isinstance(body['username'], text_type)
+                or len(body['username']) > 512
+            ):
+                raise SynapseError(400, "Invalid username")
+
+            username = body["username"].encode("utf-8")
+            if b"\x00" in username:
+                raise SynapseError(400, "Invalid username")
+
+        if "password" not in body:
+            raise SynapseError(
+                400, "password must be specified", errcode=Codes.BAD_JSON,
+            )
+        else:
+            if (
+                not isinstance(body['password'], text_type)
+                or len(body['password']) > 512
+            ):
+                raise SynapseError(400, "Invalid password")
+
+            password = body["password"].encode("utf-8")
+            if b"\x00" in password:
+                raise SynapseError(400, "Invalid password")
+
+        admin = body.get("admin", None)
+        got_mac = body["mac"]
+
+        want_mac = hmac.new(
+            key=self.hs.config.registration_shared_secret.encode(),
+            digestmod=hashlib.sha1,
+        )
+        want_mac.update(nonce)
+        want_mac.update(b"\x00")
+        want_mac.update(username)
+        want_mac.update(b"\x00")
+        want_mac.update(password)
+        want_mac.update(b"\x00")
+        want_mac.update(b"admin" if admin else b"notadmin")
+        want_mac = want_mac.hexdigest()
+
+        if not hmac.compare_digest(want_mac, got_mac.encode('ascii')):
+            raise SynapseError(403, "HMAC incorrect")
+
+        # Reuse the parts of RegisterRestServlet to reduce code duplication
+        from synapse.rest.client.v2_alpha.register import RegisterRestServlet
+
+        register = RegisterRestServlet(self.hs)
+
+        (user_id, _) = yield register.registration_handler.register(
+            localpart=body['username'].lower(),
+            password=body["password"],
+            admin=bool(admin),
+            generate_token=False,
+        )
+
+        result = yield register._create_registration_details(user_id, body)
+        defer.returnValue((200, result))
+
+
 class WhoisRestServlet(ClientV1RestServlet):
     PATTERNS = client_path_patterns("/admin/whois/(?P<user_id>[^/]*)")
 
@@ -96,16 +232,8 @@ class PurgeMediaCacheRestServlet(ClientV1RestServlet):
         if not is_admin:
             raise AuthError(403, "You are not a server admin")
 
-        before_ts = request.args.get("before_ts", None)
-        if not before_ts:
-            raise SynapseError(400, "Missing 'before_ts' arg")
-
-        logger.info("before_ts: %r", before_ts[0])
-
-        try:
-            before_ts = int(before_ts[0])
-        except Exception:
-            raise SynapseError(400, "Invalid 'before_ts' arg")
+        before_ts = parse_integer(request, "before_ts", required=True)
+        logger.info("before_ts: %r", before_ts)
 
         ret = yield self.media_repository.delete_old_remote_media(before_ts)
 
@@ -124,7 +252,7 @@ class PurgeHistoryRestServlet(ClientV1RestServlet):
             hs (synapse.server.HomeServer)
         """
         super(PurgeHistoryRestServlet, self).__init__(hs)
-        self.handlers = hs.get_handlers()
+        self.pagination_handler = hs.get_pagination_handler()
         self.store = hs.get_datastore()
 
     @defer.inlineCallbacks
@@ -199,7 +327,7 @@ class PurgeHistoryRestServlet(ClientV1RestServlet):
                 errcode=Codes.BAD_JSON,
             )
 
-        purge_id = yield self.handlers.message_handler.start_purge_history(
+        purge_id = yield self.pagination_handler.start_purge_history(
             room_id, token,
             delete_local_events=delete_local_events,
         )
@@ -221,7 +349,7 @@ class PurgeHistoryStatusRestServlet(ClientV1RestServlet):
             hs (synapse.server.HomeServer)
         """
         super(PurgeHistoryStatusRestServlet, self).__init__(hs)
-        self.handlers = hs.get_handlers()
+        self.pagination_handler = hs.get_pagination_handler()
 
     @defer.inlineCallbacks
     def on_GET(self, request, purge_id):
@@ -231,7 +359,7 @@ class PurgeHistoryStatusRestServlet(ClientV1RestServlet):
         if not is_admin:
             raise AuthError(403, "You are not a server admin")
 
-        purge_status = self.handlers.message_handler.get_purge_status(purge_id)
+        purge_status = self.pagination_handler.get_purge_status(purge_id)
         if purge_status is None:
             raise NotFoundError("purge id '%s' not found" % purge_id)
 
@@ -247,6 +375,15 @@ class DeactivateAccountRestServlet(ClientV1RestServlet):
 
     @defer.inlineCallbacks
     def on_POST(self, request, target_user_id):
+        body = parse_json_object_from_request(request, allow_empty_body=True)
+        erase = body.get("erase", False)
+        if not isinstance(erase, bool):
+            raise SynapseError(
+                http_client.BAD_REQUEST,
+                "Param 'erase' must be a boolean, if given",
+                Codes.BAD_JSON,
+            )
+
         UserID.from_string(target_user_id)
         requester = yield self.auth.get_user_by_req(request)
         is_admin = yield self.auth.is_server_admin(requester.user)
@@ -254,8 +391,17 @@ class DeactivateAccountRestServlet(ClientV1RestServlet):
         if not is_admin:
             raise AuthError(403, "You are not a server admin")
 
-        yield self._deactivate_account_handler.deactivate_account(target_user_id)
-        defer.returnValue((200, {}))
+        result = yield self._deactivate_account_handler.deactivate_account(
+            target_user_id, erase,
+        )
+        if result:
+            id_server_unbind_result = "success"
+        else:
+            id_server_unbind_result = "no-support"
+
+        defer.returnValue((200, {
+            "id_server_unbind_result": id_server_unbind_result,
+        }))
 
 
 class ShutdownRoomRestServlet(ClientV1RestServlet):
@@ -287,10 +433,8 @@ class ShutdownRoomRestServlet(ClientV1RestServlet):
             raise AuthError(403, "You are not a server admin")
 
         content = parse_json_object_from_request(request)
-
-        new_room_user_id = content.get("new_room_user_id")
-        if not new_room_user_id:
-            raise SynapseError(400, "Please provide field `new_room_user_id`")
+        assert_params_in_dict(content, ["new_room_user_id"])
+        new_room_user_id = content["new_room_user_id"]
 
         room_creator_requester = create_requester(new_room_user_id)
 
@@ -451,9 +595,8 @@ class ResetPasswordRestServlet(ClientV1RestServlet):
             raise AuthError(403, "You are not a server admin")
 
         params = parse_json_object_from_request(request)
+        assert_params_in_dict(params, ["new_password"])
         new_password = params['new_password']
-        if not new_password:
-            raise SynapseError(400, "Missing 'new_password' arg")
 
         logger.info("new_password: %r", new_password)
 
@@ -501,12 +644,9 @@ class GetUsersPaginatedRestServlet(ClientV1RestServlet):
             raise SynapseError(400, "Can only look up local users")
 
         order = "name"  # order by name in user table
-        start = request.args.get("start")[0]
-        limit = request.args.get("limit")[0]
-        if not limit:
-            raise SynapseError(400, "Missing 'limit' arg")
-        if not start:
-            raise SynapseError(400, "Missing 'start' arg")
+        start = parse_integer(request, "start", required=True)
+        limit = parse_integer(request, "limit", required=True)
+
         logger.info("limit: %s, start: %s", limit, start)
 
         ret = yield self.handlers.admin_handler.get_users_paginate(
@@ -538,12 +678,9 @@ class GetUsersPaginatedRestServlet(ClientV1RestServlet):
 
         order = "name"  # order by name in user table
         params = parse_json_object_from_request(request)
+        assert_params_in_dict(params, ["limit", "start"])
         limit = params['limit']
         start = params['start']
-        if not limit:
-            raise SynapseError(400, "Missing 'limit' arg")
-        if not start:
-            raise SynapseError(400, "Missing 'start' arg")
         logger.info("limit: %s, start: %s", limit, start)
 
         ret = yield self.handlers.admin_handler.get_users_paginate(
@@ -591,10 +728,7 @@ class SearchUsersRestServlet(ClientV1RestServlet):
         if not self.hs.is_mine(target_user):
             raise SynapseError(400, "Can only look up local users")
 
-        term = request.args.get("term")[0]
-        if not term:
-            raise SynapseError(400, "Missing 'term' arg")
-
+        term = parse_string(request, "term", required=True)
         logger.info("term: %s ", term)
 
         ret = yield self.handlers.admin_handler.search_users(
@@ -616,3 +750,4 @@ def register_servlets(hs, http_server):
     ShutdownRoomRestServlet(hs).register(http_server)
     QuarantineMediaInRoom(hs).register(http_server)
     ListMediaInRoom(hs).register(http_server)
+    UserRegisterServlet(hs).register(http_server)
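A caller of the new `/admin/register` endpoint has to reproduce the servlet's MAC exactly: a SHA-1 HMAC keyed on the shared secret, over nonce, username, password and the admin flag, NUL-separated. A hedged client-side sketch (the secret, nonce and credentials are placeholders):

```python
import hashlib
import hmac

def build_register_mac(shared_secret, nonce, username, password, admin=False):
    # Mirrors the want_mac computation in UserRegisterServlet.on_POST.
    mac = hmac.new(key=shared_secret.encode("utf8"), digestmod=hashlib.sha1)
    for part in (nonce.encode("utf8"), username.encode("utf8"),
                 password.encode("utf8")):
        mac.update(part)
        mac.update(b"\x00")
    mac.update(b"admin" if admin else b"notadmin")
    return mac.hexdigest()

# Body a client would POST back to the /admin/register endpoint,
# after fetching {"nonce": ...} from a GET to the same path.
body = {
    "nonce": "NONCE_FROM_GET",    # placeholder
    "username": "alice",
    "password": "correct-horse",  # placeholder
    "admin": False,
}
body["mac"] = build_register_mac("SHARED_SECRET", body["nonce"],
                                 body["username"], body["password"])
```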
diff --git a/synapse/rest/client/v1/base.py b/synapse/rest/client/v1/base.py
index 197335d7aa..c77d7aba68 100644
--- a/synapse/rest/client/v1/base.py
+++ b/synapse/rest/client/v1/base.py
@@ -16,14 +16,12 @@
 """This module contains base REST classes for constructing client v1 servlets.
 """
 
-from synapse.http.servlet import RestServlet
-from synapse.api.urls import CLIENT_PREFIX
-from synapse.rest.client.transactions import HttpTransactionCache
-
-import re
-
 import logging
+import re
 
+from synapse.api.urls import CLIENT_PREFIX
+from synapse.http.servlet import RestServlet
+from synapse.rest.client.transactions import HttpTransactionCache
 
 logger = logging.getLogger(__name__)
 
@@ -64,4 +62,4 @@ class ClientV1RestServlet(RestServlet):
         self.hs = hs
         self.builder_factory = hs.get_event_builder_factory()
         self.auth = hs.get_auth()
-        self.txns = HttpTransactionCache(hs.get_clock())
+        self.txns = HttpTransactionCache(hs)
diff --git a/synapse/rest/client/v1/directory.py b/synapse/rest/client/v1/directory.py
index 1c3933380f..97733f3026 100644
--- a/synapse/rest/client/v1/directory.py
+++ b/synapse/rest/client/v1/directory.py
@@ -14,17 +14,16 @@
 # limitations under the License.
 
 
+import logging
+
 from twisted.internet import defer
 
-from synapse.api.errors import AuthError, SynapseError, Codes
-from synapse.types import RoomAlias
+from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError
 from synapse.http.servlet import parse_json_object_from_request
+from synapse.types import RoomAlias
 
 from .base import ClientV1RestServlet, client_path_patterns
 
-import logging
-
-
 logger = logging.getLogger(__name__)
 
 
@@ -53,15 +52,14 @@ class ClientDirectoryServer(ClientV1RestServlet):
 
     @defer.inlineCallbacks
     def on_PUT(self, request, room_alias):
+        room_alias = RoomAlias.from_string(room_alias)
+
         content = parse_json_object_from_request(request)
         if "room_id" not in content:
-            raise SynapseError(400, "Missing room_id key",
+            raise SynapseError(400, 'Missing params: ["room_id"]',
                                errcode=Codes.BAD_JSON)
 
         logger.debug("Got content: %s", content)
-
-        room_alias = RoomAlias.from_string(room_alias)
-
         logger.debug("Got room name: %s", room_alias.to_string())
 
         room_id = content["room_id"]
@@ -161,7 +159,7 @@ class ClientDirectoryListServer(ClientV1RestServlet):
     def on_GET(self, request, room_id):
         room = yield self.store.get_room(room_id)
         if room is None:
-            raise SynapseError(400, "Unknown room")
+            raise NotFoundError("Unknown room")
 
         defer.returnValue((200, {
             "visibility": "public" if room["is_public"] else "private"
diff --git a/synapse/rest/client/v1/events.py b/synapse/rest/client/v1/events.py
index 701b6f549b..0f3a2e8b51 100644
--- a/synapse/rest/client/v1/events.py
+++ b/synapse/rest/client/v1/events.py
@@ -14,15 +14,15 @@
 # limitations under the License.
 
 """This module contains REST servlets to do with event streaming, /events."""
+import logging
+
 from twisted.internet import defer
 
 from synapse.api.errors import SynapseError
-from synapse.streams.config import PaginationConfig
-from .base import ClientV1RestServlet, client_path_patterns
 from synapse.events.utils import serialize_event
+from synapse.streams.config import PaginationConfig
 
-import logging
-
+from .base import ClientV1RestServlet, client_path_patterns
 
 logger = logging.getLogger(__name__)
 
@@ -88,7 +88,7 @@ class EventRestServlet(ClientV1RestServlet):
     @defer.inlineCallbacks
     def on_GET(self, request, event_id):
         requester = yield self.auth.get_user_by_req(request)
-        event = yield self.event_handler.get_event(requester.user, event_id)
+        event = yield self.event_handler.get_event(requester.user, None, event_id)
 
         time_now = self.clock.time_msec()
         if event:
diff --git a/synapse/rest/client/v1/initial_sync.py b/synapse/rest/client/v1/initial_sync.py
index 478e21eea8..fd5f85b53e 100644
--- a/synapse/rest/client/v1/initial_sync.py
+++ b/synapse/rest/client/v1/initial_sync.py
@@ -15,7 +15,9 @@
 
 from twisted.internet import defer
 
+from synapse.http.servlet import parse_boolean
 from synapse.streams.config import PaginationConfig
+
 from .base import ClientV1RestServlet, client_path_patterns
 
 
@@ -32,7 +34,7 @@ class InitialSyncRestServlet(ClientV1RestServlet):
         requester = yield self.auth.get_user_by_req(request)
         as_client_event = "raw" not in request.args
         pagination_config = PaginationConfig.from_request(request)
-        include_archived = request.args.get("archived", None) == ["true"]
+        include_archived = parse_boolean(request, "archived", default=False)
         content = yield self.initial_sync_handler.snapshot_all_rooms(
             user_id=requester.user.to_string(),
             pagin_config=pagination_config,
diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py
index 34df5be4e9..cb85fa1436 100644
--- a/synapse/rest/client/v1/login.py
+++ b/synapse/rest/client/v1/login.py
@@ -13,29 +13,26 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from twisted.internet import defer
-
-from synapse.api.errors import SynapseError, LoginError, Codes
-from synapse.types import UserID
-from synapse.http.server import finish_request
-from synapse.http.servlet import parse_json_object_from_request
-from synapse.util.msisdn import phone_number_to_msisdn
-
-from .base import ClientV1RestServlet, client_path_patterns
-
-import simplejson as json
+import logging
 import urllib
+import xml.etree.ElementTree as ET
+
 from six.moves.urllib import parse as urlparse
 
-import logging
-from saml2 import BINDING_HTTP_POST
-from saml2 import config
+from canonicaljson import json
+from saml2 import BINDING_HTTP_POST, config
 from saml2.client import Saml2Client
 
-import xml.etree.ElementTree as ET
-
+from twisted.internet import defer
 from twisted.web.client import PartialDownloadError
 
+from synapse.api.errors import Codes, LoginError, SynapseError
+from synapse.http.server import finish_request
+from synapse.http.servlet import parse_json_object_from_request
+from synapse.types import UserID
+from synapse.util.msisdn import phone_number_to_msisdn
+
+from .base import ClientV1RestServlet, client_path_patterns
 
 logger = logging.getLogger(__name__)
 
diff --git a/synapse/rest/client/v1/logout.py b/synapse/rest/client/v1/logout.py
index e092158cb7..430c692336 100644
--- a/synapse/rest/client/v1/logout.py
+++ b/synapse/rest/client/v1/logout.py
@@ -13,16 +13,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import logging
+
 from twisted.internet import defer
 
-from synapse.api.auth import get_access_token_from_request
 from synapse.api.errors import AuthError
 
 from .base import ClientV1RestServlet, client_path_patterns
 
-import logging
-
-
 logger = logging.getLogger(__name__)
 
 
@@ -52,7 +50,7 @@ class LogoutRestServlet(ClientV1RestServlet):
             if requester.device_id is None:
                 # the access token wasn't associated with a device.
                 # Just delete the access token
-                access_token = get_access_token_from_request(request)
+                access_token = self._auth.get_access_token_from_request(request)
                 yield self._auth_handler.delete_access_token(access_token)
             else:
                 yield self._device_handler.delete_device(
diff --git a/synapse/rest/client/v1/presence.py b/synapse/rest/client/v1/presence.py
index 647994bd53..b5a6d6aebf 100644
--- a/synapse/rest/client/v1/presence.py
+++ b/synapse/rest/client/v1/presence.py
@@ -15,17 +15,18 @@
 
 """ This module contains REST servlets to do with presence: /presence/<paths>
 """
+import logging
+
+from six import string_types
+
 from twisted.internet import defer
 
-from synapse.api.errors import SynapseError, AuthError
-from synapse.types import UserID
+from synapse.api.errors import AuthError, SynapseError
 from synapse.handlers.presence import format_user_presence_state
 from synapse.http.servlet import parse_json_object_from_request
-from .base import ClientV1RestServlet, client_path_patterns
-
-from six import string_types
+from synapse.types import UserID
 
-import logging
+from .base import ClientV1RestServlet, client_path_patterns
 
 logger = logging.getLogger(__name__)
 
@@ -83,7 +84,8 @@ class PresenceStatusRestServlet(ClientV1RestServlet):
         except Exception:
             raise SynapseError(400, "Unable to parse state")
 
-        yield self.presence_handler.set_state(user, state)
+        if self.hs.config.use_presence:
+            yield self.presence_handler.set_state(user, state)
 
         defer.returnValue((200, {}))
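Note that the write path is now gated on `hs.config.use_presence`: a deployment with presence disabled still answers 200 but silently drops the state change, so clients need no special handling. A one-function sketch of the pattern:

```python
def set_presence_state(config, presence_handler, user, state):
    # When presence is disabled the call degrades to a successful no-op
    # rather than an error, keeping clients oblivious.
    if getattr(config, "use_presence", True):
        presence_handler.set_state(user, state)
    return (200, {})
```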
 
diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py
index e4e3611a14..a23edd8fe5 100644
--- a/synapse/rest/client/v1/profile.py
+++ b/synapse/rest/client/v1/profile.py
@@ -16,9 +16,10 @@
 """ This module contains REST servlets to do with profile: /profile/<paths> """
 from twisted.internet import defer
 
-from .base import ClientV1RestServlet, client_path_patterns
-from synapse.types import UserID
 from synapse.http.servlet import parse_json_object_from_request
+from synapse.types import UserID
+
+from .base import ClientV1RestServlet, client_path_patterns
 
 
 class ProfileDisplaynameRestServlet(ClientV1RestServlet):
diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py
index 6bb4821ec6..6e95d9bec2 100644
--- a/synapse/rest/client/v1/push_rule.py
+++ b/synapse/rest/client/v1/push_rule.py
@@ -16,16 +16,18 @@
 from twisted.internet import defer
 
 from synapse.api.errors import (
-    SynapseError, UnrecognizedRequestError, NotFoundError, StoreError
+    NotFoundError,
+    StoreError,
+    SynapseError,
+    UnrecognizedRequestError,
 )
-from .base import ClientV1RestServlet, client_path_patterns
-from synapse.storage.push_rule import (
-    InconsistentRuleException, RuleNotFoundException
-)
-from synapse.push.clientformat import format_push_rules_for_user
+from synapse.http.servlet import parse_json_value_from_request, parse_string
 from synapse.push.baserules import BASE_RULE_IDS
+from synapse.push.clientformat import format_push_rules_for_user
 from synapse.push.rulekinds import PRIORITY_CLASS_MAP
-from synapse.http.servlet import parse_json_value_from_request
+from synapse.storage.push_rule import InconsistentRuleException, RuleNotFoundException
+
+from .base import ClientV1RestServlet, client_path_patterns
 
 
 class PushRuleRestServlet(ClientV1RestServlet):
@@ -73,13 +75,13 @@ class PushRuleRestServlet(ClientV1RestServlet):
         except InvalidRuleException as e:
             raise SynapseError(400, e.message)
 
-        before = request.args.get("before", None)
+        before = parse_string(request, "before")
         if before:
-            before = _namespaced_rule_id(spec, before[0])
+            before = _namespaced_rule_id(spec, before)
 
-        after = request.args.get("after", None)
+        after = parse_string(request, "after")
         if after:
-            after = _namespaced_rule_id(spec, after[0])
+            after = _namespaced_rule_id(spec, after)
 
         try:
             yield self.store.add_push_rule(
diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py
index 40e523cc5f..182a68b1e2 100644
--- a/synapse/rest/client/v1/pusher.py
+++ b/synapse/rest/client/v1/pusher.py
@@ -13,20 +13,22 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import logging
+
 from twisted.internet import defer
 
-from synapse.api.errors import SynapseError, Codes
-from synapse.push import PusherConfigException
+from synapse.api.errors import Codes, StoreError, SynapseError
+from synapse.http.server import finish_request
 from synapse.http.servlet import (
-    parse_json_object_from_request, parse_string, RestServlet
+    RestServlet,
+    assert_params_in_dict,
+    parse_json_object_from_request,
+    parse_string,
 )
-from synapse.http.server import finish_request
-from synapse.api.errors import StoreError
+from synapse.push import PusherConfigException
 
 from .base import ClientV1RestServlet, client_path_patterns
 
-import logging
-
 logger = logging.getLogger(__name__)
 
 
@@ -90,15 +92,11 @@ class PushersSetRestServlet(ClientV1RestServlet):
             )
             defer.returnValue((200, {}))
 
-        reqd = ['kind', 'app_id', 'app_display_name',
-                'device_display_name', 'pushkey', 'lang', 'data']
-        missing = []
-        for i in reqd:
-            if i not in content:
-                missing.append(i)
-        if len(missing):
-            raise SynapseError(400, "Missing parameters: " + ','.join(missing),
-                               errcode=Codes.MISSING_PARAM)
+        assert_params_in_dict(
+            content,
+            ['kind', 'app_id', 'app_display_name',
+             'device_display_name', 'pushkey', 'lang', 'data']
+        )
 
         logger.debug("set pushkey %s to kind %s", content['pushkey'], content['kind'])
         logger.debug("Got pushers request with body: %r", content)
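`assert_params_in_dict` centralises the missing-parameter check that was previously hand-rolled per servlet, raising a 400 with `M_MISSING_PARAM` that names every absent key at once. A toy re-implementation of its contract (simplified; the real helper lives in `synapse.http.servlet`):

```python
class ToySynapseError(Exception):
    def __init__(self, code, msg, errcode="M_UNKNOWN"):
        super(ToySynapseError, self).__init__(msg)
        self.code = code
        self.errcode = errcode

def assert_params_in_dict(body, required):
    absent = [k for k in required if k not in body]
    if absent:
        raise ToySynapseError(
            400, "Missing params: %r" % (absent,), errcode="M_MISSING_PARAM"
        )

assert_params_in_dict({"kind": "http", "app_id": "demo"}, ["kind", "app_id"])
try:
    assert_params_in_dict({"kind": "http"}, ["kind", "pushkey", "lang"])
except ToySynapseError as e:
    print(e.code, e.errcode)  # 400 M_MISSING_PARAM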
@@ -147,7 +145,7 @@ class PushersRemoveRestServlet(RestServlet):
     SUCCESS_HTML = "<html><body>You have been unsubscribed</body></html>"
 
     def __init__(self, hs):
-        super(RestServlet, self).__init__()
+        super(PushersRemoveRestServlet, self).__init__()
         self.hs = hs
         self.notifier = hs.get_notifier()
         self.auth = hs.get_auth()
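The `super(RestServlet, self).__init__()` being replaced here was a genuine bug: in the two-argument form of `super`, the first argument must be the class being defined, otherwise attribute lookup starts too far along the MRO and the intermediate `__init__`s are skipped. A minimal illustration:

```python
class Base(object):
    def __init__(self):
        self.ready = True

class Child(Base):
    def __init__(self):
        # Correct: name the defining class so Base.__init__ actually runs.
        # super(Base, self).__init__() would resolve past Base to object,
        # leaving self.ready unset.
        super(Child, self).__init__()

assert Child().ready
```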
diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
index 0b984987ed..976d98387d 100644
--- a/synapse/rest/client/v1/room.py
+++ b/synapse/rest/client/v1/room.py
@@ -15,23 +15,28 @@
 # limitations under the License.
 
 """ This module contains REST servlets to do with rooms: /rooms/<paths> """
+import logging
+
+from six.moves.urllib import parse as urlparse
+
+from canonicaljson import json
+
 from twisted.internet import defer
 
-from .base import ClientV1RestServlet, client_path_patterns
-from synapse.api.errors import SynapseError, Codes, AuthError
-from synapse.streams.config import PaginationConfig
 from synapse.api.constants import EventTypes, Membership
+from synapse.api.errors import AuthError, Codes, SynapseError
 from synapse.api.filtering import Filter
-from synapse.types import UserID, RoomID, RoomAlias, ThirdPartyInstanceID
-from synapse.events.utils import serialize_event, format_event_for_client_v2
+from synapse.events.utils import format_event_for_client_v2, serialize_event
 from synapse.http.servlet import (
-    parse_json_object_from_request, parse_string, parse_integer
+    assert_params_in_dict,
+    parse_integer,
+    parse_json_object_from_request,
+    parse_string,
 )
+from synapse.streams.config import PaginationConfig
+from synapse.types import RoomAlias, RoomID, StreamToken, ThirdPartyInstanceID, UserID
 
-from six.moves.urllib import parse as urlparse
-
-import logging
-import simplejson as json
+from .base import ClientV1RestServlet, client_path_patterns
 
 logger = logging.getLogger(__name__)
 
@@ -85,6 +90,7 @@ class RoomStateEventRestServlet(ClientV1RestServlet):
         self.handlers = hs.get_handlers()
         self.event_creation_hander = hs.get_event_creation_handler()
         self.room_member_handler = hs.get_room_member_handler()
+        self.message_handler = hs.get_message_handler()
 
     def register(self, http_server):
         # /room/$roomid/state/$eventtype
@@ -119,7 +125,7 @@ class RoomStateEventRestServlet(ClientV1RestServlet):
         format = parse_string(request, "format", default="content",
                               allowed_values=["content", "event"])
 
-        msg_handler = self.handlers.message_handler
+        msg_handler = self.message_handler
         data = yield msg_handler.get_room_data(
             user_id=requester.user.to_string(),
             room_id=room_id,
@@ -372,22 +378,45 @@ class RoomMemberListRestServlet(ClientV1RestServlet):
 
     def __init__(self, hs):
         super(RoomMemberListRestServlet, self).__init__(hs)
-        self.handlers = hs.get_handlers()
+        self.message_handler = hs.get_message_handler()
 
     @defer.inlineCallbacks
     def on_GET(self, request, room_id):
         # TODO support Pagination stream API (limit/tokens)
         requester = yield self.auth.get_user_by_req(request)
-        handler = self.handlers.message_handler
+        handler = self.message_handler
+
+        # request the state as of a given event, as identified by a stream token,
+        # for consistency with /messages etc.
+        # useful for getting the membership in retrospect as of a given /sync
+        # response.
+        at_token_string = parse_string(request, "at")
+        if at_token_string is None:
+            at_token = None
+        else:
+            at_token = StreamToken.from_string(at_token_string)
+
+        # let you filter down on particular memberships.
+        # XXX: this may not be the best shape for this API - we could pass in a filter
+        # instead, except filters aren't currently aware of memberships.
+        # See https://github.com/matrix-org/matrix-doc/issues/1337 for more details.
+        membership = parse_string(request, "membership")
+        not_membership = parse_string(request, "not_membership")
+
         events = yield handler.get_state_events(
             room_id=room_id,
             user_id=requester.user.to_string(),
+            at_token=at_token,
+            types=[(EventTypes.Member, None)],
         )
 
         chunk = []
 
         for event in events:
-            if event["type"] != EventTypes.Member:
+            if (
+                (membership and event['content'].get("membership") != membership) or
+                (not_membership and event['content'].get("membership") == not_membership)
+            ):
                 continue
             chunk.append(event)
 
@@ -396,12 +425,14 @@ class RoomMemberListRestServlet(ClientV1RestServlet):
         }))
 
 
+# deprecated in favour of /members?membership=join?
+# except it does custom AS logic and has a simpler return format
 class JoinedRoomMemberListRestServlet(ClientV1RestServlet):
     PATTERNS = client_path_patterns("/rooms/(?P<room_id>[^/]*)/joined_members$")
 
     def __init__(self, hs):
         super(JoinedRoomMemberListRestServlet, self).__init__(hs)
-        self.message_handler = hs.get_handlers().message_handler
+        self.message_handler = hs.get_message_handler()
 
     @defer.inlineCallbacks
     def on_GET(self, request, room_id):
@@ -422,7 +453,7 @@ class RoomMessageListRestServlet(ClientV1RestServlet):
 
     def __init__(self, hs):
         super(RoomMessageListRestServlet, self).__init__(hs)
-        self.handlers = hs.get_handlers()
+        self.pagination_handler = hs.get_pagination_handler()
 
     @defer.inlineCallbacks
     def on_GET(self, request, room_id):
@@ -431,14 +462,13 @@ class RoomMessageListRestServlet(ClientV1RestServlet):
             request, default_limit=10,
         )
         as_client_event = "raw" not in request.args
-        filter_bytes = request.args.get("filter", None)
+        filter_bytes = parse_string(request, "filter")
         if filter_bytes:
-            filter_json = urlparse.unquote(filter_bytes[-1]).decode("UTF-8")
+            filter_json = urlparse.unquote(filter_bytes).decode("UTF-8")
             event_filter = Filter(json.loads(filter_json))
         else:
             event_filter = None
-        handler = self.handlers.message_handler
-        msgs = yield handler.get_messages(
+        msgs = yield self.pagination_handler.get_messages(
             room_id=room_id,
             requester=requester,
             pagin_config=pagination_config,
@@ -455,14 +485,13 @@ class RoomStateRestServlet(ClientV1RestServlet):
 
     def __init__(self, hs):
         super(RoomStateRestServlet, self).__init__(hs)
-        self.handlers = hs.get_handlers()
+        self.message_handler = hs.get_message_handler()
 
     @defer.inlineCallbacks
     def on_GET(self, request, room_id):
         requester = yield self.auth.get_user_by_req(request, allow_guest=True)
-        handler = self.handlers.message_handler
         # Get all the current state for this room
-        events = yield handler.get_state_events(
+        events = yield self.message_handler.get_state_events(
             room_id=room_id,
             user_id=requester.user.to_string(),
             is_guest=requester.is_guest,
@@ -502,8 +531,8 @@ class RoomEventServlet(ClientV1RestServlet):
 
     @defer.inlineCallbacks
     def on_GET(self, request, room_id, event_id):
-        requester = yield self.auth.get_user_by_req(request)
-        event = yield self.event_handler.get_event(requester.user, event_id)
+        requester = yield self.auth.get_user_by_req(request, allow_guest=True)
+        event = yield self.event_handler.get_event(requester.user, room_id, event_id)
 
         time_now = self.clock.time_msec()
         if event:
@@ -520,19 +549,28 @@ class RoomEventContextServlet(ClientV1RestServlet):
     def __init__(self, hs):
         super(RoomEventContextServlet, self).__init__(hs)
         self.clock = hs.get_clock()
-        self.handlers = hs.get_handlers()
+        self.room_context_handler = hs.get_room_context_handler()
 
     @defer.inlineCallbacks
     def on_GET(self, request, room_id, event_id):
         requester = yield self.auth.get_user_by_req(request, allow_guest=True)
 
-        limit = int(request.args.get("limit", [10])[0])
+        limit = parse_integer(request, "limit", default=10)
 
-        results = yield self.handlers.room_context_handler.get_event_context(
+        # picking the API shape for symmetry with /messages
+        filter_bytes = parse_string(request, "filter")
+        if filter_bytes:
+            filter_json = urlparse.unquote(filter_bytes).decode("UTF-8")
+            event_filter = Filter(json.loads(filter_json))
+        else:
+            event_filter = None
+
+        results = yield self.room_context_handler.get_event_context(
             requester.user,
             room_id,
             event_id,
             limit,
+            event_filter,
         )
 
         if not results:
@@ -632,8 +670,7 @@ class RoomMembershipRestServlet(ClientV1RestServlet):
 
         target = requester.user
         if membership_action in ["invite", "ban", "unban", "kick"]:
-            if "user_id" not in content:
-                raise SynapseError(400, "Missing user_id key.")
+            assert_params_in_dict(content, ["user_id"])
             target = UserID.from_string(content["user_id"])
 
         event_content = None
@@ -760,7 +797,7 @@ class SearchRestServlet(ClientV1RestServlet):
 
         content = parse_json_object_from_request(request)
 
-        batch = request.args.get("next_batch", [None])[0]
+        batch = parse_string(request, "next_batch")
         results = yield self.handlers.search_handler.search(
             requester.user,
             content,
@@ -828,10 +865,13 @@ def register_servlets(hs, http_server):
     RoomSendEventRestServlet(hs).register(http_server)
     PublicRoomListRestServlet(hs).register(http_server)
     RoomStateRestServlet(hs).register(http_server)
-    RoomInitialSyncRestServlet(hs).register(http_server)
     RoomRedactEventRestServlet(hs).register(http_server)
     RoomTypingRestServlet(hs).register(http_server)
     SearchRestServlet(hs).register(http_server)
     JoinedRoomsRestServlet(hs).register(http_server)
     RoomEventServlet(hs).register(http_server)
     RoomEventContextServlet(hs).register(http_server)
+
+
+def register_deprecated_servlets(hs, http_server):
+    RoomInitialSyncRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v1/voip.py b/synapse/rest/client/v1/voip.py
index c43b30b73a..62f4c3d93e 100644
--- a/synapse/rest/client/v1/voip.py
+++ b/synapse/rest/client/v1/voip.py
@@ -13,16 +13,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import base64
+import hashlib
+import hmac
+
 from twisted.internet import defer
 
 from .base import ClientV1RestServlet, client_path_patterns
 
 
-import hmac
-import hashlib
-import base64
-
-
 class VoipRestServlet(ClientV1RestServlet):
     PATTERNS = client_path_patterns("/voip/turnServer$")
 
diff --git a/synapse/rest/client/v1_only/__init__.py b/synapse/rest/client/v1_only/__init__.py
new file mode 100644
index 0000000000..936f902ace
--- /dev/null
+++ b/synapse/rest/client/v1_only/__init__.py
@@ -0,0 +1,3 @@
+"""
+REST APIs that are only used in v1 (the legacy API).
+"""
diff --git a/synapse/rest/client/v1_only/base.py b/synapse/rest/client/v1_only/base.py
new file mode 100644
index 0000000000..9d4db7437c
--- /dev/null
+++ b/synapse/rest/client/v1_only/base.py
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This module contains base REST classes for constructing client v1 servlets.
+"""
+
+import re
+
+from synapse.api.urls import CLIENT_PREFIX
+
+
+def v1_only_client_path_patterns(path_regex, include_in_unstable=True):
+    """Creates a regex compiled client path with the correct client path
+    prefix.
+
+    Args:
+        path_regex (str): The regex string to match. This should NOT have a ^
+        as this will be prefixed.
+    Returns:
+        list of SRE_Pattern
+    """
+    patterns = [re.compile("^" + CLIENT_PREFIX + path_regex)]
+    if include_in_unstable:
+        unstable_prefix = CLIENT_PREFIX.replace("/api/v1", "/unstable")
+        patterns.append(re.compile("^" + unstable_prefix + path_regex))
+    return patterns
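For illustration, this is what the new helper expands to for the legacy `/register` path; the `CLIENT_PREFIX` value is an assumption based on `synapse.api.urls` of this era:

```python
import re

CLIENT_PREFIX = "/_matrix/client/api/v1"  # assumed value from synapse.api.urls

def v1_only_client_path_patterns(path_regex, include_in_unstable=True):
    patterns = [re.compile("^" + CLIENT_PREFIX + path_regex)]
    if include_in_unstable:
        unstable_prefix = CLIENT_PREFIX.replace("/api/v1", "/unstable")
        patterns.append(re.compile("^" + unstable_prefix + path_regex))
    return patterns

for p in v1_only_client_path_patterns("/register$", include_in_unstable=False):
    print(p.pattern)  # ^/_matrix/client/api/v1/register$
```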
diff --git a/synapse/rest/client/v1/register.py b/synapse/rest/client/v1_only/register.py
index 9b3022e0b0..dadb376b02 100644
--- a/synapse/rest/client/v1/register.py
+++ b/synapse/rest/client/v1_only/register.py
@@ -14,23 +14,21 @@
 # limitations under the License.
 
 """This module contains REST servlets to do with registration: /register"""
+import hmac
+import logging
+from hashlib import sha1
+
 from twisted.internet import defer
 
-from synapse.api.errors import SynapseError, Codes
-from synapse.api.constants import LoginType
-from synapse.api.auth import get_access_token_from_request
-from .base import ClientV1RestServlet, client_path_patterns
 import synapse.util.stringutils as stringutils
-from synapse.http.servlet import parse_json_object_from_request
+from synapse.api.constants import LoginType
+from synapse.api.errors import Codes, SynapseError
+from synapse.config.server import is_threepid_reserved
+from synapse.http.servlet import assert_params_in_dict, parse_json_object_from_request
+from synapse.rest.client.v1.base import ClientV1RestServlet
 from synapse.types import create_requester
 
-from synapse.util.async import run_on_reactor
-
-from hashlib import sha1
-import hmac
-import logging
-
-from six import string_types
+from .base import v1_only_client_path_patterns
 
 logger = logging.getLogger(__name__)
 
@@ -53,7 +51,7 @@ class RegisterRestServlet(ClientV1RestServlet):
     handler doesn't have a concept of multi-stages or sessions.
     """
 
-    PATTERNS = client_path_patterns("/register$", releases=(), include_in_unstable=False)
+    PATTERNS = v1_only_client_path_patterns("/register$", include_in_unstable=False)
 
     def __init__(self, hs):
         """
@@ -68,6 +66,7 @@ class RegisterRestServlet(ClientV1RestServlet):
         # TODO: persistent storage
         self.sessions = {}
         self.enable_registration = hs.config.enable_registration
+        self.auth = hs.get_auth()
         self.auth_handler = hs.get_auth_handler()
         self.handlers = hs.get_handlers()
 
@@ -125,19 +124,15 @@ class RegisterRestServlet(ClientV1RestServlet):
         session = (register_json["session"]
                    if "session" in register_json else None)
         login_type = None
-        if "type" not in register_json:
-            raise SynapseError(400, "Missing 'type' key.")
+        assert_params_in_dict(register_json, ["type"])
 
         try:
             login_type = register_json["type"]
 
             is_application_server = login_type == LoginType.APPLICATION_SERVICE
-            is_using_shared_secret = login_type == LoginType.SHARED_SECRET
-
             can_register = (
                 self.enable_registration
                 or is_application_server
-                or is_using_shared_secret
             )
             if not can_register:
                 raise SynapseError(403, "Registration has been disabled")
@@ -147,7 +142,6 @@ class RegisterRestServlet(ClientV1RestServlet):
                 LoginType.PASSWORD: self._do_password,
                 LoginType.EMAIL_IDENTITY: self._do_email_identity,
                 LoginType.APPLICATION_SERVICE: self._do_app_service,
-                LoginType.SHARED_SECRET: self._do_shared_secret,
             }
 
             session_info = self._get_session_info(request, session)
@@ -272,7 +266,6 @@ class RegisterRestServlet(ClientV1RestServlet):
 
     @defer.inlineCallbacks
     def _do_password(self, request, register_json, session):
-        yield run_on_reactor()
         if (self.hs.config.enable_registration_captcha and
                 not session[LoginType.RECAPTCHA]):
             # captcha should've been done by this stage!
@@ -289,12 +282,20 @@ class RegisterRestServlet(ClientV1RestServlet):
             register_json["user"].encode("utf-8")
             if "user" in register_json else None
         )
+        threepid = None
+        if session.get(LoginType.EMAIL_IDENTITY):
+            threepid = session["threepidCreds"]
 
         handler = self.handlers.registration_handler
         (user_id, token) = yield handler.register(
             localpart=desired_user_id,
-            password=password
+            password=password,
+            threepid=threepid,
         )
+        # Necessary due to auth checks prior to the threepid being
+        # written to the db
+        if is_threepid_reserved(self.hs.config, threepid):
+            yield self.store.upsert_monthly_active_user(user_id)
 
         if session[LoginType.EMAIL_IDENTITY]:
             logger.debug("Binding emails %s to %s" % (
@@ -312,11 +313,9 @@ class RegisterRestServlet(ClientV1RestServlet):
 
     @defer.inlineCallbacks
     def _do_app_service(self, request, register_json, session):
-        as_token = get_access_token_from_request(request)
-
-        if "user" not in register_json:
-            raise SynapseError(400, "Expected 'user' key.")
+        as_token = self.auth.get_access_token_from_request(request)
 
+        assert_params_in_dict(register_json, ["user"])
         user_localpart = register_json["user"].encode("utf-8")
 
         handler = self.handlers.registration_handler
@@ -331,69 +330,12 @@ class RegisterRestServlet(ClientV1RestServlet):
             "home_server": self.hs.hostname,
         })
 
-    @defer.inlineCallbacks
-    def _do_shared_secret(self, request, register_json, session):
-        yield run_on_reactor()
-
-        if not isinstance(register_json.get("mac", None), string_types):
-            raise SynapseError(400, "Expected mac.")
-        if not isinstance(register_json.get("user", None), string_types):
-            raise SynapseError(400, "Expected 'user' key.")
-        if not isinstance(register_json.get("password", None), string_types):
-            raise SynapseError(400, "Expected 'password' key.")
-
-        if not self.hs.config.registration_shared_secret:
-            raise SynapseError(400, "Shared secret registration is not enabled")
-
-        user = register_json["user"].encode("utf-8")
-        password = register_json["password"].encode("utf-8")
-        admin = register_json.get("admin", None)
-
-        # Its important to check as we use null bytes as HMAC field separators
-        if b"\x00" in user:
-            raise SynapseError(400, "Invalid user")
-        if b"\x00" in password:
-            raise SynapseError(400, "Invalid password")
-
-        # str() because otherwise hmac complains that 'unicode' does not
-        # have the buffer interface
-        got_mac = str(register_json["mac"])
-
-        want_mac = hmac.new(
-            key=self.hs.config.registration_shared_secret.encode(),
-            digestmod=sha1,
-        )
-        want_mac.update(user)
-        want_mac.update(b"\x00")
-        want_mac.update(password)
-        want_mac.update(b"\x00")
-        want_mac.update(b"admin" if admin else b"notadmin")
-        want_mac = want_mac.hexdigest()
-
-        if compare_digest(want_mac, got_mac):
-            handler = self.handlers.registration_handler
-            user_id, token = yield handler.register(
-                localpart=user.lower(),
-                password=password,
-                admin=bool(admin),
-            )
-            self._remove_session(session)
-            defer.returnValue({
-                "user_id": user_id,
-                "access_token": token,
-                "home_server": self.hs.hostname,
-            })
-        else:
-            raise SynapseError(
-                403, "HMAC incorrect",
-            )
-
 
 class CreateUserRestServlet(ClientV1RestServlet):
     """Handles user creation via a server-to-server interface
     """
 
-    PATTERNS = client_path_patterns("/createUser$", releases=())
+    PATTERNS = v1_only_client_path_patterns("/createUser$")
 
     def __init__(self, hs):
         super(CreateUserRestServlet, self).__init__(hs)
@@ -404,7 +346,7 @@ class CreateUserRestServlet(ClientV1RestServlet):
     def on_POST(self, request):
         user_json = parse_json_object_from_request(request)
 
-        access_token = get_access_token_from_request(request)
+        access_token = self.auth.get_access_token_from_request(request)
         app_service = self.store.get_app_service_by_token(
             access_token
         )
@@ -423,13 +365,7 @@ class CreateUserRestServlet(ClientV1RestServlet):
 
     @defer.inlineCallbacks
     def _do_create(self, requester, user_json):
-        yield run_on_reactor()
-
-        if "localpart" not in user_json:
-            raise SynapseError(400, "Expected 'localpart' key.")
-
-        if "displayname" not in user_json:
-            raise SynapseError(400, "Expected 'displayname' key.")
+        assert_params_in_dict(user_json, ["localpart", "displayname"])
 
         localpart = user_json["localpart"].encode("utf-8")
         displayname = user_json["displayname"].encode("utf-8")
diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py
index 30523995af..372648cafd 100644
--- a/synapse/rest/client/v2_alpha/account.py
+++ b/synapse/rest/client/v2_alpha/account.py
@@ -1,6 +1,7 @@
 # -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
 # Copyright 2017 Vector Creations Ltd
+# Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,18 +16,20 @@
 # limitations under the License.
 import logging
 
+from six.moves import http_client
+
 from twisted.internet import defer
 
-from synapse.api.auth import has_access_token
 from synapse.api.constants import LoginType
 from synapse.api.errors import Codes, SynapseError
 from synapse.http.servlet import (
-    RestServlet, assert_params_in_request,
+    RestServlet,
+    assert_params_in_dict,
     parse_json_object_from_request,
 )
-from synapse.util.async import run_on_reactor
 from synapse.util.msisdn import phone_number_to_msisdn
 from synapse.util.threepids import check_3pid_allowed
+
 from ._base import client_v2_patterns, interactive_auth_handler
 
 logger = logging.getLogger(__name__)
@@ -44,7 +47,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet):
     def on_POST(self, request):
         body = parse_json_object_from_request(request)
 
-        assert_params_in_request(body, [
+        assert_params_in_dict(body, [
             'id_server', 'client_secret', 'email', 'send_attempt'
         ])
 
@@ -77,7 +80,7 @@ class MsisdnPasswordRequestTokenRestServlet(RestServlet):
     def on_POST(self, request):
         body = parse_json_object_from_request(request)
 
-        assert_params_in_request(body, [
+        assert_params_in_dict(body, [
             'id_server', 'client_secret',
             'country', 'phone_number', 'send_attempt',
         ])
@@ -126,7 +129,7 @@ class PasswordRestServlet(RestServlet):
         #
         # In the second case, we require a password to confirm their identity.
 
-        if has_access_token(request):
+        if self.auth.has_access_token(request):
             requester = yield self.auth.get_user_by_req(request)
             params = yield self.auth_handler.validate_user_via_ui_auth(
                 requester, body, self.hs.get_ip_from_request(request),
@@ -156,11 +159,10 @@ class PasswordRestServlet(RestServlet):
                     raise SynapseError(404, "Email address not found", Codes.NOT_FOUND)
                 user_id = threepid_user_id
             else:
-                logger.error("Auth succeeded but no known type!", result.keys())
+                logger.error("Auth succeeded but no known type! %r", result.keys())
                 raise SynapseError(500, "", Codes.UNKNOWN)
 
-        if 'new_password' not in params:
-            raise SynapseError(400, "", Codes.MISSING_PARAM)
+        assert_params_in_dict(params, ["new_password"])
         new_password = params['new_password']
 
         yield self._set_password_handler.set_password(
@@ -187,23 +189,37 @@ class DeactivateAccountRestServlet(RestServlet):
     @defer.inlineCallbacks
     def on_POST(self, request):
         body = parse_json_object_from_request(request)
+        erase = body.get("erase", False)
+        if not isinstance(erase, bool):
+            raise SynapseError(
+                http_client.BAD_REQUEST,
+                "Param 'erase' must be a boolean, if given",
+                Codes.BAD_JSON,
+            )
 
         requester = yield self.auth.get_user_by_req(request)
 
         # allow ASes to deactivate their own users
         if requester.app_service:
             yield self._deactivate_account_handler.deactivate_account(
-                requester.user.to_string()
+                requester.user.to_string(), erase,
             )
             defer.returnValue((200, {}))
 
         yield self.auth_handler.validate_user_via_ui_auth(
             requester, body, self.hs.get_ip_from_request(request),
         )
-        yield self._deactivate_account_handler.deactivate_account(
-            requester.user.to_string(),
+        result = yield self._deactivate_account_handler.deactivate_account(
+            requester.user.to_string(), erase,
         )
-        defer.returnValue((200, {}))
+        if result:
+            id_server_unbind_result = "success"
+        else:
+            id_server_unbind_result = "no-support"
+
+        defer.returnValue((200, {
+            "id_server_unbind_result": id_server_unbind_result,
+        }))
 
 
 class EmailThreepidRequestTokenRestServlet(RestServlet):
@@ -218,15 +234,10 @@ class EmailThreepidRequestTokenRestServlet(RestServlet):
     @defer.inlineCallbacks
     def on_POST(self, request):
         body = parse_json_object_from_request(request)
-
-        required = ['id_server', 'client_secret', 'email', 'send_attempt']
-        absent = []
-        for k in required:
-            if k not in body:
-                absent.append(k)
-
-        if absent:
-            raise SynapseError(400, "Missing params: %r" % absent, Codes.MISSING_PARAM)
+        assert_params_in_dict(
+            body,
+            ['id_server', 'client_secret', 'email', 'send_attempt'],
+        )
 
         if not check_3pid_allowed(self.hs, "email", body['email']):
             raise SynapseError(
@@ -256,18 +267,10 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet):
     @defer.inlineCallbacks
     def on_POST(self, request):
         body = parse_json_object_from_request(request)
-
-        required = [
+        assert_params_in_dict(body, [
             'id_server', 'client_secret',
             'country', 'phone_number', 'send_attempt',
-        ]
-        absent = []
-        for k in required:
-            if k not in body:
-                absent.append(k)
-
-        if absent:
-            raise SynapseError(400, "Missing params: %r" % absent, Codes.MISSING_PARAM)
+        ])
 
         msisdn = phone_number_to_msisdn(body['country'], body['phone_number'])
 
@@ -300,8 +303,6 @@ class ThreepidRestServlet(RestServlet):
 
     @defer.inlineCallbacks
     def on_GET(self, request):
-        yield run_on_reactor()
-
         requester = yield self.auth.get_user_by_req(request)
 
         threepids = yield self.datastore.user_get_threepids(
@@ -312,8 +313,6 @@ class ThreepidRestServlet(RestServlet):
 
     @defer.inlineCallbacks
     def on_POST(self, request):
-        yield run_on_reactor()
-
         body = parse_json_object_from_request(request)
 
         threePidCreds = body.get('threePidCreds')
@@ -365,27 +364,31 @@ class ThreepidDeleteRestServlet(RestServlet):
 
     @defer.inlineCallbacks
     def on_POST(self, request):
-        yield run_on_reactor()
-
         body = parse_json_object_from_request(request)
-
-        required = ['medium', 'address']
-        absent = []
-        for k in required:
-            if k not in body:
-                absent.append(k)
-
-        if absent:
-            raise SynapseError(400, "Missing params: %r" % absent, Codes.MISSING_PARAM)
+        assert_params_in_dict(body, ['medium', 'address'])
 
         requester = yield self.auth.get_user_by_req(request)
         user_id = requester.user.to_string()
 
-        yield self.auth_handler.delete_threepid(
-            user_id, body['medium'], body['address']
-        )
+        try:
+            ret = yield self.auth_handler.delete_threepid(
+                user_id, body['medium'], body['address']
+            )
+        except Exception:
+            # NB. This endpoint should succeed if there is nothing to
+            # delete, so it should only throw if something is wrong
+            # that we ought to care about.
+            logger.exception("Failed to remove threepid")
+            raise SynapseError(500, "Failed to remove threepid")
+
+        if ret:
+            id_server_unbind_result = "success"
+        else:
+            id_server_unbind_result = "no-support"
 
-        defer.returnValue((200, {}))
+        defer.returnValue((200, {
+            "id_server_unbind_result": id_server_unbind_result,
+        }))
 
 
 class WhoamiRestServlet(RestServlet):
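
Both the deactivation and 3pid-deletion responses above now report whether the identity server supported unbinding. A hedged client-side sketch (homeserver URL, token and UI-auth values are placeholders; the endpoint path is the r0 client-server one):

    import requests

    access_token = "EXAMPLE_TOKEN"  # placeholder credential
    resp = requests.post(
        "https://homeserver.example/_matrix/client/r0/account/deactivate",
        headers={"Authorization": "Bearer " + access_token},
        json={
            "erase": True,  # must be a boolean if supplied, per the check above
            "auth": {
                "type": "m.login.password",
                "user": "@alice:homeserver.example",
                "password": "correct-horse",
            },
        },
    )
    print(resp.json())  # {"id_server_unbind_result": "success"} or "no-support"
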
diff --git a/synapse/rest/client/v2_alpha/account_data.py b/synapse/rest/client/v2_alpha/account_data.py
index 0e0a187efd..371e9aa354 100644
--- a/synapse/rest/client/v2_alpha/account_data.py
+++ b/synapse/rest/client/v2_alpha/account_data.py
@@ -13,14 +13,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import client_v2_patterns
-
-from synapse.http.servlet import RestServlet, parse_json_object_from_request
-from synapse.api.errors import AuthError, SynapseError
+import logging
 
 from twisted.internet import defer
 
-import logging
+from synapse.api.errors import AuthError, SynapseError
+from synapse.http.servlet import RestServlet, parse_json_object_from_request
+
+from ._base import client_v2_patterns
 
 logger = logging.getLogger(__name__)
 
diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py
index d6f3a19648..bd8b5f4afa 100644
--- a/synapse/rest/client/v2_alpha/auth.py
+++ b/synapse/rest/client/v2_alpha/auth.py
@@ -13,6 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import logging
+
 from twisted.internet import defer
 
 from synapse.api.constants import LoginType
@@ -23,9 +25,6 @@ from synapse.http.servlet import RestServlet
 
 from ._base import client_v2_patterns
 
-import logging
-
-
 logger = logging.getLogger(__name__)
 
 RECAPTCHA_TEMPLATE = """
diff --git a/synapse/rest/client/v2_alpha/devices.py b/synapse/rest/client/v2_alpha/devices.py
index 35d58b367a..9b75bb1377 100644
--- a/synapse/rest/client/v2_alpha/devices.py
+++ b/synapse/rest/client/v2_alpha/devices.py
@@ -18,13 +18,18 @@ import logging
 from twisted.internet import defer
 
 from synapse.api import errors
-from synapse.http import servlet
+from synapse.http.servlet import (
+    RestServlet,
+    assert_params_in_dict,
+    parse_json_object_from_request,
+)
+
 from ._base import client_v2_patterns, interactive_auth_handler
 
 logger = logging.getLogger(__name__)
 
 
-class DevicesRestServlet(servlet.RestServlet):
+class DevicesRestServlet(RestServlet):
     PATTERNS = client_v2_patterns("/devices$", v2_alpha=False)
 
     def __init__(self, hs):
@@ -46,7 +51,7 @@ class DevicesRestServlet(servlet.RestServlet):
         defer.returnValue((200, {"devices": devices}))
 
 
-class DeleteDevicesRestServlet(servlet.RestServlet):
+class DeleteDevicesRestServlet(RestServlet):
     """
     API for bulk deletion of devices. Accepts a JSON object with a devices
     key which lists the device_ids to delete. Requires user interactive auth.
@@ -66,19 +71,17 @@ class DeleteDevicesRestServlet(servlet.RestServlet):
         requester = yield self.auth.get_user_by_req(request)
 
         try:
-            body = servlet.parse_json_object_from_request(request)
+            body = parse_json_object_from_request(request)
         except errors.SynapseError as e:
             if e.errcode == errors.Codes.NOT_JSON:
-                # deal with older clients which didn't pass a J*DELETESON dict
+                # DELETE
+                # deal with older clients which didn't pass a JSON dict
                 # the same as those that pass an empty dict
                 body = {}
             else:
                 raise e
 
-        if 'devices' not in body:
-            raise errors.SynapseError(
-                400, "No devices supplied", errcode=errors.Codes.MISSING_PARAM
-            )
+        assert_params_in_dict(body, ["devices"])
 
         yield self.auth_handler.validate_user_via_ui_auth(
             requester, body, self.hs.get_ip_from_request(request),
@@ -91,7 +94,7 @@ class DeleteDevicesRestServlet(servlet.RestServlet):
         defer.returnValue((200, {}))
 
 
-class DeviceRestServlet(servlet.RestServlet):
+class DeviceRestServlet(RestServlet):
     PATTERNS = client_v2_patterns("/devices/(?P<device_id>[^/]*)$", v2_alpha=False)
 
     def __init__(self, hs):
@@ -120,7 +123,7 @@ class DeviceRestServlet(servlet.RestServlet):
         requester = yield self.auth.get_user_by_req(request)
 
         try:
-            body = servlet.parse_json_object_from_request(request)
+            body = parse_json_object_from_request(request)
 
         except errors.SynapseError as e:
             if e.errcode == errors.Codes.NOT_JSON:
@@ -143,7 +146,7 @@ class DeviceRestServlet(servlet.RestServlet):
     def on_PUT(self, request, device_id):
         requester = yield self.auth.get_user_by_req(request, allow_guest=True)
 
-        body = servlet.parse_json_object_from_request(request)
+        body = parse_json_object_from_request(request)
         yield self.device_handler.update_device(
             requester.user.to_string(),
             device_id,
diff --git a/synapse/rest/client/v2_alpha/filter.py b/synapse/rest/client/v2_alpha/filter.py
index 1b9dc4528d..ae86728879 100644
--- a/synapse/rest/client/v2_alpha/filter.py
+++ b/synapse/rest/client/v2_alpha/filter.py
@@ -13,17 +13,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import logging
+
 from twisted.internet import defer
 
-from synapse.api.errors import AuthError, SynapseError, StoreError, Codes
+from synapse.api.errors import AuthError, Codes, StoreError, SynapseError
 from synapse.http.servlet import RestServlet, parse_json_object_from_request
 from synapse.types import UserID
 
-from ._base import client_v2_patterns
-from ._base import set_timeline_upper_limit
-
-import logging
-
+from ._base import client_v2_patterns, set_timeline_upper_limit
 
 logger = logging.getLogger(__name__)
 
diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py
index 3bb1ec2af6..21e02c07c0 100644
--- a/synapse/rest/client/v2_alpha/groups.py
+++ b/synapse/rest/client/v2_alpha/groups.py
@@ -14,6 +14,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import logging
+
 from twisted.internet import defer
 
 from synapse.http.servlet import RestServlet, parse_json_object_from_request
@@ -21,8 +23,6 @@ from synapse.types import GroupID
 
 from ._base import client_v2_patterns
 
-import logging
-
 logger = logging.getLogger(__name__)
 
 
diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py
index 3cc87ea63f..8486086b51 100644
--- a/synapse/rest/client/v2_alpha/keys.py
+++ b/synapse/rest/client/v2_alpha/keys.py
@@ -19,10 +19,13 @@ from twisted.internet import defer
 
 from synapse.api.errors import SynapseError
 from synapse.http.servlet import (
-    RestServlet, parse_json_object_from_request, parse_integer
+    RestServlet,
+    parse_integer,
+    parse_json_object_from_request,
+    parse_string,
 )
-from synapse.http.servlet import parse_string
 from synapse.types import StreamToken
+
 from ._base import client_v2_patterns
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/rest/client/v2_alpha/notifications.py b/synapse/rest/client/v2_alpha/notifications.py
index 66583d6778..2a6ea3df5f 100644
--- a/synapse/rest/client/v2_alpha/notifications.py
+++ b/synapse/rest/client/v2_alpha/notifications.py
@@ -13,19 +13,18 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import logging
+
 from twisted.internet import defer
 
-from synapse.http.servlet import (
-    RestServlet, parse_string, parse_integer
-)
 from synapse.events.utils import (
-    serialize_event, format_event_for_client_v2_without_room_id,
+    format_event_for_client_v2_without_room_id,
+    serialize_event,
 )
+from synapse.http.servlet import RestServlet, parse_integer, parse_string
 
 from ._base import client_v2_patterns
 
-import logging
-
 logger = logging.getLogger(__name__)
 
 
diff --git a/synapse/rest/client/v2_alpha/openid.py b/synapse/rest/client/v2_alpha/openid.py
index aa1cae8e1e..01c90aa2a3 100644
--- a/synapse/rest/client/v2_alpha/openid.py
+++ b/synapse/rest/client/v2_alpha/openid.py
@@ -14,15 +14,15 @@
 # limitations under the License.
 
 
-from ._base import client_v2_patterns
+import logging
+
+from twisted.internet import defer
 
-from synapse.http.servlet import RestServlet, parse_json_object_from_request
 from synapse.api.errors import AuthError
+from synapse.http.servlet import RestServlet, parse_json_object_from_request
 from synapse.util.stringutils import random_string
 
-from twisted.internet import defer
-
-import logging
+from ._base import client_v2_patterns
 
 logger = logging.getLogger(__name__)
 
diff --git a/synapse/rest/client/v2_alpha/read_marker.py b/synapse/rest/client/v2_alpha/read_marker.py
index 2f8784fe06..a6e582a5ae 100644
--- a/synapse/rest/client/v2_alpha/read_marker.py
+++ b/synapse/rest/client/v2_alpha/read_marker.py
@@ -13,13 +13,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import logging
+
 from twisted.internet import defer
 
 from synapse.http.servlet import RestServlet, parse_json_object_from_request
-from ._base import client_v2_patterns
-
-import logging
 
+from ._base import client_v2_patterns
 
 logger = logging.getLogger(__name__)
 
diff --git a/synapse/rest/client/v2_alpha/receipts.py b/synapse/rest/client/v2_alpha/receipts.py
index 1fbff2edd8..de370cac45 100644
--- a/synapse/rest/client/v2_alpha/receipts.py
+++ b/synapse/rest/client/v2_alpha/receipts.py
@@ -13,14 +13,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import logging
+
 from twisted.internet import defer
 
 from synapse.api.errors import SynapseError
 from synapse.http.servlet import RestServlet
-from ._base import client_v2_patterns
-
-import logging
 
+from ._base import client_v2_patterns
 
 logger = logging.getLogger(__name__)
 
diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py
index 5cab00aea9..2fb4d43ccb 100644
--- a/synapse/rest/client/v2_alpha/register.py
+++ b/synapse/rest/client/v2_alpha/register.py
@@ -14,30 +14,31 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import hmac
+import logging
+from hashlib import sha1
+
+from six import string_types
+
 from twisted.internet import defer
 
 import synapse
 import synapse.types
-from synapse.api.auth import get_access_token_from_request, has_access_token
 from synapse.api.constants import LoginType
-from synapse.api.errors import SynapseError, Codes, UnrecognizedRequestError
+from synapse.api.errors import Codes, SynapseError, UnrecognizedRequestError
+from synapse.config.server import is_threepid_reserved
 from synapse.http.servlet import (
-    RestServlet, parse_json_object_from_request, assert_params_in_request, parse_string
+    RestServlet,
+    assert_params_in_dict,
+    parse_json_object_from_request,
+    parse_string,
 )
 from synapse.util.msisdn import phone_number_to_msisdn
+from synapse.util.ratelimitutils import FederationRateLimiter
 from synapse.util.threepids import check_3pid_allowed
 
 from ._base import client_v2_patterns, interactive_auth_handler
 
-import logging
-import hmac
-from hashlib import sha1
-from synapse.util.async import run_on_reactor
-from synapse.util.ratelimitutils import FederationRateLimiter
-
-from six import string_types
-
-
 # We ought to be using hmac.compare_digest() but on older pythons it doesn't
 # exist. It's a _really minor_ security flaw to use plain string comparison
 # because the timing attack is so obscured by all the other code here it's
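
If the fallback the comment above alludes to were ever needed, one hedged way to express it (hmac.compare_digest exists from Python 2.7.7 and 3.3 onwards):

    import hmac

    # Use the constant-time comparison where available, otherwise fall back
    # to plain equality, accepting the minor timing side-channel noted above.
    compare_digest = getattr(hmac, "compare_digest", lambda a, b: a == b)
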
@@ -68,7 +69,7 @@ class EmailRegisterRequestTokenRestServlet(RestServlet):
     def on_POST(self, request):
         body = parse_json_object_from_request(request)
 
-        assert_params_in_request(body, [
+        assert_params_in_dict(body, [
             'id_server', 'client_secret', 'email', 'send_attempt'
         ])
 
@@ -104,7 +105,7 @@ class MsisdnRegisterRequestTokenRestServlet(RestServlet):
     def on_POST(self, request):
         body = parse_json_object_from_request(request)
 
-        assert_params_in_request(body, [
+        assert_params_in_dict(body, [
             'id_server', 'client_secret',
             'country', 'phone_number',
             'send_attempt',
@@ -191,19 +192,17 @@ class RegisterRestServlet(RestServlet):
     @interactive_auth_handler
     @defer.inlineCallbacks
     def on_POST(self, request):
-        yield run_on_reactor()
-
         body = parse_json_object_from_request(request)
 
-        kind = "user"
-        if "kind" in request.args:
-            kind = request.args["kind"][0]
+        kind = b"user"
+        if b"kind" in request.args:
+            kind = request.args[b"kind"][0]
 
-        if kind == "guest":
+        if kind == b"guest":
             ret = yield self._do_guest_registration(body)
             defer.returnValue(ret)
             return
-        elif kind != "user":
+        elif kind != b"user":
             raise UnrecognizedRequestError(
                 "Do not understand membership kind: %s" % (kind,)
             )
@@ -225,7 +224,7 @@ class RegisterRestServlet(RestServlet):
             desired_username = body['username']
 
         appservice = None
-        if has_access_token(request):
+        if self.auth.has_access_token(request):
             appservice = yield self.auth.get_appservice_by_req(request)
 
         # fork off as soon as possible for ASes and shared secret auth which
@@ -243,7 +242,7 @@ class RegisterRestServlet(RestServlet):
             # because the IRC bridges rely on being able to register stupid
             # IDs.
 
-            access_token = get_access_token_from_request(request)
+            access_token = self.auth.get_access_token_from_request(request)
 
             if isinstance(desired_username, string_types):
                 result = yield self._do_appservice_registration(
@@ -388,23 +387,30 @@ class RegisterRestServlet(RestServlet):
             add_msisdn = False
         else:
             # NB: This may be from the auth handler and NOT from the POST
-            if 'password' not in params:
-                raise SynapseError(400, "Missing password.",
-                                   Codes.MISSING_PARAM)
+            assert_params_in_dict(params, ["password"])
 
             desired_username = params.get("username", None)
-            new_password = params.get("password", None)
             guest_access_token = params.get("guest_access_token", None)
+            new_password = params.get("password", None)
 
             if desired_username is not None:
                 desired_username = desired_username.lower()
 
+            threepid = None
+            if auth_result:
+                threepid = auth_result.get(LoginType.EMAIL_IDENTITY)
+
             (registered_user_id, _) = yield self.registration_handler.register(
                 localpart=desired_username,
                 password=new_password,
                 guest_access_token=guest_access_token,
                 generate_token=False,
+                threepid=threepid,
             )
+            # Necessary due to auth checks prior to the threepid being
+            # written to the db
+            if is_threepid_reserved(self.hs.config, threepid):
+                yield self.store.upsert_monthly_active_user(registered_user_id)
 
             # remember that we've now registered that user account, and with
             #  what user ID (since the user may not have specified)
@@ -567,11 +573,14 @@ class RegisterRestServlet(RestServlet):
         Returns:
             defer.Deferred:
         """
-        reqd = ('medium', 'address', 'validated_at')
-        if any(x not in threepid for x in reqd):
-            # This will only happen if the ID server returns a malformed response
-            logger.info("Can't add incomplete 3pid")
-            defer.returnValue()
+        try:
+            assert_params_in_dict(threepid, ['medium', 'address', 'validated_at'])
+        except SynapseError as ex:
+            if ex.errcode == Codes.MISSING_PARAM:
+                # This will only happen if the ID server returns a malformed response
+                logger.info("Can't add incomplete 3pid")
+                defer.returnValue(None)
+            raise
 
         yield self.auth_handler.add_threepid(
             user_id,
@@ -644,7 +653,7 @@ class RegisterRestServlet(RestServlet):
     @defer.inlineCallbacks
     def _do_guest_registration(self, params):
         if not self.hs.config.allow_guest_access:
-            defer.returnValue((403, "Guest access is disabled"))
+            raise SynapseError(403, "Guest access is disabled")
         user_id, _ = yield self.registration_handler.register(
             generate_token=False,
             make_guest=True
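
The registration hunk above consults is_threepid_reserved before touching the monthly-active-user table. Judging by the mau_limits_reserved_threepids setting it is named after, the check plausibly amounts to the following (a sketch, not the actual synapse/config/server.py code):

    def is_threepid_reserved(config, threepid):
        # Reserved threepids (e.g. staff emails) get special handling under
        # the monthly-active-user limits, so registration flags them here.
        if not threepid:
            return False
        return any(
            threepid['medium'] == tp['medium']
            and threepid['address'] == tp['address']
            for tp in config.mau_limits_reserved_threepids
        )
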
diff --git a/synapse/rest/client/v2_alpha/report_event.py b/synapse/rest/client/v2_alpha/report_event.py
index 8903e12405..95d2a71ec2 100644
--- a/synapse/rest/client/v2_alpha/report_event.py
+++ b/synapse/rest/client/v2_alpha/report_event.py
@@ -13,13 +13,21 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from twisted.internet import defer
+import logging
 
-from synapse.http.servlet import RestServlet, parse_json_object_from_request
-from ._base import client_v2_patterns
+from six import string_types
+from six.moves import http_client
 
-import logging
+from twisted.internet import defer
+
+from synapse.api.errors import Codes, SynapseError
+from synapse.http.servlet import (
+    RestServlet,
+    assert_params_in_dict,
+    parse_json_object_from_request,
+)
 
+from ._base import client_v2_patterns
 
 logger = logging.getLogger(__name__)
 
@@ -42,12 +50,26 @@ class ReportEventRestServlet(RestServlet):
         user_id = requester.user.to_string()
 
         body = parse_json_object_from_request(request)
+        assert_params_in_dict(body, ("reason", "score"))
+
+        if not isinstance(body["reason"], string_types):
+            raise SynapseError(
+                http_client.BAD_REQUEST,
+                "Param 'reason' must be a string",
+                Codes.BAD_JSON,
+            )
+        if not isinstance(body["score"], int):
+            raise SynapseError(
+                http_client.BAD_REQUEST,
+                "Param 'score' must be an integer",
+                Codes.BAD_JSON,
+            )
 
         yield self.store.add_event_report(
             room_id=room_id,
             event_id=event_id,
             user_id=user_id,
-            reason=body.get("reason"),
+            reason=body["reason"],
             content=body,
             received_ts=self.clock.time_msec(),
         )
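
Under the stricter validation above, a well-formed report body now has to look roughly like this (the convention that scores run from 0, inoffensive, down to -100, most offensive, follows the client-server spec):

    body = {
        "reason": "unsolicited advertising",  # rejected unless a string
        "score": -80,                         # rejected unless an integer
    }
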
diff --git a/synapse/rest/client/v2_alpha/sendtodevice.py b/synapse/rest/client/v2_alpha/sendtodevice.py
index 90bdb1db15..a9e9a47a0b 100644
--- a/synapse/rest/client/v2_alpha/sendtodevice.py
+++ b/synapse/rest/client/v2_alpha/sendtodevice.py
@@ -40,7 +40,7 @@ class SendToDeviceRestServlet(servlet.RestServlet):
         super(SendToDeviceRestServlet, self).__init__()
         self.hs = hs
         self.auth = hs.get_auth()
-        self.txns = HttpTransactionCache(hs.get_clock())
+        self.txns = HttpTransactionCache(hs)
         self.device_message_handler = hs.get_device_message_handler()
 
     def on_PUT(self, request, message_type, txn_id):
diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py
index a291cffbf1..1275baa1ba 100644
--- a/synapse/rest/client/v2_alpha/sync.py
+++ b/synapse/rest/client/v2_alpha/sync.py
@@ -13,27 +13,26 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import itertools
+import logging
+
+from canonicaljson import json
+
 from twisted.internet import defer
 
-from synapse.http.servlet import (
-    RestServlet, parse_string, parse_integer, parse_boolean
+from synapse.api.constants import PresenceState
+from synapse.api.errors import SynapseError
+from synapse.api.filtering import DEFAULT_FILTER_COLLECTION, FilterCollection
+from synapse.events.utils import (
+    format_event_for_client_v2_without_room_id,
+    serialize_event,
 )
 from synapse.handlers.presence import format_user_presence_state
 from synapse.handlers.sync import SyncConfig
+from synapse.http.servlet import RestServlet, parse_boolean, parse_integer, parse_string
 from synapse.types import StreamToken
-from synapse.events.utils import (
-    serialize_event, format_event_for_client_v2_without_room_id,
-)
-from synapse.api.filtering import FilterCollection, DEFAULT_FILTER_COLLECTION
-from synapse.api.errors import SynapseError
-from synapse.api.constants import PresenceState
-from ._base import client_v2_patterns
-from ._base import set_timeline_upper_limit
-
-import itertools
-import logging
 
-import simplejson as json
+from ._base import client_v2_patterns, set_timeline_upper_limit
 
 logger = logging.getLogger(__name__)
 
@@ -371,6 +370,7 @@ class SyncRestServlet(RestServlet):
             ephemeral_events = room.ephemeral
             result["ephemeral"] = {"events": ephemeral_events}
             result["unread_notifications"] = room.unread_notifications
+            result["summary"] = room.summary
 
         return result
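
For reference, the room.summary serialised above is the per-room summary object, sketched here with the field names later standardised in the spec (values are illustrative):

    summary = {
        "m.heroes": ["@alice:example.com", "@bob:example.com"],
        "m.joined_member_count": 2,
        "m.invited_member_count": 0,
    }
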
 
diff --git a/synapse/rest/client/v2_alpha/tags.py b/synapse/rest/client/v2_alpha/tags.py
index dac8603b07..4fea614e95 100644
--- a/synapse/rest/client/v2_alpha/tags.py
+++ b/synapse/rest/client/v2_alpha/tags.py
@@ -13,14 +13,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import client_v2_patterns
-
-from synapse.http.servlet import RestServlet, parse_json_object_from_request
-from synapse.api.errors import AuthError
+import logging
 
 from twisted.internet import defer
 
-import logging
+from synapse.api.errors import AuthError
+from synapse.http.servlet import RestServlet, parse_json_object_from_request
+
+from ._base import client_v2_patterns
 
 logger = logging.getLogger(__name__)
 
diff --git a/synapse/rest/client/v2_alpha/thirdparty.py b/synapse/rest/client/v2_alpha/thirdparty.py
index 6773b9ba60..d9d379182e 100644
--- a/synapse/rest/client/v2_alpha/thirdparty.py
+++ b/synapse/rest/client/v2_alpha/thirdparty.py
@@ -20,6 +20,7 @@ from twisted.internet import defer
 
 from synapse.api.constants import ThirdPartyEntityKind
 from synapse.http.servlet import RestServlet
+
 from ._base import client_v2_patterns
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/rest/client/v2_alpha/user_directory.py b/synapse/rest/client/v2_alpha/user_directory.py
index 2d4a43c353..cac0624ba7 100644
--- a/synapse/rest/client/v2_alpha/user_directory.py
+++ b/synapse/rest/client/v2_alpha/user_directory.py
@@ -19,6 +19,7 @@ from twisted.internet import defer
 
 from synapse.api.errors import SynapseError
 from synapse.http.servlet import RestServlet, parse_json_object_from_request
+
 from ._base import client_v2_patterns
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py
index 2ecb15deee..29e62bfcdd 100644
--- a/synapse/rest/client/versions.py
+++ b/synapse/rest/client/versions.py
@@ -13,11 +13,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.http.servlet import RestServlet
-
 import logging
 import re
 
+from synapse.http.servlet import RestServlet
+
 logger = logging.getLogger(__name__)
 
 
@@ -27,11 +27,22 @@ class VersionsRestServlet(RestServlet):
     def on_GET(self, request):
         return (200, {
             "versions": [
+                # XXX: at some point we need to decide whether we need to include
+                # the previous version numbers, given we've defined r0.3.0 to be
+                # backwards compatible with r0.2.0.  But need to check how
+                # conscientious we've been in compatibility, and decide whether the
+                # middle number is the major revision when at 0.X.Y (as opposed to
+                # X.Y.Z).  And we need to decide whether it's fair to make clients
+                # parse the version string to figure out what's going on.
                 "r0.0.1",
                 "r0.1.0",
                 "r0.2.0",
                 "r0.3.0",
-            ]
+            ],
+            # as per MSC1497:
+            "unstable_features": {
+                "m.lazy_load_members": True,
+            }
         })
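
A client probing for the lazy-loading flag advertised above might do something like this (requests and the homeserver URL are illustrative; the /versions path is the real one):

    import requests

    info = requests.get("https://homeserver.example/_matrix/client/versions").json()
    if info.get("unstable_features", {}).get("m.lazy_load_members"):
        pass  # safe to request lazy-loaded members in the sync filter
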
 
 
diff --git a/synapse/rest/consent/consent_resource.py b/synapse/rest/consent/consent_resource.py
index 724911d1e6..7362e1858d 100644
--- a/synapse/rest/consent/consent_resource.py
+++ b/synapse/rest/consent/consent_resource.py
@@ -13,28 +13,26 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from hashlib import sha256
 import hmac
 import logging
+from hashlib import sha256
 from os import path
+
 from six.moves import http_client
 
 import jinja2
 from jinja2 import TemplateNotFound
+
 from twisted.internet import defer
 from twisted.web.resource import Resource
 from twisted.web.server import NOT_DONE_YET
 
-from synapse.api.errors import NotFoundError, SynapseError, StoreError
+from synapse.api.errors import NotFoundError, StoreError, SynapseError
 from synapse.config import ConfigError
-from synapse.http.server import (
-    finish_request,
-    wrap_html_request_handler,
-)
+from synapse.http.server import finish_request, wrap_html_request_handler
 from synapse.http.servlet import parse_string
 from synapse.types import UserID
 
-
 # language to use for the templates. TODO: figure this out from Accept-Language
 TEMPLATE_LANGUAGE = "en"
 
@@ -142,7 +140,7 @@ class ConsentResource(Resource):
         version = parse_string(request, "v",
                                default=self._default_consent_version)
         username = parse_string(request, "u", required=True)
-        userhmac = parse_string(request, "h", required=True)
+        userhmac = parse_string(request, "h", required=True, encoding=None)
 
         self._check_hash(username, userhmac)
 
@@ -177,7 +175,7 @@ class ConsentResource(Resource):
         """
         version = parse_string(request, "v", required=True)
         username = parse_string(request, "u", required=True)
-        userhmac = parse_string(request, "h", required=True)
+        userhmac = parse_string(request, "h", required=True, encoding=None)
 
         self._check_hash(username, userhmac)
 
@@ -212,9 +210,18 @@ class ConsentResource(Resource):
         finish_request(request)
 
     def _check_hash(self, userid, userhmac):
+        """
+        Args:
+            userid (unicode):
+            userhmac (bytes):
+
+        Raises:
+              SynapseError if the hash doesn't match
+
+        """
         want_mac = hmac.new(
             key=self._hmac_secret,
-            msg=userid,
+            msg=userid.encode('utf-8'),
             digestmod=sha256,
         ).hexdigest()
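
Generating a link that passes _check_hash is the mirror image of the code above: a hex HMAC-SHA256 of the username, keyed with the server's form secret (the key's source is assumed from ConsentResource's configuration; treat this as a sketch):

    import hmac
    from hashlib import sha256

    def consent_userhmac(form_secret, userid):
        # Must reproduce _check_hash exactly: key is the configured secret,
        # message is the UTF-8 username, output is lowercase hex.
        return hmac.new(
            key=form_secret.encode('utf-8'),
            msg=userid.encode('utf-8'),
            digestmod=sha256,
        ).hexdigest()
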
 
diff --git a/synapse/rest/key/v1/server_key_resource.py b/synapse/rest/key/v1/server_key_resource.py
index 1498d188c1..b9ee6e1c13 100644
--- a/synapse/rest/key/v1/server_key_resource.py
+++ b/synapse/rest/key/v1/server_key_resource.py
@@ -14,14 +14,16 @@
 # limitations under the License.
 
 
-from twisted.web.resource import Resource
-from synapse.http.server import respond_with_json_bytes
+import logging
+
+from canonicaljson import encode_canonical_json
 from signedjson.sign import sign_json
 from unpaddedbase64 import encode_base64
-from canonicaljson import encode_canonical_json
+
 from OpenSSL import crypto
-import logging
+from twisted.web.resource import Resource
 
+from synapse.http.server import respond_with_json_bytes
 
 logger = logging.getLogger(__name__)
 
diff --git a/synapse/rest/key/v2/__init__.py b/synapse/rest/key/v2/__init__.py
index a07224148c..3491fd2118 100644
--- a/synapse/rest/key/v2/__init__.py
+++ b/synapse/rest/key/v2/__init__.py
@@ -14,6 +14,7 @@
 # limitations under the License.
 
 from twisted.web.resource import Resource
+
 from .local_key_resource import LocalKey
 from .remote_key_resource import RemoteKey
 
diff --git a/synapse/rest/key/v2/local_key_resource.py b/synapse/rest/key/v2/local_key_resource.py
index 04775b3c45..ec0ec7b431 100644
--- a/synapse/rest/key/v2/local_key_resource.py
+++ b/synapse/rest/key/v2/local_key_resource.py
@@ -14,13 +14,15 @@
 # limitations under the License.
 
 
-from twisted.web.resource import Resource
-from synapse.http.server import respond_with_json_bytes
+import logging
+
+from canonicaljson import encode_canonical_json
 from signedjson.sign import sign_json
 from unpaddedbase64 import encode_base64
-from canonicaljson import encode_canonical_json
-import logging
 
+from twisted.web.resource import Resource
+
+from synapse.http.server import respond_with_json_bytes
 
 logger = logging.getLogger(__name__)
 
diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py
index 21b4c1175e..7d67e4b064 100644
--- a/synapse/rest/key/v2/remote_key_resource.py
+++ b/synapse/rest/key/v2/remote_key_resource.py
@@ -12,20 +12,18 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.http.server import (
-    respond_with_json_bytes, wrap_json_request_handler,
-)
-from synapse.http.servlet import parse_integer, parse_json_object_from_request
-from synapse.api.errors import SynapseError, Codes
-from synapse.crypto.keyring import KeyLookupError
+import logging
+from io import BytesIO
 
+from twisted.internet import defer
 from twisted.web.resource import Resource
 from twisted.web.server import NOT_DONE_YET
-from twisted.internet import defer
 
+from synapse.api.errors import Codes, SynapseError
+from synapse.crypto.keyring import KeyLookupError
+from synapse.http.server import respond_with_json_bytes, wrap_json_request_handler
+from synapse.http.servlet import parse_integer, parse_json_object_from_request
 
-from io import BytesIO
-import logging
 logger = logging.getLogger(__name__)
 
 
diff --git a/synapse/rest/media/v0/content_repository.py b/synapse/rest/media/v0/content_repository.py
index 956bd5da75..f255f2883f 100644
--- a/synapse/rest/media/v0/content_repository.py
+++ b/synapse/rest/media/v0/content_repository.py
@@ -13,21 +13,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.http.server import respond_with_json_bytes, finish_request
-
-from synapse.api.errors import (
-    Codes, cs_error
-)
-
-from twisted.protocols.basic import FileSender
-from twisted.web import server, resource
-
 import base64
-import simplejson as json
 import logging
 import os
 import re
 
+from canonicaljson import json
+
+from twisted.protocols.basic import FileSender
+from twisted.web import resource, server
+
+from synapse.api.errors import Codes, cs_error
+from synapse.http.server import finish_request, respond_with_json_bytes
+
 logger = logging.getLogger(__name__)
 
 
diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py
index c0d2f06855..65f4bd2910 100644
--- a/synapse/rest/media/v1/_base.py
+++ b/synapse/rest/media/v1/_base.py
@@ -13,23 +13,20 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.http.server import respond_with_json, finish_request
-from synapse.api.errors import (
-    cs_error, Codes, SynapseError
-)
-from synapse.util import logcontext
+import logging
+import os
+import urllib
+
+from six.moves.urllib import parse as urlparse
 
 from twisted.internet import defer
 from twisted.protocols.basic import FileSender
 
+from synapse.api.errors import Codes, SynapseError, cs_error
+from synapse.http.server import finish_request, respond_with_json
+from synapse.util import logcontext
 from synapse.util.stringutils import is_ascii
 
-import os
-
-import logging
-import urllib
-from six.moves.urllib import parse as urlparse
-
 logger = logging.getLogger(__name__)
 
 
diff --git a/synapse/rest/media/v1/config_resource.py b/synapse/rest/media/v1/config_resource.py
new file mode 100644
index 0000000000..d6605b6027
--- /dev/null
+++ b/synapse/rest/media/v1/config_resource.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 Will Hunt <will@half-shot.uk>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from twisted.internet import defer
+from twisted.web.resource import Resource
+from twisted.web.server import NOT_DONE_YET
+
+from synapse.http.server import respond_with_json, wrap_json_request_handler
+
+
+class MediaConfigResource(Resource):
+    isLeaf = True
+
+    def __init__(self, hs):
+        Resource.__init__(self)
+        config = hs.get_config()
+        self.clock = hs.get_clock()
+        self.auth = hs.get_auth()
+        self.limits_dict = {
+            "m.upload.size": config.max_upload_size,
+        }
+
+    def render_GET(self, request):
+        self._async_render_GET(request)
+        return NOT_DONE_YET
+
+    @wrap_json_request_handler
+    @defer.inlineCallbacks
+    def _async_render_GET(self, request):
+        yield self.auth.get_user_by_req(request)
+        respond_with_json(request, 200, self.limits_dict)
+
+    def render_OPTIONS(self, request):
+        respond_with_json(request, 200, {}, send_cors=True)
+        return NOT_DONE_YET
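
Once mounted under the media repository (see the putChild call further down), the new resource answers authenticated GETs with the configured limits; a hedged probe, with the r0 path prefix assumed:

    import requests

    access_token = "EXAMPLE_TOKEN"  # placeholder credential
    limits = requests.get(
        "https://homeserver.example/_matrix/media/r0/config",
        headers={"Authorization": "Bearer " + access_token},
    ).json()
    print(limits)  # e.g. {"m.upload.size": 10485760} for a 10M max_upload_size
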
diff --git a/synapse/rest/media/v1/download_resource.py b/synapse/rest/media/v1/download_resource.py
index 8cf8820c31..fbfa85f74f 100644
--- a/synapse/rest/media/v1/download_resource.py
+++ b/synapse/rest/media/v1/download_resource.py
@@ -18,11 +18,9 @@ from twisted.internet import defer
 from twisted.web.resource import Resource
 from twisted.web.server import NOT_DONE_YET
 
-from synapse.http.server import (
-    set_cors_headers,
-    wrap_json_request_handler,
-)
 import synapse.http.servlet
+from synapse.http.server import set_cors_headers, wrap_json_request_handler
+
 from ._base import parse_media_id, respond_404
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/rest/media/v1/filepath.py b/synapse/rest/media/v1/filepath.py
index d5164e47e0..c8586fa280 100644
--- a/synapse/rest/media/v1/filepath.py
+++ b/synapse/rest/media/v1/filepath.py
@@ -13,9 +13,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import functools
 import os
 import re
-import functools
 
 NEW_FORMAT_ID_RE = re.compile(r"^\d\d\d\d-\d\d-\d\d")
 
diff --git a/synapse/rest/media/v1/identicon_resource.py b/synapse/rest/media/v1/identicon_resource.py
index 66f2b6bd30..bdbd8d50dd 100644
--- a/synapse/rest/media/v1/identicon_resource.py
+++ b/synapse/rest/media/v1/identicon_resource.py
@@ -13,8 +13,11 @@
 # limitations under the License.
 
 from pydenticon import Generator
+
 from twisted.web.resource import Resource
 
+from synapse.http.servlet import parse_integer
+
 FOREGROUND = [
     "rgb(45,79,255)",
     "rgb(254,180,44)",
@@ -55,8 +58,8 @@ class IdenticonResource(Resource):
 
     def render_GET(self, request):
         name = "/".join(request.postpath)
-        width = int(request.args.get("width", [96])[0])
-        height = int(request.args.get("height", [96])[0])
+        width = parse_integer(request, "width", default=96)
+        height = parse_integer(request, "height", default=96)
         identicon_bytes = self.generate_identicon(name, width, height)
         request.setHeader(b"Content-Type", b"image/png")
         request.setHeader(
diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py
index 2ac767d2dc..241c972070 100644
--- a/synapse/rest/media/v1/media_repository.py
+++ b/synapse/rest/media/v1/media_repository.py
@@ -14,41 +14,44 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from twisted.internet import defer, threads
+import cgi
+import errno
+import logging
+import os
+import shutil
+
+from six import iteritems
+from six.moves.urllib import parse as urlparse
+
 import twisted.internet.error
 import twisted.web.http
+from twisted.internet import defer, threads
 from twisted.web.resource import Resource
 
-from ._base import respond_404, FileInfo, respond_with_responder
-from .upload_resource import UploadResource
-from .download_resource import DownloadResource
-from .thumbnail_resource import ThumbnailResource
-from .identicon_resource import IdenticonResource
-from .preview_url_resource import PreviewUrlResource
-from .filepath import MediaFilePaths
-from .thumbnailer import Thumbnailer
-from .storage_provider import StorageProviderWrapper
-from .media_storage import MediaStorage
-
-from synapse.http.matrixfederationclient import MatrixFederationHttpClient
-from synapse.util.stringutils import random_string
 from synapse.api.errors import (
-    SynapseError, HttpResponseException, NotFoundError, FederationDeniedError,
+    FederationDeniedError,
+    HttpResponseException,
+    NotFoundError,
+    SynapseError,
 )
-
-from synapse.util.async import Linearizer
-from synapse.util.stringutils import is_ascii
+from synapse.http.matrixfederationclient import MatrixFederationHttpClient
+from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.util.async_helpers import Linearizer
 from synapse.util.logcontext import make_deferred_yieldable
 from synapse.util.retryutils import NotRetryingDestination
+from synapse.util.stringutils import is_ascii, random_string
 
-import os
-import errno
-import shutil
-
-import cgi
-import logging
-from six.moves.urllib import parse as urlparse
-from six import iteritems
+from ._base import FileInfo, respond_404, respond_with_responder
+from .config_resource import MediaConfigResource
+from .download_resource import DownloadResource
+from .filepath import MediaFilePaths
+from .identicon_resource import IdenticonResource
+from .media_storage import MediaStorage
+from .preview_url_resource import PreviewUrlResource
+from .storage_provider import StorageProviderWrapper
+from .thumbnail_resource import ThumbnailResource
+from .thumbnailer import Thumbnailer
+from .upload_resource import UploadResource
 
 logger = logging.getLogger(__name__)
 
@@ -58,6 +61,7 @@ UPDATE_RECENTLY_ACCESSED_TS = 60 * 1000
 
 class MediaRepository(object):
     def __init__(self, hs):
+        self.hs = hs
         self.auth = hs.get_auth()
         self.client = MatrixFederationHttpClient(hs)
         self.clock = hs.get_clock()
@@ -94,14 +98,19 @@ class MediaRepository(object):
             storage_providers.append(provider)
 
         self.media_storage = MediaStorage(
-            self.primary_base_path, self.filepaths, storage_providers,
+            self.hs, self.primary_base_path, self.filepaths, storage_providers,
         )
 
         self.clock.looping_call(
-            self._update_recently_accessed,
+            self._start_update_recently_accessed,
             UPDATE_RECENTLY_ACCESSED_TS,
         )
 
+    def _start_update_recently_accessed(self):
+        return run_as_background_process(
+            "update_recently_accessed_media", self._update_recently_accessed,
+        )
+
     @defer.inlineCallbacks
     def _update_recently_accessed(self):
         remote_media = self.recently_accessed_remotes
@@ -371,7 +380,7 @@ class MediaRepository(object):
                 logger.warn("HTTP error fetching remote media %s/%s: %s",
                             server_name, media_id, e.response)
                 if e.code == twisted.web.http.NOT_FOUND:
-                    raise SynapseError.from_http_response_exception(e)
+                    raise e.to_synapse_error()
                 raise SynapseError(502, "Failed to fetch remote media")
 
             except SynapseError:
@@ -746,7 +755,6 @@ class MediaRepositoryResource(Resource):
         Resource.__init__(self)
 
         media_repo = hs.get_media_repository()
-
         self.putChild("upload", UploadResource(hs, media_repo))
         self.putChild("download", DownloadResource(hs, media_repo))
         self.putChild("thumbnail", ThumbnailResource(
@@ -757,3 +765,4 @@ class MediaRepositoryResource(Resource):
             self.putChild("preview_url", PreviewUrlResource(
                 hs, media_repo, media_repo.media_storage,
             ))
+        self.putChild("config", MediaConfigResource(hs))
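
The looping-call changes in this file follow one pattern: the timer fires a thin synchronous wrapper, which hands the real deferred-returning worker to run_as_background_process so it runs under a named, metered logcontext. Schematically (_start_some_task and _some_task are illustrative names):

    from synapse.metrics.background_process_metrics import run_as_background_process

    def _start_some_task(self):
        # clock.looping_call invokes this plain function; the wrapper gives
        # the actual work a tracked name and its own logcontext.
        return run_as_background_process("some_task", self._some_task)
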
diff --git a/synapse/rest/media/v1/media_storage.py b/synapse/rest/media/v1/media_storage.py
index d23fe10b07..a6189224ee 100644
--- a/synapse/rest/media/v1/media_storage.py
+++ b/synapse/rest/media/v1/media_storage.py
@@ -13,22 +13,21 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from twisted.internet import defer, threads
-from twisted.protocols.basic import FileSender
+import contextlib
+import logging
+import os
+import shutil
+import sys
 
 import six
 
-from ._base import Responder
+from twisted.internet import defer, threads
+from twisted.protocols.basic import FileSender
 
 from synapse.util.file_consumer import BackgroundFileConsumer
 from synapse.util.logcontext import make_deferred_yieldable
 
-import contextlib
-import os
-import logging
-import shutil
-import sys
-
+from ._base import Responder
 
 logger = logging.getLogger(__name__)
 
@@ -37,13 +36,15 @@ class MediaStorage(object):
     """Responsible for storing/fetching files from local sources.
 
     Args:
+        hs (synapse.server.HomeServer)
         local_media_directory (str): Base path where we store media on disk
         filepaths (MediaFilePaths)
         storage_providers ([StorageProvider]): List of StorageProvider that are
             used to fetch and store files.
     """
 
-    def __init__(self, local_media_directory, filepaths, storage_providers):
+    def __init__(self, hs, local_media_directory, filepaths, storage_providers):
+        self.hs = hs
         self.local_media_directory = local_media_directory
         self.filepaths = filepaths
         self.storage_providers = storage_providers
@@ -175,7 +176,8 @@ class MediaStorage(object):
             res = yield provider.fetch(path, file_info)
             if res:
                 with res:
-                    consumer = BackgroundFileConsumer(open(local_path, "w"))
+                    consumer = BackgroundFileConsumer(
+                        open(local_path, "wb"), self.hs.get_reactor())
                     yield res.write_to_consumer(consumer)
                     yield consumer.wait()
                 defer.returnValue(local_path)
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index 565cef2b8d..778ef97337 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -23,31 +23,31 @@ import re
 import shutil
 import sys
 import traceback
-import simplejson as json
 
-from six.moves import urllib_parse as urlparse
 from six import string_types
+from six.moves import urllib_parse as urlparse
+
+from canonicaljson import json
 
-from twisted.web.server import NOT_DONE_YET
 from twisted.internet import defer
 from twisted.web.resource import Resource
+from twisted.web.server import NOT_DONE_YET
 
-from ._base import FileInfo
-
-from synapse.api.errors import (
-    SynapseError, Codes,
-)
-from synapse.util.logcontext import make_deferred_yieldable, run_in_background
-from synapse.util.stringutils import random_string
-from synapse.util.caches.expiringcache import ExpiringCache
+from synapse.api.errors import Codes, SynapseError
 from synapse.http.client import SpiderHttpClient
 from synapse.http.server import (
-    respond_with_json_bytes,
     respond_with_json,
+    respond_with_json_bytes,
     wrap_json_request_handler,
 )
-from synapse.util.async import ObservableDeferred
-from synapse.util.stringutils import is_ascii
+from synapse.http.servlet import parse_integer, parse_string
+from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.util.async_helpers import ObservableDeferred
+from synapse.util.caches.expiringcache import ExpiringCache
+from synapse.util.logcontext import make_deferred_yieldable, run_in_background
+from synapse.util.stringutils import is_ascii, random_string
+
+from ._base import FileInfo
 
 logger = logging.getLogger(__name__)
 
@@ -82,7 +82,7 @@ class PreviewUrlResource(Resource):
         self._cache.start()
 
         self._cleaner_loop = self.clock.looping_call(
-            self._expire_url_cache_data, 10 * 1000
+            self._start_expire_url_cache_data, 10 * 1000,
         )
 
     def render_OPTIONS(self, request):
@@ -98,9 +98,9 @@ class PreviewUrlResource(Resource):
 
         # XXX: if get_user_by_req fails, what should we do in an async render?
         requester = yield self.auth.get_user_by_req(request)
-        url = request.args.get("url")[0]
+        url = parse_string(request, "url")
         if "ts" in request.args:
-            ts = int(request.args.get("ts")[0])
+            ts = parse_integer(request, "ts")
         else:
             ts = self.clock.time_msec()
 
@@ -372,6 +372,11 @@ class PreviewUrlResource(Resource):
             "etag": headers["ETag"][0] if "ETag" in headers else None,
         })
 
+    def _start_expire_url_cache_data(self):
+        return run_as_background_process(
+            "expire_url_cache_data", self._expire_url_cache_data,
+        )
+
     @defer.inlineCallbacks
     def _expire_url_cache_data(self):
         """Clean up expired url cache content, media and thumbnails.
diff --git a/synapse/rest/media/v1/storage_provider.py b/synapse/rest/media/v1/storage_provider.py
index 0252afd9d3..7b9f8b4d79 100644
--- a/synapse/rest/media/v1/storage_provider.py
+++ b/synapse/rest/media/v1/storage_provider.py
@@ -13,17 +13,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from twisted.internet import defer, threads
+import logging
+import os
+import shutil
 
-from .media_storage import FileResponder
+from twisted.internet import defer, threads
 
 from synapse.config._base import Config
 from synapse.util.logcontext import run_in_background
 
-import logging
-import os
-import shutil
-
+from .media_storage import FileResponder
 
 logger = logging.getLogger(__name__)
 
diff --git a/synapse/rest/media/v1/thumbnail_resource.py b/synapse/rest/media/v1/thumbnail_resource.py
index aae6e464e8..5305e9175f 100644
--- a/synapse/rest/media/v1/thumbnail_resource.py
+++ b/synapse/rest/media/v1/thumbnail_resource.py
@@ -20,13 +20,14 @@ from twisted.internet import defer
 from twisted.web.resource import Resource
 from twisted.web.server import NOT_DONE_YET
 
-from synapse.http.server import (
-    set_cors_headers,
-    wrap_json_request_handler,
-)
+from synapse.http.server import set_cors_headers, wrap_json_request_handler
 from synapse.http.servlet import parse_integer, parse_string
+
 from ._base import (
-    FileInfo, parse_media_id, respond_404, respond_with_file,
+    FileInfo,
+    parse_media_id,
+    respond_404,
+    respond_with_file,
     respond_with_responder,
 )
 
diff --git a/synapse/rest/media/v1/thumbnailer.py b/synapse/rest/media/v1/thumbnailer.py
index e1ee535b9a..a4b26c2587 100644
--- a/synapse/rest/media/v1/thumbnailer.py
+++ b/synapse/rest/media/v1/thumbnailer.py
@@ -13,10 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import PIL.Image as Image
+import logging
 from io import BytesIO
 
-import logging
+import PIL.Image as Image
 
 logger = logging.getLogger(__name__)
 
diff --git a/synapse/rest/media/v1/upload_resource.py b/synapse/rest/media/v1/upload_resource.py
index 7567476fce..c1240e1963 100644
--- a/synapse/rest/media/v1/upload_resource.py
+++ b/synapse/rest/media/v1/upload_resource.py
@@ -20,10 +20,8 @@ from twisted.web.resource import Resource
 from twisted.web.server import NOT_DONE_YET
 
 from synapse.api.errors import SynapseError
-from synapse.http.server import (
-    respond_with_json,
-    wrap_json_request_handler,
-)
+from synapse.http.server import respond_with_json, wrap_json_request_handler
+from synapse.http.servlet import parse_string
 
 logger = logging.getLogger(__name__)
 
@@ -57,7 +55,7 @@ class UploadResource(Resource):
         requester = yield self.auth.get_user_by_req(request)
         # TODO: The checks here are a bit late. The content will have
         # already been uploaded to a tmp file at this point
-        content_length = request.getHeader("Content-Length")
+        content_length = request.getHeader(b"Content-Length").decode('ascii')
         if content_length is None:
             raise SynapseError(
                 msg="Request must specify a Content-Length", code=400
@@ -68,10 +66,10 @@ class UploadResource(Resource):
                 code=413,
             )
 
-        upload_name = request.args.get("filename", None)
+        upload_name = parse_string(request, b"filename", encoding=None)
         if upload_name:
             try:
-                upload_name = upload_name[0].decode('UTF-8')
+                upload_name = upload_name.decode('utf8')
             except UnicodeDecodeError:
                 raise SynapseError(
                     msg="Invalid UTF-8 filename parameter: %r" % (upload_name),
@@ -80,8 +78,8 @@ class UploadResource(Resource):
 
         headers = request.requestHeaders
 
-        if headers.hasHeader("Content-Type"):
-            media_type = headers.getRawHeaders(b"Content-Type")[0]
+        if headers.hasHeader(b"Content-Type"):
+            media_type = headers.getRawHeaders(b"Content-Type")[0].decode('ascii')
         else:
             raise SynapseError(
                 msg="Upload request missing 'Content-Type'",
diff --git a/synapse/secrets.py b/synapse/secrets.py
new file mode 100644
index 0000000000..f6280f951c
--- /dev/null
+++ b/synapse/secrets.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Injectable secrets module for Synapse.
+
+See https://docs.python.org/3/library/secrets.html#module-secrets for the API
+used in Python 3.6, and the API emulated in Python 2.7.
+"""
+
+import sys
+
+# secrets is available since python 3.6
+if sys.version_info[0:2] >= (3, 6):
+    import secrets
+
+    def Secrets():
+        return secrets
+
+else:
+    import os
+    import binascii
+
+    class Secrets(object):
+        def token_bytes(self, nbytes=32):
+            return os.urandom(nbytes)
+
+        def token_hex(self, nbytes=32):
+            return binascii.hexlify(self.token_bytes(nbytes)).decode('ascii')
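
The new module keeps call sites version-agnostic: on Python 3.6+ `Secrets()` simply returns the stdlib `secrets` module, while on Python 2.7 it returns an `os.urandom`-backed object exposing the same `token_bytes`/`token_hex` interface. A brief usage sketch (the variable names are illustrative):

    from synapse.secrets import Secrets

    secrets = Secrets()
    token = secrets.token_hex(16)    # 32-character hex string
    key = secrets.token_bytes(32)    # 32 random bytes
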
diff --git a/synapse/server.py b/synapse/server.py
index 58dbf78437..938a05f9dc 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -19,6 +19,7 @@
 # partial one for unit test mocking.
 
 # Imports required for the default HomeServer() implementation
+import abc
 import logging
 
 from twisted.enterprise import adbapi
@@ -33,37 +34,41 @@ from synapse.crypto.keyring import Keyring
 from synapse.events.builder import EventBuilderFactory
 from synapse.events.spamcheck import SpamChecker
 from synapse.federation.federation_client import FederationClient
-from synapse.federation.federation_server import FederationServer
+from synapse.federation.federation_server import (
+    FederationHandlerRegistry,
+    FederationServer,
+    ReplicationFederationHandlerRegistry,
+)
 from synapse.federation.send_queue import FederationRemoteSendQueue
-from synapse.federation.federation_server import FederationHandlerRegistry
-from synapse.federation.transport.client import TransportLayerClient
 from synapse.federation.transaction_queue import TransactionQueue
+from synapse.federation.transport.client import TransportLayerClient
+from synapse.groups.attestations import GroupAttestationSigning, GroupAttestionRenewer
+from synapse.groups.groups_server import GroupsServerHandler
 from synapse.handlers import Handlers
 from synapse.handlers.appservice import ApplicationServicesHandler
-from synapse.handlers.auth import AuthHandler, MacaroonGeneartor
+from synapse.handlers.auth import AuthHandler, MacaroonGenerator
 from synapse.handlers.deactivate_account import DeactivateAccountHandler
-from synapse.handlers.devicemessage import DeviceMessageHandler
 from synapse.handlers.device import DeviceHandler
+from synapse.handlers.devicemessage import DeviceMessageHandler
 from synapse.handlers.e2e_keys import E2eKeysHandler
+from synapse.handlers.events import EventHandler, EventStreamHandler
+from synapse.handlers.groups_local import GroupsLocalHandler
+from synapse.handlers.initial_sync import InitialSyncHandler
+from synapse.handlers.message import EventCreationHandler, MessageHandler
+from synapse.handlers.pagination import PaginationHandler
 from synapse.handlers.presence import PresenceHandler
-from synapse.handlers.room import RoomCreationHandler
+from synapse.handlers.profile import BaseProfileHandler, MasterProfileHandler
+from synapse.handlers.read_marker import ReadMarkerHandler
+from synapse.handlers.receipts import ReceiptsHandler
+from synapse.handlers.room import RoomContextHandler, RoomCreationHandler
 from synapse.handlers.room_list import RoomListHandler
 from synapse.handlers.room_member import RoomMemberMasterHandler
 from synapse.handlers.room_member_worker import RoomMemberWorkerHandler
 from synapse.handlers.set_password import SetPasswordHandler
 from synapse.handlers.sync import SyncHandler
 from synapse.handlers.typing import TypingHandler
-from synapse.handlers.events import EventHandler, EventStreamHandler
-from synapse.handlers.initial_sync import InitialSyncHandler
-from synapse.handlers.receipts import ReceiptsHandler
-from synapse.handlers.read_marker import ReadMarkerHandler
 from synapse.handlers.user_directory import UserDirectoryHandler
-from synapse.handlers.groups_local import GroupsLocalHandler
-from synapse.handlers.profile import ProfileHandler
-from synapse.handlers.message import EventCreationHandler
-from synapse.groups.groups_server import GroupsServerHandler
-from synapse.groups.attestations import GroupAttestionRenewer, GroupAttestationSigning
-from synapse.http.client import SimpleHttpClient, InsecureInterceptableContextFactory
+from synapse.http.client import InsecureInterceptableContextFactory, SimpleHttpClient
 from synapse.http.matrixfederationclient import MatrixFederationHttpClient
 from synapse.notifier import Notifier
 from synapse.push.action_generator import ActionGenerator
@@ -72,13 +77,11 @@ from synapse.rest.media.v1.media_repository import (
     MediaRepository,
     MediaRepositoryResource,
 )
+from synapse.secrets import Secrets
 from synapse.server_notices.server_notices_manager import ServerNoticesManager
 from synapse.server_notices.server_notices_sender import ServerNoticesSender
-from synapse.server_notices.worker_server_notices_sender import (
-    WorkerServerNoticesSender,
-)
+from synapse.server_notices.worker_server_notices_sender import WorkerServerNoticesSender
 from synapse.state import StateHandler, StateResolutionHandler
-from synapse.storage import DataStore
 from synapse.streams.events import EventSources
 from synapse.util import Clock
 from synapse.util.distributor import Distributor
@@ -108,6 +111,8 @@ class HomeServer(object):
         config (synapse.config.homeserver.HomeserverConfig):
     """
 
+    __metaclass__ = abc.ABCMeta
+
     DEPENDENCIES = [
         'http_client',
         'db_pool',
@@ -158,34 +163,56 @@ class HomeServer(object):
         'groups_server_handler',
         'groups_attestation_signing',
         'groups_attestation_renewer',
+        'secrets',
         'spam_checker',
         'room_member_handler',
         'federation_registry',
         'server_notices_manager',
         'server_notices_sender',
+        'message_handler',
+        'pagination_handler',
+        'room_context_handler',
     ]
 
-    def __init__(self, hostname, **kwargs):
+    # This is overridden in derived application classes
+    # (such as synapse.app.homeserver.SynapseHomeServer) and gives the class
+    # to be instantiated during setup() and later returned by get_datastore()
+    DATASTORE_CLASS = abc.abstractproperty()
+
+    def __init__(self, hostname, reactor=None, **kwargs):
         """
         Args:
             hostname : The hostname for the server.
         """
+        if not reactor:
+            from twisted.internet import reactor
+
+        self._reactor = reactor
         self.hostname = hostname
         self._building = {}
 
-        self.clock = Clock()
+        self.clock = Clock(reactor)
         self.distributor = Distributor()
         self.ratelimiter = Ratelimiter()
 
+        self.datastore = None
+
         # Other kwargs are explicit dependencies
         for depname in kwargs:
             setattr(self, depname, kwargs[depname])
 
     def setup(self):
         logger.info("Setting up.")
-        self.datastore = DataStore(self.get_db_conn(), self)
+        with self.get_db_conn() as conn:
+            self.datastore = self.DATASTORE_CLASS(conn, self)
         logger.info("Finished setting up.")
 
+    def get_reactor(self):
+        """
+        Fetch the Twisted reactor in use by this HomeServer.
+        """
+        return self._reactor
+
     def get_ip_from_request(self, request):
         # X-Forwarded-For is handled by our custom request type.
         return request.getClientIP()
@@ -261,7 +288,7 @@ class HomeServer(object):
         return AuthHandler(self)
 
     def build_macaroon_generator(self):
-        return MacaroonGeneartor(self)
+        return MacaroonGenerator(self)
 
     def build_device_handler(self):
         return DeviceHandler(self)
@@ -291,7 +318,10 @@ class HomeServer(object):
         return InitialSyncHandler(self)
 
     def build_profile_handler(self):
-        return ProfileHandler(self)
+        if self.config.worker_app:
+            return BaseProfileHandler(self)
+        else:
+            return MasterProfileHandler(self)
 
     def build_event_creation_handler(self):
         return EventCreationHandler(self)
@@ -328,6 +358,7 @@ class HomeServer(object):
 
         return adbapi.ConnectionPool(
             name,
+            cp_reactor=self.get_reactor(),
             **self.db_config.get("args", {})
         )
 
@@ -394,6 +425,9 @@ class HomeServer(object):
     def build_groups_attestation_renewer(self):
         return GroupAttestionRenewer(self)
 
+    def build_secrets(self):
+        return Secrets()
+
     def build_spam_checker(self):
         return SpamChecker(self)
 
@@ -403,7 +437,10 @@ class HomeServer(object):
         return RoomMemberMasterHandler(self)
 
     def build_federation_registry(self):
-        return FederationHandlerRegistry()
+        if self.config.worker_app:
+            return ReplicationFederationHandlerRegistry(self)
+        else:
+            return FederationHandlerRegistry()
 
     def build_server_notices_manager(self):
         if self.config.worker_app:
@@ -415,6 +452,15 @@ class HomeServer(object):
             return WorkerServerNoticesSender(self)
         return ServerNoticesSender(self)
 
+    def build_message_handler(self):
+        return MessageHandler(self)
+
+    def build_pagination_handler(self):
+        return PaginationHandler(self)
+
+    def build_room_context_handler(self):
+        return RoomContextHandler(self)
+
     def remove_pusher(self, app_id, push_key, user_id):
         return self.get_pusherpool().remove_pusher(app_id, push_key, user_id)
 
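
Making `HomeServer` abstract means it can no longer be instantiated directly: each application class must override `DATASTORE_CLASS` with the datastore class that `setup()` should instantiate against the database connection. A minimal sketch of a derived class (the subclass name is illustrative; `DataStore` is the class whose import this diff drops from `synapse/server.py`):

    from synapse.server import HomeServer
    from synapse.storage import DataStore

    class MyHomeServer(HomeServer):
        # a class, not an instance: setup() builds it with the db
        # connection, and get_datastore() returns the result
        DATASTORE_CLASS = DataStore
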
diff --git a/synapse/server_notices/consent_server_notices.py b/synapse/server_notices/consent_server_notices.py
index bb74af1af5..5e3044d164 100644
--- a/synapse/server_notices/consent_server_notices.py
+++ b/synapse/server_notices/consent_server_notices.py
@@ -14,7 +14,8 @@
 # limitations under the License.
 import logging
 
-from six import (iteritems, string_types)
+from six import iteritems, string_types
+
 from twisted.internet import defer
 
 from synapse.api.errors import SynapseError
diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py
new file mode 100644
index 0000000000..af15cba0ee
--- /dev/null
+++ b/synapse/server_notices/resource_limits_server_notices.py
@@ -0,0 +1,203 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+
+from six import iteritems
+
+from twisted.internet import defer
+
+from synapse.api.constants import (
+    EventTypes,
+    ServerNoticeLimitReached,
+    ServerNoticeMsgType,
+)
+from synapse.api.errors import AuthError, ResourceLimitError, SynapseError
+from synapse.server_notices.server_notices_manager import SERVER_NOTICE_ROOM_TAG
+
+logger = logging.getLogger(__name__)
+
+
+class ResourceLimitsServerNotices(object):
+    """ Keeps track of whether the server has reached it's resource limit and
+    ensures that the client is kept up to date.
+    """
+    def __init__(self, hs):
+        """
+        Args:
+            hs (synapse.server.HomeServer):
+        """
+        self._server_notices_manager = hs.get_server_notices_manager()
+        self._store = hs.get_datastore()
+        self._auth = hs.get_auth()
+        self._config = hs.config
+        self._resource_limited = False
+        self._message_handler = hs.get_message_handler()
+        self._state = hs.get_state_handler()
+
+        self._notifier = hs.get_notifier()
+
+    @defer.inlineCallbacks
+    def maybe_send_server_notice_to_user(self, user_id):
+        """Check if we need to send a notice to this user, this will be true in
+        two cases.
+        1. The server has reached its limit does not reflect this
+        2. The room state indicates that the server has reached its limit when
+        actually the server is fine
+
+        Args:
+            user_id (str): user to check
+
+        Returns:
+            Deferred
+        """
+        if self._config.hs_disabled is True:
+            return
+
+        if self._config.limit_usage_by_mau is False:
+            return
+
+        if not self._server_notices_manager.is_enabled():
+            # Don't try to send server notices unless they've been enabled
+            return
+
+        timestamp = yield self._store.user_last_seen_monthly_active(user_id)
+        if timestamp is None:
+            # This user will be blocked from receiving the notice anyway.
+            # In practice, it's not clear we can ever get here
+            return
+
+        # Determine current state of room
+
+        room_id = yield self._server_notices_manager.get_notice_room_for_user(user_id)
+
+        if not room_id:
+            logger.warn("Failed to get server notices room")
+            return
+
+        yield self._check_and_set_tags(user_id, room_id)
+        currently_blocked, ref_events = yield self._is_room_currently_blocked(room_id)
+
+        try:
+            # Normally we should always pass in user_id if we have it, but in
+            # this case we are checking what would happen to other users if
+            # they were to arrive.
+            try:
+                yield self._auth.check_auth_blocking()
+                is_auth_blocking = False
+            except ResourceLimitError as e:
+                is_auth_blocking = True
+                event_content = e.msg
+                event_limit_type = e.limit_type
+
+            if currently_blocked and not is_auth_blocking:
+                # Room is notifying of a block, when it ought not to be.
+                # Remove block notification
+                content = {
+                    "pinned": ref_events
+                }
+                yield self._server_notices_manager.send_notice(
+                    user_id, content, EventTypes.Pinned, '',
+                )
+
+            elif not currently_blocked and is_auth_blocking:
+                # Room is not notifying of a block, when it ought to be.
+                # Add block notification
+                content = {
+                    'body': event_content,
+                    'msgtype': ServerNoticeMsgType,
+                    'server_notice_type': ServerNoticeLimitReached,
+                    'admin_contact': self._config.admin_contact,
+                    'limit_type': event_limit_type
+                }
+                event = yield self._server_notices_manager.send_notice(
+                    user_id, content, EventTypes.Message,
+                )
+
+                content = {
+                    "pinned": [
+                        event.event_id,
+                    ]
+                }
+                yield self._server_notices_manager.send_notice(
+                    user_id, content, EventTypes.Pinned, '',
+                )
+
+        except SynapseError as e:
+            logger.error("Error sending resource limits server notice: %s", e)
+
+    @defer.inlineCallbacks
+    def _check_and_set_tags(self, user_id, room_id):
+        """
+        Since server notices rooms were originally created without tags,
+        it is important to check that the tags have been set correctly.
+        Args:
+            user_id(str): the user in question
+            room_id(str): the server notices room for that user
+        """
+        tags = yield self._store.get_tags_for_room(user_id, room_id)
+        need_to_set_tag = True
+        if tags:
+            if SERVER_NOTICE_ROOM_TAG in tags:
+                # tag already present, nothing to do here
+                need_to_set_tag = False
+        if need_to_set_tag:
+            max_id = yield self._store.add_tag_to_room(
+                user_id, room_id, SERVER_NOTICE_ROOM_TAG, {}
+            )
+            self._notifier.on_new_event(
+                "account_data_key", max_id, users=[user_id]
+            )
+
+    @defer.inlineCallbacks
+    def _is_room_currently_blocked(self, room_id):
+        """
+        Determines if the room is currently blocked
+
+        Args:
+            room_id(str): The room id of the server notices room
+
+        Returns:
+
+            bool: Is the room currently blocked
+            list: The list of pinned events that are unrelated to limit
+                blocking. This list can be used as a convenience in the case
+                where the block is to be lifted and the remaining pinned
+                event references need to be preserved.
+        """
+        currently_blocked = False
+        pinned_state_event = None
+        try:
+            pinned_state_event = yield self._state.get_current_state(
+                room_id, event_type=EventTypes.Pinned
+            )
+        except AuthError:
+            # The user has yet to join the server notices room
+            pass
+
+        referenced_events = []
+        if pinned_state_event is not None:
+            referenced_events = list(pinned_state_event.content.get('pinned', []))
+
+        events = yield self._store.get_events(referenced_events)
+        for event_id, event in iteritems(events):
+            if event.type != EventTypes.Message:
+                continue
+            if event.content.get("msgtype") == ServerNoticeMsgType:
+                currently_blocked = True
+                # remove event in case we need to disable blocking later on.
+                if event_id in referenced_events:
+                    referenced_events.remove(event.event_id)
+
+        defer.returnValue((currently_blocked, referenced_events))
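
The blocked/unblocked state of the notices room is carried entirely by its pinned events: the room counts as blocked if any pinned event is a message of `ServerNoticeMsgType`. `maybe_send_server_notice_to_user` reconciles that with what `check_auth_blocking` reports and acts only on a mismatch. The decision table, restated as a toy function (not Synapse code):

    def reconcile(currently_blocked, is_auth_blocking):
        """What the notice code does for each state combination."""
        if currently_blocked and not is_auth_blocking:
            # room advertises a block but the server is fine: unpin it
            return "remove block notification"
        if not currently_blocked and is_auth_blocking:
            # server is blocked but the room does not say so: notify + pin
            return "add block notification"
        return "no-op"  # room state already matches reality
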
diff --git a/synapse/server_notices/server_notices_manager.py b/synapse/server_notices/server_notices_manager.py
index a26deace53..c5cc6d728e 100644
--- a/synapse/server_notices/server_notices_manager.py
+++ b/synapse/server_notices/server_notices_manager.py
@@ -22,6 +22,8 @@ from synapse.util.caches.descriptors import cachedInlineCallbacks
 
 logger = logging.getLogger(__name__)
 
+SERVER_NOTICE_ROOM_TAG = "m.server_notice"
+
 
 class ServerNoticesManager(object):
     def __init__(self, hs):
@@ -37,6 +39,8 @@ class ServerNoticesManager(object):
         self._event_creation_handler = hs.get_event_creation_handler()
         self._is_mine_id = hs.is_mine_id
 
+        self._notifier = hs.get_notifier()
+
     def is_enabled(self):
         """Checks if server notices are enabled on this server.
 
@@ -46,7 +50,10 @@ class ServerNoticesManager(object):
         return self._config.server_notices_mxid is not None
 
     @defer.inlineCallbacks
-    def send_notice(self, user_id, event_content):
+    def send_notice(
+        self, user_id, event_content,
+        type=EventTypes.Message, state_key=None
+    ):
         """Send a notice to the given user
 
         Creates the server notices room, if none exists.
@@ -54,9 +61,11 @@ class ServerNoticesManager(object):
         Args:
             user_id (str): mxid of user to send event to.
             event_content (dict): content of event to send
+            type(str): type of event to send
+            state_key(str|None): state key of the event, if it is a state event
 
         Returns:
-            Deferred[None]
+            Deferred[FrozenEvent]
         """
         room_id = yield self.get_notice_room_for_user(user_id)
 
@@ -65,15 +74,20 @@ class ServerNoticesManager(object):
 
         logger.info("Sending server notice to %s", user_id)
 
-        yield self._event_creation_handler.create_and_send_nonmember_event(
-            requester, {
-                "type": EventTypes.Message,
-                "room_id": room_id,
-                "sender": system_mxid,
-                "content": event_content,
-            },
-            ratelimit=False,
+        event_dict = {
+            "type": type,
+            "room_id": room_id,
+            "sender": system_mxid,
+            "content": event_content,
+        }
+
+        if state_key is not None:
+            event_dict['state_key'] = state_key
+
+        res = yield self._event_creation_handler.create_and_send_nonmember_event(
+            requester, event_dict, ratelimit=False,
         )
+        defer.returnValue(res)
 
     @cachedInlineCallbacks()
     def get_notice_room_for_user(self, user_id):
@@ -142,5 +156,12 @@ class ServerNoticesManager(object):
         )
         room_id = info['room_id']
 
+        max_id = yield self._store.add_tag_to_room(
+            user_id, room_id, SERVER_NOTICE_ROOM_TAG, {},
+        )
+        self._notifier.on_new_event(
+            "account_data_key", max_id, users=[user_id]
+        )
+
         logger.info("Created server notices room %s for %s", room_id, user_id)
         defer.returnValue(room_id)
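
`send_notice` now accepts an event type and an optional state key, which is what lets the resource-limits code pin and unpin notices: `EventTypes.Pinned` with a state key of `''` yields an `m.room.pinned_events` state event, while the defaults preserve the old message behaviour. A sketch of both call shapes, assuming a `ServerNoticesManager` instance `manager` (the wrapper functions are illustrative):

    from twisted.internet import defer

    from synapse.api.constants import EventTypes

    @defer.inlineCallbacks
    def send_plain_notice(manager, user_id, body):
        # defaults: an m.room.message event, as before
        event = yield manager.send_notice(
            user_id, {"msgtype": "m.text", "body": body},
        )
        defer.returnValue(event)

    @defer.inlineCallbacks
    def pin_events(manager, user_id, event_ids):
        # a state event: state_key must be non-None to be included
        yield manager.send_notice(
            user_id, {"pinned": event_ids}, EventTypes.Pinned, '',
        )
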
diff --git a/synapse/server_notices/server_notices_sender.py b/synapse/server_notices/server_notices_sender.py
index 5d23965f34..6121b2f267 100644
--- a/synapse/server_notices/server_notices_sender.py
+++ b/synapse/server_notices/server_notices_sender.py
@@ -12,7 +12,12 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from twisted.internet import defer
+
 from synapse.server_notices.consent_server_notices import ConsentServerNotices
+from synapse.server_notices.resource_limits_server_notices import (
+    ResourceLimitsServerNotices,
+)
 
 
 class ServerNoticesSender(object):
@@ -25,34 +30,34 @@ class ServerNoticesSender(object):
         Args:
             hs (synapse.server.HomeServer):
         """
-        # todo: it would be nice to make this more dynamic
-        self._consent_server_notices = ConsentServerNotices(hs)
+        self._server_notices = (
+            ConsentServerNotices(hs),
+            ResourceLimitsServerNotices(hs)
+        )
 
+    @defer.inlineCallbacks
     def on_user_syncing(self, user_id):
         """Called when the user performs a sync operation.
 
         Args:
             user_id (str): mxid of user who synced
-
-        Returns:
-            Deferred
         """
-        return self._consent_server_notices.maybe_send_server_notice_to_user(
-            user_id,
-        )
+        for sn in self._server_notices:
+            yield sn.maybe_send_server_notice_to_user(
+                user_id,
+            )
 
+    @defer.inlineCallbacks
     def on_user_ip(self, user_id):
         """Called on the master when a worker process saw a client request.
 
         Args:
             user_id (str): mxid
-
-        Returns:
-            Deferred
         """
         # The synchrotrons use a stubbed version of ServerNoticesSender, so
         # we check for notices to send to the user in on_user_ip as well as
         # in on_user_syncing
-        return self._consent_server_notices.maybe_send_server_notice_to_user(
-            user_id,
-        )
+        for sn in self._server_notices:
+            yield sn.maybe_send_server_notice_to_user(
+                user_id,
+            )
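
Turning the single `ConsentServerNotices` reference into a tuple makes the sender extensible by duck typing: any entry only needs a `maybe_send_server_notice_to_user(user_id)` method returning a Deferred. A hypothetical third generator, purely for illustration (no such class exists in Synapse):

    from twisted.internet import defer

    class QuotaServerNotices(object):
        """Hypothetical example of a pluggable notice generator."""
        def __init__(self, hs):
            self._hs = hs

        def maybe_send_server_notice_to_user(self, user_id):
            # decide whether user_id needs a notice; return a Deferred
            return defer.succeed(None)
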
diff --git a/synapse/state.py b/synapse/state/__init__.py
index 216418f58d..b34970e4d1 100644
--- a/synapse/state.py
+++ b/synapse/state/__init__.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,26 +14,23 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import logging
+from collections import namedtuple
 
-from twisted.internet import defer
-
-from synapse import event_auth
-from synapse.util.logutils import log_function
-from synapse.util.caches.expiringcache import ExpiringCache
-from synapse.util.metrics import Measure
-from synapse.api.constants import EventTypes
-from synapse.api.errors import AuthError
-from synapse.events.snapshot import EventContext
-from synapse.util.async import Linearizer
-from synapse.util.caches import CACHE_SIZE_FACTOR
+from six import iteritems, itervalues
 
-from collections import namedtuple
 from frozendict import frozendict
 
-import logging
-import hashlib
+from twisted.internet import defer
 
-from six import iteritems, itervalues
+from synapse.api.constants import EventTypes, RoomVersions
+from synapse.events.snapshot import EventContext
+from synapse.state import v1
+from synapse.util.async_helpers import Linearizer
+from synapse.util.caches import get_cache_factor_for
+from synapse.util.caches.expiringcache import ExpiringCache
+from synapse.util.logutils import log_function
+from synapse.util.metrics import Measure
 
 logger = logging.getLogger(__name__)
 
@@ -40,7 +38,7 @@ logger = logging.getLogger(__name__)
 KeyStateTuple = namedtuple("KeyStateTuple", ("context", "type", "state_key"))
 
 
-SIZE_OF_CACHE = int(100000 * CACHE_SIZE_FACTOR)
+SIZE_OF_CACHE = 100000 * get_cache_factor_for("state_cache")
 EVICTION_TIMEOUT_SECONDS = 60 * 60
 
 
@@ -203,25 +201,27 @@ class StateHandler(object):
             # If this is an outlier, then we know it shouldn't have any current
             # state. Certainly store.get_current_state won't return any, and
             # persisting the event won't store the state group.
-            context = EventContext()
             if old_state:
-                context.prev_state_ids = {
+                prev_state_ids = {
                     (s.type, s.state_key): s.event_id for s in old_state
                 }
                 if event.is_state():
-                    context.current_state_ids = dict(context.prev_state_ids)
+                    current_state_ids = dict(prev_state_ids)
                     key = (event.type, event.state_key)
-                    context.current_state_ids[key] = event.event_id
+                    current_state_ids[key] = event.event_id
                 else:
-                    context.current_state_ids = context.prev_state_ids
+                    current_state_ids = prev_state_ids
             else:
-                context.current_state_ids = {}
-                context.prev_state_ids = {}
-            context.prev_state_events = []
+                current_state_ids = {}
+                prev_state_ids = {}
 
             # We don't store state for outliers, so we don't generate a state
-            # froup for it.
-            context.state_group = None
+            # group for it.
+            context = EventContext.with_state(
+                state_group=None,
+                current_state_ids=current_state_ids,
+                prev_state_ids=prev_state_ids,
+            )
 
             defer.returnValue(context)
 
@@ -230,79 +230,84 @@ class StateHandler(object):
             # Let's just correctly fill out the context and create a
             # new state group for it.
 
-            context = EventContext()
-            context.prev_state_ids = {
+            prev_state_ids = {
                 (s.type, s.state_key): s.event_id for s in old_state
             }
 
             if event.is_state():
                 key = (event.type, event.state_key)
-                if key in context.prev_state_ids:
-                    replaces = context.prev_state_ids[key]
+                if key in prev_state_ids:
+                    replaces = prev_state_ids[key]
                     if replaces != event.event_id:  # Paranoia check
                         event.unsigned["replaces_state"] = replaces
-                context.current_state_ids = dict(context.prev_state_ids)
-                context.current_state_ids[key] = event.event_id
+                current_state_ids = dict(prev_state_ids)
+                current_state_ids[key] = event.event_id
             else:
-                context.current_state_ids = context.prev_state_ids
+                current_state_ids = prev_state_ids
 
-            context.state_group = yield self.store.store_state_group(
+            state_group = yield self.store.store_state_group(
                 event.event_id,
                 event.room_id,
                 prev_group=None,
                 delta_ids=None,
-                current_state_ids=context.current_state_ids,
+                current_state_ids=current_state_ids,
+            )
+
+            context = EventContext.with_state(
+                state_group=state_group,
+                current_state_ids=current_state_ids,
+                prev_state_ids=prev_state_ids,
             )
 
-            context.prev_state_events = []
             defer.returnValue(context)
 
         logger.debug("calling resolve_state_groups from compute_event_context")
+
         entry = yield self.resolve_state_groups_for_events(
             event.room_id, [e for e, _ in event.prev_events],
         )
 
-        curr_state = entry.state
+        prev_state_ids = entry.state
+        prev_group = None
+        delta_ids = None
 
-        context = EventContext()
-        context.prev_state_ids = curr_state
         if event.is_state():
             # If this is a state event then we need to create a new state
             # group for the state after this event.
 
             key = (event.type, event.state_key)
-            if key in context.prev_state_ids:
-                replaces = context.prev_state_ids[key]
+            if key in prev_state_ids:
+                replaces = prev_state_ids[key]
                 event.unsigned["replaces_state"] = replaces
 
-            context.current_state_ids = dict(context.prev_state_ids)
-            context.current_state_ids[key] = event.event_id
+            current_state_ids = dict(prev_state_ids)
+            current_state_ids[key] = event.event_id
 
             if entry.state_group:
                 # If the state at the event has a state group assigned then
                 # we can use that as the prev group
-                context.prev_group = entry.state_group
-                context.delta_ids = {
+                prev_group = entry.state_group
+                delta_ids = {
                     key: event.event_id
                 }
             elif entry.prev_group:
                 # If the state at the event only has a prev group, then we can
                 # use that as a prev group too.
-                context.prev_group = entry.prev_group
-                context.delta_ids = dict(entry.delta_ids)
-                context.delta_ids[key] = event.event_id
+                prev_group = entry.prev_group
+                delta_ids = dict(entry.delta_ids)
+                delta_ids[key] = event.event_id
 
-            context.state_group = yield self.store.store_state_group(
+            state_group = yield self.store.store_state_group(
                 event.event_id,
                 event.room_id,
-                prev_group=context.prev_group,
-                delta_ids=context.delta_ids,
-                current_state_ids=context.current_state_ids,
+                prev_group=prev_group,
+                delta_ids=delta_ids,
+                current_state_ids=current_state_ids,
             )
         else:
-            context.current_state_ids = context.prev_state_ids
-            context.prev_group = entry.prev_group
-            context.delta_ids = entry.delta_ids
+            current_state_ids = prev_state_ids
+            prev_group = entry.prev_group
+            delta_ids = entry.delta_ids
 
             if entry.state_group is None:
                 entry.state_group = yield self.store.store_state_group(
@@ -310,13 +315,20 @@ class StateHandler(object):
                     event.room_id,
                     prev_group=entry.prev_group,
                     delta_ids=entry.delta_ids,
-                    current_state_ids=context.current_state_ids,
+                    current_state_ids=current_state_ids,
                 )
                 entry.state_id = entry.state_group
 
-            context.state_group = entry.state_group
+            state_group = entry.state_group
+
+        context = EventContext.with_state(
+            state_group=state_group,
+            current_state_ids=current_state_ids,
+            prev_state_ids=prev_state_ids,
+            prev_group=prev_group,
+            delta_ids=delta_ids,
+        )
 
-        context.prev_state_events = []
         defer.returnValue(context)
 
     @defer.inlineCallbacks
@@ -325,8 +337,11 @@ class StateHandler(object):
         event, resolves conflicts between them and returns them.
 
         Args:
-            room_id (str):
-            event_ids (list[str]):
+            room_id (str)
+            event_ids (list[str])
+            explicit_room_version (str|None): If set, uses the given room
+                version to choose the resolution algorithm. If None, then
+                checks the database for room version.
 
         Returns:
             Deferred[_StateCacheEntry]: resolved state
@@ -340,7 +355,12 @@ class StateHandler(object):
             room_id, event_ids
         )
 
-        if len(state_groups_ids) == 1:
+        if len(state_groups_ids) == 0:
+            defer.returnValue(_StateCacheEntry(
+                state={},
+                state_group=None,
+            ))
+        elif len(state_groups_ids) == 1:
             name, state_list = list(state_groups_ids.items()).pop()
 
             prev_group, delta_ids = yield self.store.get_state_group_delta(name)
@@ -352,8 +372,11 @@ class StateHandler(object):
                 delta_ids=delta_ids,
             ))
 
+        room_version = yield self.store.get_room_version(room_id)
+
         result = yield self._state_resolution_handler.resolve_state_groups(
-            room_id, state_groups_ids, None, self._state_map_factory,
+            room_id, room_version, state_groups_ids, None,
+            self._state_map_factory,
         )
         defer.returnValue(result)
 
@@ -362,7 +385,7 @@ class StateHandler(object):
             ev_ids, get_prev_content=False, check_redacted=False,
         )
 
-    def resolve_events(self, state_sets, event):
+    def resolve_events(self, room_version, state_sets, event):
         logger.info(
             "Resolving state for %s with %d groups", event.room_id, len(state_sets)
         )
@@ -378,7 +401,9 @@ class StateHandler(object):
         }
 
         with Measure(self.clock, "state._resolve_events"):
-            new_state = resolve_events_with_state_map(state_set_ids, state_map)
+            new_state = resolve_events_with_state_map(
+                room_version, state_set_ids, state_map,
+            )
 
         new_state = {
             key: state_map[ev_id] for key, ev_id in iteritems(new_state)
@@ -417,7 +442,7 @@ class StateResolutionHandler(object):
     @defer.inlineCallbacks
     @log_function
     def resolve_state_groups(
-        self, room_id, state_groups_ids, event_map, state_map_factory,
+        self, room_id, room_version, state_groups_ids, event_map, state_map_factory,
     ):
         """Resolves conflicts between a set of state groups
 
@@ -426,6 +451,7 @@ class StateResolutionHandler(object):
 
         Args:
             room_id (str): room we are resolving for (used for logging)
+            room_version (str): version of the room
             state_groups_ids (dict[int, dict[(str, str), str]]):
                  map from state group id to the state in that state group
                 (where 'state' is a map from state key to event id)
@@ -458,69 +484,40 @@ class StateResolutionHandler(object):
                 "Resolving state for %s with %d groups", room_id, len(state_groups_ids)
             )
 
-            # build a map from state key to the event_ids which set that state.
-            # dict[(str, str), set[str])
-            state = {}
+            # start by assuming we won't have any conflicted state, and build up the new
+            # state map by iterating through the state groups. If we discover a conflict,
+            # we give up and instead use `resolve_events_with_factory`.
+            #
+            # XXX: is this actually worthwhile, or should we just let
+            # resolve_events_with_factory do it?
+            new_state = {}
+            conflicted_state = False
             for st in itervalues(state_groups_ids):
                 for key, e_id in iteritems(st):
-                    state.setdefault(key, set()).add(e_id)
-
-            # build a map from state key to the event_ids which set that state,
-            # including only those where there are state keys in conflict.
-            conflicted_state = {
-                k: list(v)
-                for k, v in iteritems(state)
-                if len(v) > 1
-            }
+                    if key in new_state:
+                        conflicted_state = True
+                        break
+                    new_state[key] = e_id
+                if conflicted_state:
+                    break
 
             if conflicted_state:
                 logger.info("Resolving conflicted state for %r", room_id)
                 with Measure(self.clock, "state._resolve_events"):
                     new_state = yield resolve_events_with_factory(
-                        list(state_groups_ids.values()),
+                        room_version,
+                        list(itervalues(state_groups_ids)),
                         event_map=event_map,
                         state_map_factory=state_map_factory,
                     )
-            else:
-                new_state = {
-                    key: e_ids.pop() for key, e_ids in iteritems(state)
-                }
 
-            with Measure(self.clock, "state.create_group_ids"):
-                # if the new state matches any of the input state groups, we can
-                # use that state group again. Otherwise we will generate a state_id
-                # which will be used as a cache key for future resolutions, but
-                # not get persisted.
-                state_group = None
-                new_state_event_ids = frozenset(itervalues(new_state))
-                for sg, events in iteritems(state_groups_ids):
-                    if new_state_event_ids == frozenset(e_id for e_id in events):
-                        state_group = sg
-                        break
+            # if the new state matches any of the input state groups, we can
+            # use that state group again. Otherwise we will generate a state_id
+            # which will be used as a cache key for future resolutions, but
+            # not get persisted.
 
-                # TODO: We want to create a state group for this set of events, to
-                # increase cache hits, but we need to make sure that it doesn't
-                # end up as a prev_group without being added to the database
-
-                prev_group = None
-                delta_ids = None
-                for old_group, old_ids in iteritems(state_groups_ids):
-                    if not set(new_state) - set(old_ids):
-                        n_delta_ids = {
-                            k: v
-                            for k, v in iteritems(new_state)
-                            if old_ids.get(k) != v
-                        }
-                        if not delta_ids or len(n_delta_ids) < len(delta_ids):
-                            prev_group = old_group
-                            delta_ids = n_delta_ids
-
-            cache = _StateCacheEntry(
-                state=new_state,
-                state_group=state_group,
-                prev_group=prev_group,
-                delta_ids=delta_ids,
-            )
+            with Measure(self.clock, "state.create_group_ids"):
+                cache = _make_state_cache_entry(new_state, state_groups_ids)
 
             if self._state_cache is not None:
                 self._state_cache[group_names] = cache
@@ -528,16 +525,74 @@ class StateResolutionHandler(object):
             defer.returnValue(cache)
 
 
-def _ordered_events(events):
-    def key_func(e):
-        return -int(e.depth), hashlib.sha1(e.event_id.encode()).hexdigest()
+def _make_state_cache_entry(
+    new_state,
+    state_groups_ids,
+):
+    """Given a resolved state, and a set of input state groups, pick one to base
+    a new state group on (if any), and return an appropriately-constructed
+    _StateCacheEntry.
 
-    return sorted(events, key=key_func)
+    Args:
+        new_state (dict[(str, str), str]): resolved state map (mapping from
+           (type, state_key) to event_id)
 
+        state_groups_ids (dict[int, dict[(str, str), str]]):
+                 map from state group id to the state in that state group
+                (where 'state' is a map from state key to event id)
 
-def resolve_events_with_state_map(state_sets, state_map):
+    Returns:
+        _StateCacheEntry
+    """
+    # if the new state matches any of the input state groups, we can
+    # use that state group again. Otherwise we will generate a state_id
+    # which will be used as a cache key for future resolutions, but
+    # not get persisted.
+
+    # first look for exact matches
+    new_state_event_ids = set(itervalues(new_state))
+    for sg, state in iteritems(state_groups_ids):
+        if len(new_state_event_ids) != len(state):
+            continue
+
+        old_state_event_ids = set(itervalues(state))
+        if new_state_event_ids == old_state_event_ids:
+            # got an exact match.
+            return _StateCacheEntry(
+                state=new_state,
+                state_group=sg,
+            )
+
+    # TODO: We want to create a state group for this set of events, to
+    # increase cache hits, but we need to make sure that it doesn't
+    # end up as a prev_group without being added to the database
+
+    # failing that, look for the closest match.
+    prev_group = None
+    delta_ids = None
+
+    for old_group, old_state in iteritems(state_groups_ids):
+        n_delta_ids = {
+            k: v
+            for k, v in iteritems(new_state)
+            if old_state.get(k) != v
+        }
+        if not delta_ids or len(n_delta_ids) < len(delta_ids):
+            prev_group = old_group
+            delta_ids = n_delta_ids
+
+    return _StateCacheEntry(
+        state=new_state,
+        state_group=None,
+        prev_group=prev_group,
+        delta_ids=delta_ids,
+    )
+
+
+def resolve_events_with_state_map(room_version, state_sets, state_map):
     """
     Args:
+        room_version(str): Version of the room
         state_sets(list): List of dicts of (type, state_key) -> event_id,
             which are the different state groups to resolve.
         state_map(dict): a dict from event_id to event, for all events in
@@ -547,74 +602,23 @@ def resolve_events_with_state_map(state_sets, state_map):
         dict[(str, str), str]:
             a map from (type, state_key) to event_id.
     """
-    if len(state_sets) == 1:
-        return state_sets[0]
-
-    unconflicted_state, conflicted_state = _seperate(
-        state_sets,
-    )
-
-    auth_events = _create_auth_events_from_maps(
-        unconflicted_state, conflicted_state, state_map
-    )
-
-    return _resolve_with_state(
-        unconflicted_state, conflicted_state, auth_events, state_map
-    )
-
-
-def _seperate(state_sets):
-    """Takes the state_sets and figures out which keys are conflicted and
-    which aren't. i.e., which have multiple different event_ids associated
-    with them in different state sets.
-
-    Args:
-        state_sets(list[dict[(str, str), str]]):
-            List of dicts of (type, state_key) -> event_id, which are the
-            different state groups to resolve.
-
-    Returns:
-        (dict[(str, str), str], dict[(str, str), set[str]]):
-            A tuple of (unconflicted_state, conflicted_state), where:
-
-            unconflicted_state is a dict mapping (type, state_key)->event_id
-            for unconflicted state keys.
-
-            conflicted_state is a dict mapping (type, state_key) to a set of
-            event ids for conflicted state keys.
-    """
-    unconflicted_state = dict(state_sets[0])
-    conflicted_state = {}
-
-    for state_set in state_sets[1:]:
-        for key, value in iteritems(state_set):
-            # Check if there is an unconflicted entry for the state key.
-            unconflicted_value = unconflicted_state.get(key)
-            if unconflicted_value is None:
-                # There isn't an unconflicted entry so check if there is a
-                # conflicted entry.
-                ls = conflicted_state.get(key)
-                if ls is None:
-                    # There wasn't a conflicted entry so haven't seen this key before.
-                    # Therefore it isn't conflicted yet.
-                    unconflicted_state[key] = value
-                else:
-                    # This key is already conflicted, add our value to the conflict set.
-                    ls.add(value)
-            elif unconflicted_value != value:
-                # If the unconflicted value is not the same as our value then we
-                # have a new conflict. So move the key from the unconflicted_state
-                # to the conflicted state.
-                conflicted_state[key] = {value, unconflicted_value}
-                unconflicted_state.pop(key, None)
-
-    return unconflicted_state, conflicted_state
+    if room_version in (RoomVersions.V1, RoomVersions.VDH_TEST,):
+        return v1.resolve_events_with_state_map(
+            state_sets, state_map,
+        )
+    else:
+        # This should only happen if we added a version but forgot to add it to
+        # the list above.
+        raise Exception(
+            "No state resolution algorithm defined for version %r" % (room_version,)
+        )
 
 
-@defer.inlineCallbacks
-def resolve_events_with_factory(state_sets, event_map, state_map_factory):
+def resolve_events_with_factory(room_version, state_sets, event_map, state_map_factory):
     """
     Args:
+        room_version(str): Version of the room
+
         state_sets(list): List of dicts of (type, state_key) -> event_id,
             which are the different state groups to resolve.
 
@@ -634,185 +638,13 @@ def resolve_events_with_factory(state_sets, event_map, state_map_factory):
         Deferred[dict[(str, str), str]]:
             a map from (type, state_key) to event_id.
     """
-    if len(state_sets) == 1:
-        defer.returnValue(state_sets[0])
-
-    unconflicted_state, conflicted_state = _seperate(
-        state_sets,
-    )
-
-    needed_events = set(
-        event_id
-        for event_ids in itervalues(conflicted_state)
-        for event_id in event_ids
-    )
-    if event_map is not None:
-        needed_events -= set(event_map.iterkeys())
-
-    logger.info("Asking for %d conflicted events", len(needed_events))
-
-    # dict[str, FrozenEvent]: a map from state event id to event. Only includes
-    # the state events which are in conflict (and those in event_map)
-    state_map = yield state_map_factory(needed_events)
-    if event_map is not None:
-        state_map.update(event_map)
-
-    # get the ids of the auth events which allow us to authenticate the
-    # conflicted state, picking only from the unconflicting state.
-    #
-    # dict[(str, str), str]: a map from state key to event id
-    auth_events = _create_auth_events_from_maps(
-        unconflicted_state, conflicted_state, state_map
-    )
-
-    new_needed_events = set(itervalues(auth_events))
-    new_needed_events -= needed_events
-    if event_map is not None:
-        new_needed_events -= set(event_map.iterkeys())
-
-    logger.info("Asking for %d auth events", len(new_needed_events))
-
-    state_map_new = yield state_map_factory(new_needed_events)
-    state_map.update(state_map_new)
-
-    defer.returnValue(_resolve_with_state(
-        unconflicted_state, conflicted_state, auth_events, state_map
-    ))
-
-
-def _create_auth_events_from_maps(unconflicted_state, conflicted_state, state_map):
-    auth_events = {}
-    for event_ids in itervalues(conflicted_state):
-        for event_id in event_ids:
-            if event_id in state_map:
-                keys = event_auth.auth_types_for_event(state_map[event_id])
-                for key in keys:
-                    if key not in auth_events:
-                        event_id = unconflicted_state.get(key, None)
-                        if event_id:
-                            auth_events[key] = event_id
-    return auth_events
-
-
-def _resolve_with_state(unconflicted_state_ids, conflicted_state_ds, auth_event_ids,
-                        state_map):
-    conflicted_state = {}
-    for key, event_ids in iteritems(conflicted_state_ds):
-        events = [state_map[ev_id] for ev_id in event_ids if ev_id in state_map]
-        if len(events) > 1:
-            conflicted_state[key] = events
-        elif len(events) == 1:
-            unconflicted_state_ids[key] = events[0].event_id
-
-    auth_events = {
-        key: state_map[ev_id]
-        for key, ev_id in iteritems(auth_event_ids)
-        if ev_id in state_map
-    }
-
-    try:
-        resolved_state = _resolve_state_events(
-            conflicted_state, auth_events
+    if room_version in (RoomVersions.V1, RoomVersions.VDH_TEST,):
+        return v1.resolve_events_with_factory(
+            state_sets, event_map, state_map_factory,
+        )
+    else:
+        # This should only happen if we added a version but forgot to add it to
+        # the list above.
+        raise Exception(
+            "No state resolution algorithm defined for version %r" % (room_version,)
         )
-    except Exception:
-        logger.exception("Failed to resolve state")
-        raise
-
-    new_state = unconflicted_state_ids
-    for key, event in iteritems(resolved_state):
-        new_state[key] = event.event_id
-
-    return new_state
-
-
-def _resolve_state_events(conflicted_state, auth_events):
-    """ This is where we actually decide which of the conflicted state to
-    use.
-
-    We resolve conflicts in the following order:
-        1. power levels
-        2. join rules
-        3. memberships
-        4. other events.
-    """
-    resolved_state = {}
-    if POWER_KEY in conflicted_state:
-        events = conflicted_state[POWER_KEY]
-        logger.debug("Resolving conflicted power levels %r", events)
-        resolved_state[POWER_KEY] = _resolve_auth_events(
-            events, auth_events)
-
-    auth_events.update(resolved_state)
-
-    for key, events in iteritems(conflicted_state):
-        if key[0] == EventTypes.JoinRules:
-            logger.debug("Resolving conflicted join rules %r", events)
-            resolved_state[key] = _resolve_auth_events(
-                events,
-                auth_events
-            )
-
-    auth_events.update(resolved_state)
-
-    for key, events in iteritems(conflicted_state):
-        if key[0] == EventTypes.Member:
-            logger.debug("Resolving conflicted member lists %r", events)
-            resolved_state[key] = _resolve_auth_events(
-                events,
-                auth_events
-            )
-
-    auth_events.update(resolved_state)
-
-    for key, events in iteritems(conflicted_state):
-        if key not in resolved_state:
-            logger.debug("Resolving conflicted state %r:%r", key, events)
-            resolved_state[key] = _resolve_normal_events(
-                events, auth_events
-            )
-
-    return resolved_state
-
-
-def _resolve_auth_events(events, auth_events):
-    reverse = [i for i in reversed(_ordered_events(events))]
-
-    auth_keys = set(
-        key
-        for event in events
-        for key in event_auth.auth_types_for_event(event)
-    )
-
-    new_auth_events = {}
-    for key in auth_keys:
-        auth_event = auth_events.get(key, None)
-        if auth_event:
-            new_auth_events[key] = auth_event
-
-    auth_events = new_auth_events
-
-    prev_event = reverse[0]
-    for event in reverse[1:]:
-        auth_events[(prev_event.type, prev_event.state_key)] = prev_event
-        try:
-            # The signatures have already been checked at this point
-            event_auth.check(event, auth_events, do_sig_check=False, do_size_check=False)
-            prev_event = event
-        except AuthError:
-            return prev_event
-
-    return event
-
-
-def _resolve_normal_events(events, auth_events):
-    for event in _ordered_events(events):
-        try:
-            # The signatures have already been checked at this point
-            event_auth.check(event, auth_events, do_sig_check=False, do_size_check=False)
-            return event
-        except AuthError:
-            pass
-
-    # Use the last event (the one with the least depth) if they all fail
-    # the auth check.
-    return event
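
State resolution is now chosen per room version: both `resolve_events_with_state_map` and `resolve_events_with_factory` dispatch to `synapse.state.v1` for `RoomVersions.V1` and `RoomVersions.VDH_TEST`, and raise for anything else, so that adding a room version without wiring up an algorithm fails loudly. The same dispatch restated as a lookup table, purely as a sketch (the table is not Synapse's actual structure; the constants and functions are the ones this diff imports):

    from synapse.api.constants import RoomVersions
    from synapse.state import v1

    STATE_RES_ALGORITHMS = {
        RoomVersions.V1: v1.resolve_events_with_state_map,
        RoomVersions.VDH_TEST: v1.resolve_events_with_state_map,
    }

    def resolve(room_version, state_sets, state_map):
        try:
            algorithm = STATE_RES_ALGORITHMS[room_version]
        except KeyError:
            raise Exception(
                "No state resolution algorithm defined for version %r"
                % (room_version,)
            )
        return algorithm(state_sets, state_map)
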
diff --git a/synapse/state/v1.py b/synapse/state/v1.py
new file mode 100644
index 0000000000..3a1f7054a1
--- /dev/null
+++ b/synapse/state/v1.py
@@ -0,0 +1,321 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import hashlib
+import logging
+
+from six import iteritems, iterkeys, itervalues
+
+from twisted.internet import defer
+
+from synapse import event_auth
+from synapse.api.constants import EventTypes
+from synapse.api.errors import AuthError
+
+logger = logging.getLogger(__name__)
+
+
+POWER_KEY = (EventTypes.PowerLevels, "")
+
+
+def resolve_events_with_state_map(state_sets, state_map):
+    """
+    Args:
+        state_sets(list): List of dicts of (type, state_key) -> event_id,
+            which are the different state groups to resolve.
+        state_map(dict): a dict from event_id to event, for all events in
+            state_sets.
+
+    Returns:
+        dict[(str, str), str]:
+            a map from (type, state_key) to event_id.
+    """
+    if len(state_sets) == 1:
+        return state_sets[0]
+
+    unconflicted_state, conflicted_state = _seperate(
+        state_sets,
+    )
+
+    auth_events = _create_auth_events_from_maps(
+        unconflicted_state, conflicted_state, state_map
+    )
+
+    return _resolve_with_state(
+        unconflicted_state, conflicted_state, auth_events, state_map
+    )
+
+
+@defer.inlineCallbacks
+def resolve_events_with_factory(state_sets, event_map, state_map_factory):
+    """
+    Args:
+        state_sets(list): List of dicts of (type, state_key) -> event_id,
+            which are the different state groups to resolve.
+
+        event_map(dict[str,FrozenEvent]|None):
+            a dict from event_id to event, for any events that we happen to
+            have in flight (eg, those currently being persisted). This will be
+            used as a starting point for finding the state we need; any missing
+            events will be requested via state_map_factory.
+
+            If None, all events will be fetched via state_map_factory.
+
+        state_map_factory(func): will be called
+            with a list of event_ids that are needed, and should return with
+            a Deferred of dict of event_id to event.
+
+    Returns:
+        Deferred[dict[(str, str), str]]:
+            a map from (type, state_key) to event_id.
+    """
+    if len(state_sets) == 1:
+        defer.returnValue(state_sets[0])
+
+    unconflicted_state, conflicted_state = _seperate(
+        state_sets,
+    )
+
+    needed_events = set(
+        event_id
+        for event_ids in itervalues(conflicted_state)
+        for event_id in event_ids
+    )
+    if event_map is not None:
+        needed_events -= set(iterkeys(event_map))
+
+    logger.info("Asking for %d conflicted events", len(needed_events))
+
+    # dict[str, FrozenEvent]: a map from state event id to event. Only includes
+    # the state events which are in conflict (and those in event_map)
+    state_map = yield state_map_factory(needed_events)
+    if event_map is not None:
+        state_map.update(event_map)
+
+    # get the ids of the auth events which allow us to authenticate the
+    # conflicted state, picking only from the unconflicting state.
+    #
+    # dict[(str, str), str]: a map from state key to event id
+    auth_events = _create_auth_events_from_maps(
+        unconflicted_state, conflicted_state, state_map
+    )
+
+    new_needed_events = set(itervalues(auth_events))
+    new_needed_events -= needed_events
+    if event_map is not None:
+        new_needed_events -= set(iterkeys(event_map))
+
+    logger.info("Asking for %d auth events", len(new_needed_events))
+
+    state_map_new = yield state_map_factory(new_needed_events)
+    state_map.update(state_map_new)
+
+    defer.returnValue(_resolve_with_state(
+        unconflicted_state, conflicted_state, auth_events, state_map
+    ))
+
+
+def _separate(state_sets):
+    """Takes the state_sets and figures out which keys are conflicted and
+    which aren't, i.e. which keys have multiple different event_ids
+    associated with them across the state sets.
+
+    Args:
+        state_sets(iterable[dict[(str, str), str]]):
+            List of dicts of (type, state_key) -> event_id, which are the
+            different state groups to resolve.
+
+    Returns:
+        (dict[(str, str), str], dict[(str, str), set[str]]):
+            A tuple of (unconflicted_state, conflicted_state), where:
+
+            unconflicted_state is a dict mapping (type, state_key)->event_id
+            for unconflicted state keys.
+
+            conflicted_state is a dict mapping (type, state_key) to a set of
+            event ids for conflicted state keys.
+    """
+    state_set_iterator = iter(state_sets)
+    unconflicted_state = dict(next(state_set_iterator))
+    conflicted_state = {}
+
+    for state_set in state_set_iterator:
+        for key, value in iteritems(state_set):
+            # Check if there is an unconflicted entry for the state key.
+            unconflicted_value = unconflicted_state.get(key)
+            if unconflicted_value is None:
+                # There isn't an unconflicted entry so check if there is a
+                # conflicted entry.
+                ls = conflicted_state.get(key)
+                if ls is None:
+                    # There wasn't a conflicted entry either, so we haven't
+                    # seen this key before; it isn't conflicted yet.
+                    unconflicted_state[key] = value
+                else:
+                    # This key is already conflicted, add our value to the conflict set.
+                    ls.add(value)
+            elif unconflicted_value != value:
+                # If the unconflicted value is not the same as our value then we
+                # have a new conflict. So move the key from the unconflicted_state
+                # to the conflicted state.
+                conflicted_state[key] = {value, unconflicted_value}
+                unconflicted_state.pop(key, None)
+
+    return unconflicted_state, conflicted_state
+
+
+def _create_auth_events_from_maps(unconflicted_state, conflicted_state, state_map):
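+    # For each conflicted event, look up the state keys its auth depends on
+    # (via event_auth.auth_types_for_event) and take the corresponding entry
+    # from the *unconflicted* state, so that conflicted events are later
+    # checked against the parts of the state all branches agree on.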
+    auth_events = {}
+    for event_ids in itervalues(conflicted_state):
+        for event_id in event_ids:
+            if event_id in state_map:
+                keys = event_auth.auth_types_for_event(state_map[event_id])
+                for key in keys:
+                    if key not in auth_events:
+                        auth_event_id = unconflicted_state.get(key, None)
+                        if auth_event_id:
+                            auth_events[key] = auth_event_id
+    return auth_events
+
+
+def _resolve_with_state(unconflicted_state_ids, conflicted_state_ids, auth_event_ids,
+                        state_map):
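+    # Keep only the conflicts we could actually load: a key whose candidates
+    # collapse to a single loaded event is treated as unconflicted after all,
+    # and keys with no loaded events are dropped.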
+    conflicted_state = {}
+    for key, event_ids in iteritems(conflicted_state_ids):
+        events = [state_map[ev_id] for ev_id in event_ids if ev_id in state_map]
+        if len(events) > 1:
+            conflicted_state[key] = events
+        elif len(events) == 1:
+            unconflicted_state_ids[key] = events[0].event_id
+
+    auth_events = {
+        key: state_map[ev_id]
+        for key, ev_id in iteritems(auth_event_ids)
+        if ev_id in state_map
+    }
+
+    try:
+        resolved_state = _resolve_state_events(
+            conflicted_state, auth_events
+        )
+    except Exception:
+        logger.exception("Failed to resolve state")
+        raise
+
+    new_state = unconflicted_state_ids
+    for key, event in iteritems(resolved_state):
+        new_state[key] = event.event_id
+
+    return new_state
+
+
+def _resolve_state_events(conflicted_state, auth_events):
+    """ This is where we actually decide which of the conflicted state to
+    use.
+
+    We resolve conflicts in the following order:
+        1. power levels
+        2. join rules
+        3. memberships
+        4. other events.
+    """
+    resolved_state = {}
+    if POWER_KEY in conflicted_state:
+        events = conflicted_state[POWER_KEY]
+        logger.debug("Resolving conflicted power levels %r", events)
+        resolved_state[POWER_KEY] = _resolve_auth_events(
+            events, auth_events)
+
+    auth_events.update(resolved_state)
+
+    for key, events in iteritems(conflicted_state):
+        if key[0] == EventTypes.JoinRules:
+            logger.debug("Resolving conflicted join rules %r", events)
+            resolved_state[key] = _resolve_auth_events(
+                events,
+                auth_events
+            )
+
+    auth_events.update(resolved_state)
+
+    for key, events in iteritems(conflicted_state):
+        if key[0] == EventTypes.Member:
+            logger.debug("Resolving conflicted member lists %r", events)
+            resolved_state[key] = _resolve_auth_events(
+                events,
+                auth_events
+            )
+
+    auth_events.update(resolved_state)
+
+    for key, events in iteritems(conflicted_state):
+        if key not in resolved_state:
+            logger.debug("Resolving conflicted state %r:%r", key, events)
+            resolved_state[key] = _resolve_normal_events(
+                events, auth_events
+            )
+
+    return resolved_state
+
+
+def _resolve_auth_events(events, auth_events):
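+    # Walk the candidates from least to most preferred, installing each
+    # accepted event into auth_events before checking the next one; the
+    # first auth failure keeps the previously accepted event as the winner.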
+    reverse = list(reversed(_ordered_events(events)))
+
+    auth_keys = set(
+        key
+        for event in events
+        for key in event_auth.auth_types_for_event(event)
+    )
+
+    new_auth_events = {}
+    for key in auth_keys:
+        auth_event = auth_events.get(key, None)
+        if auth_event:
+            new_auth_events[key] = auth_event
+
+    auth_events = new_auth_events
+
+    prev_event = reverse[0]
+    for event in reverse[1:]:
+        auth_events[(prev_event.type, prev_event.state_key)] = prev_event
+        try:
+            # The signatures have already been checked at this point
+            event_auth.check(event, auth_events, do_sig_check=False, do_size_check=False)
+            prev_event = event
+        except AuthError:
+            return prev_event
+
+    return event
+
+
+def _resolve_normal_events(events, auth_events):
+    for event in _ordered_events(events):
+        try:
+            # The signatures have already been checked at this point
+            event_auth.check(event, auth_events, do_sig_check=False, do_size_check=False)
+            return event
+        except AuthError:
+            pass
+
+    # Use the last event (the one with the least depth) if they all fail
+    # the auth check.
+    return event
+
+
+def _ordered_events(events):
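+    # Order by descending depth, breaking ties deterministically with the
+    # SHA-1 of the event_id so that every server picks the same winner.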
+    def key_func(e):
+        return -int(e.depth), hashlib.sha1(e.event_id.encode('ascii')).hexdigest()
+
+    return sorted(events, key=key_func)
diff --git a/synapse/static/client/register/index.html b/synapse/static/client/register/index.html
index 600b3ee41e..886f2edd1f 100644
--- a/synapse/static/client/register/index.html
+++ b/synapse/static/client/register/index.html
@@ -4,7 +4,7 @@
 <meta name='viewport' content='width=device-width, initial-scale=1, user-scalable=no, minimum-scale=1.0, maximum-scale=1.0'> 
 <link rel="stylesheet" href="style.css">
 <script src="js/jquery-2.1.3.min.js"></script>
-<script src="js/recaptcha_ajax.js"></script>
+<script src="https://www.google.com/recaptcha/api/js/recaptcha_ajax.js"></script>
 <script src="register_config.js"></script>
 <script src="js/register.js"></script>
 </head>
diff --git a/synapse/static/client/register/js/recaptcha_ajax.js b/synapse/static/client/register/js/recaptcha_ajax.js
deleted file mode 100644
index d0e71e5b88..0000000000
--- a/synapse/static/client/register/js/recaptcha_ajax.js
+++ /dev/null
@@ -1,195 +0,0 @@
-(function(){var h,k=this,l=function(a){return void 0!==a},ba=function(){},n=function(a){var b=typeof a;if("object"==b)if(a){if(a instanceof Array)return"array";if(a instanceof Object)return b;var c=Object.prototype.toString.call(a);if("[object Window]"==c)return"object";if("[object Array]"==c||"number"==typeof a.length&&"undefined"!=typeof a.splice&&"undefined"!=typeof a.propertyIsEnumerable&&!a.propertyIsEnumerable("splice"))return"array";if("[object Function]"==c||"undefined"!=typeof a.call&&"undefined"!=typeof a.propertyIsEnumerable&&
-!a.propertyIsEnumerable("call"))return"function"}else return"null";else if("function"==b&&"undefined"==typeof a.call)return"object";return b},p=function(a){return"array"==n(a)},ca=function(a){var b=n(a);return"array"==b||"object"==b&&"number"==typeof a.length},q=function(a){return"string"==typeof a},r=function(a){return"function"==n(a)},da=function(a){var b=typeof a;return"object"==b&&null!=a||"function"==b},ea=function(a,b,c){return a.call.apply(a.bind,arguments)},fa=function(a,b,c){if(!a)throw Error();
-if(2<arguments.length){var d=Array.prototype.slice.call(arguments,2);return function(){var c=Array.prototype.slice.call(arguments);Array.prototype.unshift.apply(c,d);return a.apply(b,c)}}return function(){return a.apply(b,arguments)}},s=function(a,b,c){s=Function.prototype.bind&&-1!=Function.prototype.bind.toString().indexOf("native code")?ea:fa;return s.apply(null,arguments)},ga=function(a,b){var c=Array.prototype.slice.call(arguments,1);return function(){var b=c.slice();b.push.apply(b,arguments);
-return a.apply(this,b)}},ha=Date.now||function(){return+new Date},ia=null,t=function(a,b){var c=a.split("."),d=k;c[0]in d||!d.execScript||d.execScript("var "+c[0]);for(var e;c.length&&(e=c.shift());)!c.length&&l(b)?d[e]=b:d=d[e]?d[e]:d[e]={}},u=function(a,b){function c(){}c.prototype=b.prototype;a.superClass_=b.prototype;a.prototype=new c;a.base=function(a,c,g){return b.prototype[c].apply(a,Array.prototype.slice.call(arguments,2))}};
-Function.prototype.bind=Function.prototype.bind||function(a,b){if(1<arguments.length){var c=Array.prototype.slice.call(arguments,1);c.unshift(this,a);return s.apply(null,c)}return s(this,a)};var v={};t("RecaptchaTemplates",v);v.VertHtml='<table id="recaptcha_table" class="recaptchatable" > <tr> <td colspan="6" class=\'recaptcha_r1_c1\'></td> </tr> <tr> <td class=\'recaptcha_r2_c1\'></td> <td colspan="4" class=\'recaptcha_image_cell\'><center><div id="recaptcha_image"></div></center></td> <td class=\'recaptcha_r2_c2\'></td> </tr> <tr> <td rowspan="6" class=\'recaptcha_r3_c1\'></td> <td colspan="4" class=\'recaptcha_r3_c2\'></td> <td rowspan="6" class=\'recaptcha_r3_c3\'></td> </tr> <tr> <td rowspan="3" class=\'recaptcha_r4_c1\' height="49"> <div class="recaptcha_input_area"> <input name="recaptcha_response_field" id="recaptcha_response_field" type="text" autocorrect="off" autocapitalize="off" placeholder="" /> <span id="recaptcha_privacy" class="recaptcha_only_if_privacy"></span> </div> </td> <td rowspan="4" class=\'recaptcha_r4_c2\'></td> <td><a id=\'recaptcha_reload_btn\'><img id=\'recaptcha_reload\' width="25" height="17" /></a></td> <td rowspan="4" class=\'recaptcha_r4_c4\'></td> </tr> <tr> <td><a id=\'recaptcha_switch_audio_btn\' class="recaptcha_only_if_image"><img id=\'recaptcha_switch_audio\' width="25" height="16" alt="" /></a><a id=\'recaptcha_switch_img_btn\' class="recaptcha_only_if_audio"><img id=\'recaptcha_switch_img\' width="25" height="16" alt=""/></a></td> </tr> <tr> <td><a id=\'recaptcha_whatsthis_btn\'><img id=\'recaptcha_whatsthis\' width="25" height="16" /></a></td> </tr> <tr> <td class=\'recaptcha_r7_c1\'></td> <td class=\'recaptcha_r8_c1\'></td> </tr> </table> ';v.CleanCss=".recaptchatable td img{display:block}.recaptchatable .recaptcha_image_cell center img{height:57px}.recaptchatable .recaptcha_image_cell center{height:57px}.recaptchatable .recaptcha_image_cell{background-color:white;height:57px;padding:7px!important}.recaptchatable,#recaptcha_area tr,#recaptcha_area td,#recaptcha_area th{margin:0!important;border:0!important;border-collapse:collapse!important;vertical-align:middle!important}.recaptchatable *{margin:0;padding:0;border:0;color:black;position:static;top:auto;left:auto;right:auto;bottom:auto}.recaptchatable #recaptcha_image{position:relative;margin:auto;border:1px solid #dfdfdf!important}.recaptchatable #recaptcha_image #recaptcha_challenge_image{display:block}.recaptchatable #recaptcha_image #recaptcha_ad_image{display:block;position:absolute;top:0}.recaptchatable a img{border:0}.recaptchatable a,.recaptchatable a:hover{cursor:pointer;outline:none;border:0!important;padding:0!important;text-decoration:none;color:blue;background:none!important;font-weight:normal}.recaptcha_input_area{position:relative!important;background:none!important}.recaptchatable label.recaptcha_input_area_text{border:1px solid #dfdfdf!important;margin:0!important;padding:0!important;position:static!important;top:auto!important;left:auto!important;right:auto!important;bottom:auto!important}.recaptcha_theme_red label.recaptcha_input_area_text,.recaptcha_theme_white label.recaptcha_input_area_text{color:black!important}.recaptcha_theme_blackglass label.recaptcha_input_area_text{color:white!important}.recaptchatable #recaptcha_response_field{font-size:11pt}.recaptcha_theme_blackglass #recaptcha_response_field,.recaptcha_theme_white #recaptcha_response_field{border:1px solid gray}.recaptcha_theme_red #recaptcha_response_field{border:1px solid 
#cca940}.recaptcha_audio_cant_hear_link{font-size:7pt;color:black}.recaptchatable{line-height:1em;border:1px solid #dfdfdf!important}.recaptcha_error_text{color:red}.recaptcha_only_if_privacy{float:right;text-align:right;margin-right:7px}#recaptcha-ad-choices{position:absolute;height:15px;top:0;right:0}#recaptcha-ad-choices img{height:15px}.recaptcha-ad-choices-collapsed{width:30px;height:15px;display:block}.recaptcha-ad-choices-expanded{width:75px;height:15px;display:none}#recaptcha-ad-choices:hover .recaptcha-ad-choices-collapsed{display:none}#recaptcha-ad-choices:hover .recaptcha-ad-choices-expanded{display:block}";v.CleanHtml='<table id="recaptcha_table" class="recaptchatable"> <tr height="73"> <td class=\'recaptcha_image_cell\' width="302"><center><div id="recaptcha_image"></div></center></td> <td style="padding: 10px 7px 7px 7px;"> <a id=\'recaptcha_reload_btn\'><img id=\'recaptcha_reload\' width="25" height="18" alt="" /></a> <a id=\'recaptcha_switch_audio_btn\' class="recaptcha_only_if_image"><img id=\'recaptcha_switch_audio\' width="25" height="15" alt="" /></a><a id=\'recaptcha_switch_img_btn\' class="recaptcha_only_if_audio"><img id=\'recaptcha_switch_img\' width="25" height="15" alt=""/></a> <a id=\'recaptcha_whatsthis_btn\'><img id=\'recaptcha_whatsthis\' width="25" height="16" /></a> </td> <td style="padding: 18px 7px 18px 7px;"> <img id=\'recaptcha_logo\' alt="" width="71" height="36" /> </td> </tr> <tr> <td style="padding-left: 7px;"> <div class="recaptcha_input_area" style="padding-top: 2px; padding-bottom: 7px;"> <input style="border: 1px solid #3c3c3c; width: 302px;" name="recaptcha_response_field" id="recaptcha_response_field" type="text" /> </div> </td> <td colspan=2><span id="recaptcha_privacy" class="recaptcha_only_if_privacy"></span></td> </tr> </table> ';v.VertCss=".recaptchatable td img{display:block}.recaptchatable .recaptcha_r1_c1{background:url('IMGROOT/sprite.png') 0 -63px no-repeat;width:318px;height:9px}.recaptchatable .recaptcha_r2_c1{background:url('IMGROOT/sprite.png') -18px 0 no-repeat;width:9px;height:57px}.recaptchatable .recaptcha_r2_c2{background:url('IMGROOT/sprite.png') -27px 0 no-repeat;width:9px;height:57px}.recaptchatable .recaptcha_r3_c1{background:url('IMGROOT/sprite.png') 0 0 no-repeat;width:9px;height:63px}.recaptchatable .recaptcha_r3_c2{background:url('IMGROOT/sprite.png') -18px -57px no-repeat;width:300px;height:6px}.recaptchatable .recaptcha_r3_c3{background:url('IMGROOT/sprite.png') -9px 0 no-repeat;width:9px;height:63px}.recaptchatable .recaptcha_r4_c1{background:url('IMGROOT/sprite.png') -43px 0 no-repeat;width:171px;height:49px}.recaptchatable .recaptcha_r4_c2{background:url('IMGROOT/sprite.png') -36px 0 no-repeat;width:7px;height:57px}.recaptchatable .recaptcha_r4_c4{background:url('IMGROOT/sprite.png') -214px 0 no-repeat;width:97px;height:57px}.recaptchatable .recaptcha_r7_c1{background:url('IMGROOT/sprite.png') -43px -49px no-repeat;width:171px;height:8px}.recaptchatable .recaptcha_r8_c1{background:url('IMGROOT/sprite.png') -43px -49px no-repeat;width:25px;height:8px}.recaptchatable .recaptcha_image_cell center img{height:57px}.recaptchatable .recaptcha_image_cell center{height:57px}.recaptchatable .recaptcha_image_cell{background-color:white;height:57px}#recaptcha_area,#recaptcha_table{width:318px!important}.recaptchatable,#recaptcha_area tr,#recaptcha_area td,#recaptcha_area th{margin:0!important;border:0!important;padding:0!important;border-collapse:collapse!important;vertical-align:middle!important}.recaptchatable 
*{margin:0;padding:0;border:0;font-family:helvetica,sans-serif;font-size:8pt;color:black;position:static;top:auto;left:auto;right:auto;bottom:auto}.recaptchatable #recaptcha_image{position:relative;margin:auto}.recaptchatable #recaptcha_image #recaptcha_challenge_image{display:block}.recaptchatable #recaptcha_image #recaptcha_ad_image{display:block;position:absolute;top:0}.recaptchatable img{border:0!important;margin:0!important;padding:0!important}.recaptchatable a,.recaptchatable a:hover{cursor:pointer;outline:none;border:0!important;padding:0!important;text-decoration:none;color:blue;background:none!important;font-weight:normal}.recaptcha_input_area{position:relative!important;width:153px!important;height:45px!important;margin-left:7px!important;margin-right:7px!important;background:none!important}.recaptchatable label.recaptcha_input_area_text{margin:0!important;padding:0!important;position:static!important;top:auto!important;left:auto!important;right:auto!important;bottom:auto!important;background:none!important;height:auto!important;width:auto!important}.recaptcha_theme_red label.recaptcha_input_area_text,.recaptcha_theme_white label.recaptcha_input_area_text{color:black!important}.recaptcha_theme_blackglass label.recaptcha_input_area_text{color:white!important}.recaptchatable #recaptcha_response_field{width:153px!important;position:relative!important;bottom:7px!important;padding:0!important;margin:15px 0 0 0!important;font-size:10pt}.recaptcha_theme_blackglass #recaptcha_response_field,.recaptcha_theme_white #recaptcha_response_field{border:1px solid gray}.recaptcha_theme_red #recaptcha_response_field{border:1px solid #cca940}.recaptcha_audio_cant_hear_link{font-size:7pt;color:black}.recaptchatable{line-height:1!important}#recaptcha_instructions_error{color:red!important}.recaptcha_only_if_privacy{float:right;text-align:right}#recaptcha-ad-choices{position:absolute;height:15px;top:0;right:0}#recaptcha-ad-choices img{height:15px}.recaptcha-ad-choices-collapsed{width:30px;height:15px;display:block}.recaptcha-ad-choices-expanded{width:75px;height:15px;display:none}#recaptcha-ad-choices:hover .recaptcha-ad-choices-collapsed{display:none}#recaptcha-ad-choices:hover .recaptcha-ad-choices-expanded{display:block}";var w={visual_challenge:"Get a visual challenge",audio_challenge:"Get an audio challenge",refresh_btn:"Get a new challenge",instructions_visual:"Type the text:",instructions_audio:"Type what you hear:",help_btn:"Help",play_again:"Play sound again",cant_hear_this:"Download sound as MP3",incorrect_try_again:"Incorrect. Try again.",image_alt_text:"reCAPTCHA challenge image",privacy_and_terms:"Privacy & Terms"},ja={visual_challenge:"\u0627\u0644\u062d\u0635\u0648\u0644 \u0639\u0644\u0649 \u062a\u062d\u062f\u064d \u0645\u0631\u0626\u064a",
-audio_challenge:"\u0627\u0644\u062d\u0635\u0648\u0644 \u0639\u0644\u0649 \u062a\u062d\u062f\u064d \u0635\u0648\u062a\u064a",refresh_btn:"\u0627\u0644\u062d\u0635\u0648\u0644 \u0639\u0644\u0649 \u062a\u062d\u062f\u064d \u062c\u062f\u064a\u062f",instructions_visual:"\u064a\u0631\u062c\u0649 \u0643\u062a\u0627\u0628\u0629 \u0627\u0644\u0646\u0635:",instructions_audio:"\u0627\u0643\u062a\u0628 \u0645\u0627 \u062a\u0633\u0645\u0639\u0647:",help_btn:"\u0645\u0633\u0627\u0639\u062f\u0629",play_again:"\u062a\u0634\u063a\u064a\u0644 \u0627\u0644\u0635\u0648\u062a \u0645\u0631\u0629 \u0623\u062e\u0631\u0649",
-cant_hear_this:"\u062a\u0646\u0632\u064a\u0644 \u0627\u0644\u0635\u0648\u062a \u0628\u062a\u0646\u0633\u064a\u0642 MP3",incorrect_try_again:"\u063a\u064a\u0631 \u0635\u062d\u064a\u062d. \u0623\u0639\u062f \u0627\u0644\u0645\u062d\u0627\u0648\u0644\u0629.",image_alt_text:"\u0635\u0648\u0631\u0629 \u0627\u0644\u062a\u062d\u062f\u064a \u0645\u0646 reCAPTCHA",privacy_and_terms:"\u0627\u0644\u062e\u0635\u0648\u0635\u064a\u0629 \u0648\u0627\u0644\u0628\u0646\u0648\u062f"},ka={visual_challenge:"Obtener una pista visual",
-audio_challenge:"Obtener una pista sonora",refresh_btn:"Obtener una pista nueva",instructions_visual:"Introduzca el texto:",instructions_audio:"Escribe lo que oigas:",help_btn:"Ayuda",play_again:"Volver a reproducir el sonido",cant_hear_this:"Descargar el sonido en MP3",incorrect_try_again:"Incorrecto. Vu\u00e9lvelo a intentar.",image_alt_text:"Pista de imagen reCAPTCHA",privacy_and_terms:"Privacidad y condiciones"},la={visual_challenge:"Kumuha ng pagsubok na visual",audio_challenge:"Kumuha ng pagsubok na audio",
-refresh_btn:"Kumuha ng bagong pagsubok",instructions_visual:"I-type ang teksto:",instructions_audio:"I-type ang iyong narinig",help_btn:"Tulong",play_again:"I-play muli ang tunog",cant_hear_this:"I-download ang tunog bilang MP3",incorrect_try_again:"Hindi wasto. Muling subukan.",image_alt_text:"larawang panghamon ng reCAPTCHA",privacy_and_terms:"Privacy at Mga Tuntunin"},ma={visual_challenge:"Test visuel",audio_challenge:"Test audio",refresh_btn:"Nouveau test",instructions_visual:"Saisissez le texte\u00a0:",
-instructions_audio:"Qu'entendez-vous ?",help_btn:"Aide",play_again:"R\u00e9\u00e9couter",cant_hear_this:"T\u00e9l\u00e9charger l'audio au format MP3",incorrect_try_again:"Incorrect. Veuillez r\u00e9essayer.",image_alt_text:"Image reCAPTCHA",privacy_and_terms:"Confidentialit\u00e9 et conditions d'utilisation"},na={visual_challenge:"Dapatkan kata pengujian berbentuk visual",audio_challenge:"Dapatkan kata pengujian berbentuk audio",refresh_btn:"Dapatkan kata pengujian baru",instructions_visual:"Ketik teks:",
-instructions_audio:"Ketik yang Anda dengar:",help_btn:"Bantuan",play_again:"Putar suara sekali lagi",cant_hear_this:"Unduh suara sebagai MP3",incorrect_try_again:"Salah. Coba lagi.",image_alt_text:"Gambar tantangan reCAPTCHA",privacy_and_terms:"Privasi & Persyaratan"},oa={visual_challenge:"\u05e7\u05d1\u05dc \u05d0\u05ea\u05d2\u05e8 \u05d7\u05d6\u05d5\u05ea\u05d9",audio_challenge:"\u05e7\u05d1\u05dc \u05d0\u05ea\u05d2\u05e8 \u05e9\u05de\u05e2",refresh_btn:"\u05e7\u05d1\u05dc \u05d0\u05ea\u05d2\u05e8 \u05d7\u05d3\u05e9",
-instructions_visual:"\u05d4\u05e7\u05dc\u05d3 \u05d0\u05ea \u05d4\u05d8\u05e7\u05e1\u05d8:",instructions_audio:"\u05d4\u05e7\u05dc\u05d3 \u05d0\u05ea \u05de\u05d4 \u05e9\u05d0\u05ea\u05d4 \u05e9\u05d5\u05de\u05e2:",help_btn:"\u05e2\u05d6\u05e8\u05d4",play_again:"\u05d4\u05e4\u05e2\u05dc \u05e9\u05d5\u05d1 \u05d0\u05ea \u05d4\u05e9\u05de\u05e2",cant_hear_this:"\u05d4\u05d5\u05e8\u05d3 \u05e9\u05de\u05e2 \u05db-3MP",incorrect_try_again:"\u05e9\u05d2\u05d5\u05d9. \u05e0\u05e1\u05d4 \u05e9\u05d5\u05d1.",
-image_alt_text:"\u05ea\u05de\u05d5\u05e0\u05ea \u05d0\u05ea\u05d2\u05e8 \u05e9\u05dc reCAPTCHA",privacy_and_terms:"\u05e4\u05e8\u05d8\u05d9\u05d5\u05ea \u05d5\u05ea\u05e0\u05d0\u05d9\u05dd"},pa={visual_challenge:"Obter um desafio visual",audio_challenge:"Obter um desafio de \u00e1udio",refresh_btn:"Obter um novo desafio",instructions_visual:"Digite o texto:",instructions_audio:"Digite o que voc\u00ea ouve:",help_btn:"Ajuda",play_again:"Reproduzir som novamente",cant_hear_this:"Fazer download do som no formato MP3",
-incorrect_try_again:"Incorreto. Tente novamente.",image_alt_text:"Imagem de desafio reCAPTCHA",privacy_and_terms:"Privacidade e Termos"},qa={visual_challenge:"Ob\u0163ine\u0163i un cod captcha vizual",audio_challenge:"Ob\u0163ine\u0163i un cod captcha audio",refresh_btn:"Ob\u0163ine\u0163i un nou cod captcha",instructions_visual:"Introduce\u021bi textul:",instructions_audio:"Introduce\u0163i ceea ce auzi\u0163i:",help_btn:"Ajutor",play_again:"Reda\u0163i sunetul din nou",cant_hear_this:"Desc\u0103rca\u0163i fi\u015fierul audio ca MP3",
-incorrect_try_again:"Incorect. \u00cencerca\u0163i din nou.",image_alt_text:"Imagine de verificare reCAPTCHA",privacy_and_terms:"Confiden\u0163ialitate \u015fi termeni"},ra={visual_challenge:"\u6536\u5230\u4e00\u4e2a\u89c6\u9891\u9080\u8bf7",audio_challenge:"\u6362\u4e00\u7ec4\u97f3\u9891\u9a8c\u8bc1\u7801",refresh_btn:"\u6362\u4e00\u7ec4\u9a8c\u8bc1\u7801",instructions_visual:"\u8f93\u5165\u6587\u5b57\uff1a",instructions_audio:"\u8bf7\u952e\u5165\u60a8\u542c\u5230\u7684\u5185\u5bb9\uff1a",help_btn:"\u5e2e\u52a9",
-play_again:"\u91cd\u65b0\u64ad\u653e",cant_hear_this:"\u4ee5 MP3 \u683c\u5f0f\u4e0b\u8f7d\u58f0\u97f3",incorrect_try_again:"\u4e0d\u6b63\u786e\uff0c\u8bf7\u91cd\u8bd5\u3002",image_alt_text:"reCAPTCHA \u9a8c\u8bc1\u56fe\u7247",privacy_and_terms:"\u9690\u79c1\u6743\u548c\u4f7f\u7528\u6761\u6b3e"},sa={en:w,af:{visual_challenge:"Kry 'n visuele verifi\u00ebring",audio_challenge:"Kry 'n klankverifi\u00ebring",refresh_btn:"Kry 'n nuwe verifi\u00ebring",instructions_visual:"",instructions_audio:"Tik wat jy hoor:",
-help_btn:"Hulp",play_again:"Speel geluid weer",cant_hear_this:"Laai die klank af as MP3",incorrect_try_again:"Verkeerd. Probeer weer.",image_alt_text:"reCAPTCHA-uitdagingprent",privacy_and_terms:"Privaatheid en bepalings"},am:{visual_challenge:"\u12e8\u12a5\u12ed\u1273 \u1270\u130b\u1323\u121a \u12a0\u130d\u129d",audio_challenge:"\u120c\u120b \u12a0\u12f2\u1235 \u12e8\u12f5\u121d\u133d \u1325\u12eb\u1244 \u12ed\u1245\u1228\u1265",refresh_btn:"\u120c\u120b \u12a0\u12f2\u1235 \u1325\u12eb\u1244 \u12ed\u1245\u1228\u1265",
-instructions_visual:"",instructions_audio:"\u12e8\u121d\u1275\u1230\u121b\u12cd\u1295 \u1270\u12ed\u1265\u1361-",help_btn:"\u12a5\u1308\u12db",play_again:"\u12f5\u121d\u1339\u1295 \u12a5\u1295\u12f0\u1308\u1293 \u12a0\u132b\u12cd\u1275",cant_hear_this:"\u12f5\u121d\u1339\u1295 \u1260MP3 \u1245\u122d\u133d \u12a0\u12cd\u122d\u12f5",incorrect_try_again:"\u1275\u12ad\u12ad\u120d \u12a0\u12ed\u12f0\u1208\u121d\u1362 \u12a5\u1295\u12f0\u1308\u1293 \u121e\u12ad\u122d\u1362",image_alt_text:"reCAPTCHA \u121d\u1235\u120d \u130d\u1320\u121d",
-privacy_and_terms:"\u130d\u120b\u12ca\u1290\u1275 \u12a5\u1293 \u12cd\u120d"},ar:ja,"ar-EG":ja,bg:{visual_challenge:"\u041f\u043e\u043b\u0443\u0447\u0430\u0432\u0430\u043d\u0435 \u043d\u0430 \u0432\u0438\u0437\u0443\u0430\u043b\u043d\u0430 \u043f\u0440\u043e\u0432\u0435\u0440\u043a\u0430",audio_challenge:"\u0417\u0430\u0440\u0435\u0436\u0434\u0430\u043d\u0435 \u043d\u0430 \u0430\u0443\u0434\u0438\u043e\u0442\u0435\u0441\u0442",refresh_btn:"\u0417\u0430\u0440\u0435\u0436\u0434\u0430\u043d\u0435 \u043d\u0430 \u043d\u043e\u0432 \u0442\u0435\u0441\u0442",
-instructions_visual:"\u0412\u044a\u0432\u0435\u0434\u0435\u0442\u0435 \u0442\u0435\u043a\u0441\u0442\u0430:",instructions_audio:"\u0412\u044a\u0432\u0435\u0434\u0435\u0442\u0435 \u0447\u0443\u0442\u043e\u0442\u043e:",help_btn:"\u041f\u043e\u043c\u043e\u0449",play_again:"\u041f\u043e\u0432\u0442\u043e\u0440\u043d\u043e \u043f\u0443\u0441\u043a\u0430\u043d\u0435 \u043d\u0430 \u0437\u0432\u0443\u043a\u0430",cant_hear_this:"\u0418\u0437\u0442\u0435\u0433\u043b\u044f\u043d\u0435 \u043d\u0430 \u0437\u0432\u0443\u043a\u0430 \u0432\u044a\u0432 \u0444\u043e\u0440\u043c\u0430\u0442 MP3",
-incorrect_try_again:"\u041d\u0435\u043f\u0440\u0430\u0432\u0438\u043b\u043d\u043e. \u041e\u043f\u0438\u0442\u0430\u0439\u0442\u0435 \u043e\u0442\u043d\u043e\u0432\u043e.",image_alt_text:"\u0418\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u0438\u0435 \u043d\u0430 \u043f\u0440\u043e\u0432\u0435\u0440\u043a\u0430\u0442\u0430 \u0441 reCAPTCHA",privacy_and_terms:"\u041f\u043e\u0432\u0435\u0440\u0438\u0442\u0435\u043b\u043d\u043e\u0441\u0442 \u0438 \u041e\u0431\u0449\u0438 \u0443\u0441\u043b\u043e\u0432\u0438\u044f"},
-bn:{visual_challenge:"\u098f\u0995\u099f\u09bf \u09a6\u09c3\u09b6\u09cd\u09af\u09ae\u09be\u09a8 \u09aa\u09cd\u09b0\u09a4\u09bf\u09a6\u09cd\u09ac\u09a8\u09cd\u09a6\u09cd\u09ac\u09bf\u09a4\u09be \u09aa\u09be\u09a8",audio_challenge:"\u098f\u0995\u099f\u09bf \u0985\u09a1\u09bf\u0993 \u09aa\u09cd\u09b0\u09a4\u09bf\u09a6\u09cd\u09ac\u09a8\u09cd\u09a6\u09cd\u09ac\u09bf\u09a4\u09be  \u09aa\u09be\u09a8",refresh_btn:"\u098f\u0995\u099f\u09bf \u09a8\u09a4\u09c1\u09a8 \u09aa\u09cd\u09b0\u09a4\u09bf\u09a6\u09cd\u09ac\u09a8\u09cd\u09a6\u09cd\u09ac\u09bf\u09a4\u09be  \u09aa\u09be\u09a8",
-instructions_visual:"",instructions_audio:"\u0986\u09aa\u09a8\u09bf \u09af\u09be \u09b6\u09c1\u09a8\u099b\u09c7\u09a8 \u09a4\u09be \u09b2\u09bf\u0996\u09c1\u09a8:",help_btn:"\u09b8\u09b9\u09be\u09df\u09a4\u09be",play_again:"\u0986\u09ac\u09be\u09b0 \u09b8\u09be\u0989\u09a8\u09cd\u09a1 \u09aa\u09cd\u09b2\u09c7 \u0995\u09b0\u09c1\u09a8",cant_hear_this:"MP3 \u09b0\u09c2\u09aa\u09c7 \u09b6\u09ac\u09cd\u09a6 \u09a1\u09be\u0989\u09a8\u09b2\u09cb\u09a1 \u0995\u09b0\u09c1\u09a8",incorrect_try_again:"\u09ac\u09c7\u09a0\u09bf\u0995\u09f7 \u0986\u09ac\u09be\u09b0 \u099a\u09c7\u09b7\u09cd\u099f\u09be \u0995\u09b0\u09c1\u09a8\u09f7",
-image_alt_text:"reCAPTCHA \u099a\u09cd\u09af\u09be\u09b2\u09c7\u099e\u09cd\u099c \u099a\u09bf\u09a4\u09cd\u09b0",privacy_and_terms:"\u0997\u09cb\u09aa\u09a8\u09c0\u09af\u09bc\u09a4\u09be \u0993 \u09b6\u09b0\u09cd\u09a4\u09be\u09ac\u09b2\u09c0"},ca:{visual_challenge:"Obt\u00e9n un repte visual",audio_challenge:"Obteniu una pista sonora",refresh_btn:"Obteniu una pista nova",instructions_visual:"Escriviu el text:",instructions_audio:"Escriviu el que escolteu:",help_btn:"Ajuda",play_again:"Torna a reproduir el so",
-cant_hear_this:"Baixa el so com a MP3",incorrect_try_again:"No \u00e9s correcte. Torna-ho a provar.",image_alt_text:"Imatge del repte de reCAPTCHA",privacy_and_terms:"Privadesa i condicions"},cs:{visual_challenge:"Zobrazit vizu\u00e1ln\u00ed podobu v\u00fdrazu",audio_challenge:"P\u0159ehr\u00e1t zvukovou podobu v\u00fdrazu",refresh_btn:"Zobrazit nov\u00fd v\u00fdraz",instructions_visual:"Zadejte text:",instructions_audio:"Napi\u0161te, co jste sly\u0161eli:",help_btn:"N\u00e1pov\u011bda",play_again:"Znovu p\u0159ehr\u00e1t zvuk",
-cant_hear_this:"St\u00e1hnout zvuk ve form\u00e1tu MP3",incorrect_try_again:"\u0160patn\u011b. Zkuste to znovu.",image_alt_text:"Obr\u00e1zek reCAPTCHA",privacy_and_terms:"Ochrana soukrom\u00ed a smluvn\u00ed podm\u00ednky"},da:{visual_challenge:"Hent en visuel udfordring",audio_challenge:"Hent en lydudfordring",refresh_btn:"Hent en ny udfordring",instructions_visual:"Indtast teksten:",instructions_audio:"Indtast det, du h\u00f8rer:",help_btn:"Hj\u00e6lp",play_again:"Afspil lyden igen",cant_hear_this:"Download lyd som MP3",
-incorrect_try_again:"Forkert. Pr\u00f8v igen.",image_alt_text:"reCAPTCHA-udfordringsbillede",privacy_and_terms:"Privatliv og vilk\u00e5r"},de:{visual_challenge:"Captcha abrufen",audio_challenge:"Audio-Captcha abrufen",refresh_btn:"Neues Captcha abrufen",instructions_visual:"Geben Sie den angezeigten Text ein:",instructions_audio:"Geben Sie das Geh\u00f6rte ein:",help_btn:"Hilfe",play_again:"Wort erneut abspielen",cant_hear_this:"Wort als MP3 herunterladen",incorrect_try_again:"Falsch. Bitte versuchen Sie es erneut.",
-image_alt_text:"reCAPTCHA-Bild",privacy_and_terms:"Datenschutzerkl\u00e4rung & Nutzungsbedingungen"},el:{visual_challenge:"\u039f\u03c0\u03c4\u03b9\u03ba\u03ae \u03c0\u03c1\u03cc\u03ba\u03bb\u03b7\u03c3\u03b7",audio_challenge:"\u0397\u03c7\u03b7\u03c4\u03b9\u03ba\u03ae \u03c0\u03c1\u03cc\u03ba\u03bb\u03b7\u03c3\u03b7",refresh_btn:"\u039d\u03ad\u03b1 \u03c0\u03c1\u03cc\u03ba\u03bb\u03b7\u03c3\u03b7",instructions_visual:"\u03a0\u03bb\u03b7\u03ba\u03c4\u03c1\u03bf\u03bb\u03bf\u03b3\u03ae\u03c3\u03c4\u03b5 \u03c4\u03bf \u03ba\u03b5\u03af\u03bc\u03b5\u03bd\u03bf:",
-instructions_audio:"\u03a0\u03bb\u03b7\u03ba\u03c4\u03c1\u03bf\u03bb\u03bf\u03b3\u03ae\u03c3\u03c4\u03b5 \u03cc\u03c4\u03b9 \u03b1\u03ba\u03bf\u03cd\u03c4\u03b5:",help_btn:"\u0392\u03bf\u03ae\u03b8\u03b5\u03b9\u03b1",play_again:"\u0391\u03bd\u03b1\u03c0\u03b1\u03c1\u03b1\u03b3\u03c9\u03b3\u03ae \u03ae\u03c7\u03bf\u03c5 \u03be\u03b1\u03bd\u03ac",cant_hear_this:"\u039b\u03ae\u03c8\u03b7 \u03ae\u03c7\u03bf\u03c5 \u03c9\u03c2 \u039c\u03a13",incorrect_try_again:"\u039b\u03ac\u03b8\u03bf\u03c2. \u0394\u03bf\u03ba\u03b9\u03bc\u03ac\u03c3\u03c4\u03b5 \u03be\u03b1\u03bd\u03ac.",
-image_alt_text:"\u0395\u03b9\u03ba\u03cc\u03bd\u03b1 \u03c0\u03c1\u03cc\u03ba\u03bb\u03b7\u03c3\u03b7\u03c2 reCAPTCHA",privacy_and_terms:"\u0391\u03c0\u03cc\u03c1\u03c1\u03b7\u03c4\u03bf \u03ba\u03b1\u03b9 \u03cc\u03c1\u03bf\u03b9"},"en-GB":w,"en-US":w,es:ka,"es-419":{visual_challenge:"Enfrentar un desaf\u00edo visual",audio_challenge:"Enfrentar un desaf\u00edo de audio",refresh_btn:"Enfrentar un nuevo desaf\u00edo",instructions_visual:"Escriba el texto:",instructions_audio:"Escribe lo que escuchas:",
-help_btn:"Ayuda",play_again:"Reproducir sonido de nuevo",cant_hear_this:"Descargar sonido en formato MP3",incorrect_try_again:"Incorrecto. Vuelve a intentarlo.",image_alt_text:"Imagen del desaf\u00edo de la reCAPTCHA",privacy_and_terms:"Privacidad y condiciones"},"es-ES":ka,et:{visual_challenge:"Kuva kuvap\u00f5hine robotil\u00f5ks",audio_challenge:"Kuva helip\u00f5hine robotil\u00f5ks",refresh_btn:"Kuva uus robotil\u00f5ks",instructions_visual:"Tippige tekst:",instructions_audio:"Tippige, mida kuulete.",
-help_btn:"Abi",play_again:"Esita heli uuesti",cant_hear_this:"Laadi heli alla MP3-vormingus",incorrect_try_again:"Vale. Proovige uuesti.",image_alt_text:"reCAPTCHA robotil\u00f5ksu kujutis",privacy_and_terms:"Privaatsus ja tingimused"},eu:{visual_challenge:"Eskuratu ikusizko erronka",audio_challenge:"Eskuratu audio-erronka",refresh_btn:"Eskuratu erronka berria",instructions_visual:"",instructions_audio:"Idatzi entzuten duzuna:",help_btn:"Laguntza",play_again:"Erreproduzitu soinua berriro",cant_hear_this:"Deskargatu soinua MP3 gisa",
-incorrect_try_again:"Ez da zuzena. Saiatu berriro.",image_alt_text:"reCAPTCHA erronkaren irudia",privacy_and_terms:"Pribatutasuna eta baldintzak"},fa:{visual_challenge:"\u062f\u0631\u06cc\u0627\u0641\u062a \u06cc\u06a9 \u0645\u0639\u0645\u0627\u06cc \u062f\u06cc\u062f\u0627\u0631\u06cc",audio_challenge:"\u062f\u0631\u06cc\u0627\u0641\u062a \u06cc\u06a9 \u0645\u0639\u0645\u0627\u06cc \u0635\u0648\u062a\u06cc",refresh_btn:"\u062f\u0631\u06cc\u0627\u0641\u062a \u06cc\u06a9 \u0645\u0639\u0645\u0627\u06cc \u062c\u062f\u06cc\u062f",
-instructions_visual:"",instructions_audio:"\u0622\u0646\u0686\u0647 \u0631\u0627 \u06a9\u0647 \u0645\u06cc\u200c\u0634\u0646\u0648\u06cc\u062f \u062a\u0627\u06cc\u067e \u06a9\u0646\u06cc\u062f:",help_btn:"\u0631\u0627\u0647\u0646\u0645\u0627\u06cc\u06cc",play_again:"\u067e\u062e\u0634 \u0645\u062c\u062f\u062f \u0635\u062f\u0627",cant_hear_this:"\u062f\u0627\u0646\u0644\u0648\u062f \u0635\u062f\u0627 \u0628\u0647 \u0635\u0648\u0631\u062a MP3",incorrect_try_again:"\u0646\u0627\u062f\u0631\u0633\u062a. \u062f\u0648\u0628\u0627\u0631\u0647 \u0627\u0645\u062a\u062d\u0627\u0646 \u06a9\u0646\u06cc\u062f.",
-image_alt_text:"\u062a\u0635\u0648\u06cc\u0631 \u0686\u0627\u0644\u0634\u06cc reCAPTCHA",privacy_and_terms:"\u062d\u0631\u06cc\u0645 \u062e\u0635\u0648\u0635\u06cc \u0648 \u0634\u0631\u0627\u06cc\u0637"},fi:{visual_challenge:"Kuvavahvistus",audio_challenge:"\u00c4\u00e4nivahvistus",refresh_btn:"Uusi kuva",instructions_visual:"Kirjoita teksti:",instructions_audio:"Kirjoita kuulemasi:",help_btn:"Ohje",play_again:"Toista \u00e4\u00e4ni uudelleen",cant_hear_this:"Lataa \u00e4\u00e4ni MP3-tiedostona",
-incorrect_try_again:"V\u00e4\u00e4rin. Yrit\u00e4 uudelleen.",image_alt_text:"reCAPTCHA-kuva",privacy_and_terms:"Tietosuoja ja k\u00e4ytt\u00f6ehdot"},fil:la,fr:ma,"fr-CA":{visual_challenge:"Obtenir un test visuel",audio_challenge:"Obtenir un test audio",refresh_btn:"Obtenir un nouveau test",instructions_visual:"Saisissez le texte\u00a0:",instructions_audio:"Tapez ce que vous entendez\u00a0:",help_btn:"Aide",play_again:"Jouer le son de nouveau",cant_hear_this:"T\u00e9l\u00e9charger le son en format MP3",
-incorrect_try_again:"Erreur, essayez \u00e0 nouveau",image_alt_text:"Image reCAPTCHA",privacy_and_terms:"Confidentialit\u00e9 et conditions d'utilisation"},"fr-FR":ma,gl:{visual_challenge:"Obter unha proba visual",audio_challenge:"Obter unha proba de audio",refresh_btn:"Obter unha proba nova",instructions_visual:"",instructions_audio:"Escribe o que escoitas:",help_btn:"Axuda",play_again:"Reproducir o son de novo",cant_hear_this:"Descargar son como MP3",incorrect_try_again:"Incorrecto. T\u00e9ntao de novo.",
-image_alt_text:"Imaxe de proba de reCAPTCHA",privacy_and_terms:"Privacidade e condici\u00f3ns"},gu:{visual_challenge:"\u0a8f\u0a95 \u0aa6\u0ac3\u0ab6\u0acd\u0aaf\u0abe\u0aa4\u0acd\u0aae\u0a95 \u0aaa\u0aa1\u0a95\u0abe\u0ab0 \u0aae\u0ac7\u0ab3\u0ab5\u0acb",audio_challenge:"\u0a8f\u0a95 \u0a91\u0aa1\u0abf\u0a93 \u0aaa\u0aa1\u0a95\u0abe\u0ab0 \u0aae\u0ac7\u0ab3\u0ab5\u0acb",refresh_btn:"\u0a8f\u0a95 \u0aa8\u0ab5\u0acb \u0aaa\u0aa1\u0a95\u0abe\u0ab0 \u0aae\u0ac7\u0ab3\u0ab5\u0acb",instructions_visual:"",
-instructions_audio:"\u0aa4\u0aae\u0ac7 \u0a9c\u0ac7 \u0ab8\u0abe\u0a82\u0aad\u0ab3\u0acb \u0a9b\u0acb \u0aa4\u0ac7 \u0ab2\u0a96\u0acb:",help_btn:"\u0ab8\u0ab9\u0abe\u0aaf",play_again:"\u0aa7\u0acd\u0ab5\u0aa8\u0abf \u0aab\u0ab0\u0ac0\u0aa5\u0ac0 \u0a9a\u0ab2\u0abe\u0ab5\u0acb",cant_hear_this:"MP3 \u0aa4\u0ab0\u0ac0\u0a95\u0ac7 \u0aa7\u0acd\u0ab5\u0aa8\u0abf\u0aa8\u0ac7 \u0aa1\u0abe\u0a89\u0aa8\u0ab2\u0acb\u0aa1 \u0a95\u0ab0\u0acb",incorrect_try_again:"\u0a96\u0acb\u0a9f\u0ac1\u0a82. \u0aab\u0ab0\u0ac0 \u0aaa\u0acd\u0ab0\u0aaf\u0abe\u0ab8 \u0a95\u0ab0\u0acb.",
-image_alt_text:"reCAPTCHA \u0aaa\u0aa1\u0a95\u0abe\u0ab0 \u0a9b\u0aac\u0ac0",privacy_and_terms:"\u0a97\u0acb\u0aaa\u0aa8\u0ac0\u0aaf\u0aa4\u0abe \u0a85\u0aa8\u0ac7 \u0ab6\u0ab0\u0aa4\u0acb"},hi:{visual_challenge:"\u0915\u094b\u0908 \u0935\u093f\u091c\u0941\u0905\u0932 \u091a\u0941\u0928\u094c\u0924\u0940 \u0932\u0947\u0902",audio_challenge:"\u0915\u094b\u0908 \u0911\u0921\u093f\u092f\u094b \u091a\u0941\u0928\u094c\u0924\u0940 \u0932\u0947\u0902",refresh_btn:"\u0915\u094b\u0908 \u0928\u0908 \u091a\u0941\u0928\u094c\u0924\u0940 \u0932\u0947\u0902",
-instructions_visual:"\u091f\u0947\u0915\u094d\u0938\u094d\u091f \u091f\u093e\u0907\u092a \u0915\u0930\u0947\u0902:",instructions_audio:"\u091c\u094b \u0906\u092a \u0938\u0941\u0928 \u0930\u0939\u0947 \u0939\u0948\u0902 \u0909\u0938\u0947 \u0932\u093f\u0916\u0947\u0902:",help_btn:"\u0938\u0939\u093e\u092f\u0924\u093e",play_again:"\u0927\u094d\u200d\u0935\u0928\u093f \u092a\u0941\u0928: \u091a\u0932\u093e\u090f\u0902",cant_hear_this:"\u0927\u094d\u200d\u0935\u0928\u093f \u0915\u094b MP3 \u0915\u0947 \u0930\u0942\u092a \u092e\u0947\u0902 \u0921\u093e\u0909\u0928\u0932\u094b\u0921 \u0915\u0930\u0947\u0902",
-incorrect_try_again:"\u0917\u0932\u0924. \u092a\u0941\u0928: \u092a\u094d\u0930\u092f\u093e\u0938 \u0915\u0930\u0947\u0902.",image_alt_text:"reCAPTCHA \u091a\u0941\u0928\u094c\u0924\u0940 \u091a\u093f\u0924\u094d\u0930",privacy_and_terms:"\u0917\u094b\u092a\u0928\u0940\u092f\u0924\u093e \u0914\u0930 \u0936\u0930\u094d\u0924\u0947\u0902"},hr:{visual_challenge:"Dohvati vizualni upit",audio_challenge:"Dohvati zvu\u010dni upit",refresh_btn:"Dohvati novi upit",instructions_visual:"Unesite tekst:",instructions_audio:"Upi\u0161ite \u0161to \u010dujete:",
-help_btn:"Pomo\u0107",play_again:"Ponovi zvuk",cant_hear_this:"Preuzmi zvuk u MP3 formatu",incorrect_try_again:"Nije to\u010dno. Poku\u0161ajte ponovno.",image_alt_text:"Slikovni izazov reCAPTCHA",privacy_and_terms:"Privatnost i odredbe"},hu:{visual_challenge:"Vizu\u00e1lis kih\u00edv\u00e1s k\u00e9r\u00e9se",audio_challenge:"Hangkih\u00edv\u00e1s k\u00e9r\u00e9se",refresh_btn:"\u00daj kih\u00edv\u00e1s k\u00e9r\u00e9se",instructions_visual:"\u00cdrja be a sz\u00f6veget:",instructions_audio:"\u00cdrja le, amit hall:",
-help_btn:"S\u00fag\u00f3",play_again:"Hang ism\u00e9telt lej\u00e1tsz\u00e1sa",cant_hear_this:"Hang let\u00f6lt\u00e9se MP3 form\u00e1tumban",incorrect_try_again:"Hib\u00e1s. Pr\u00f3b\u00e1lkozzon \u00fajra.",image_alt_text:"reCAPTCHA ellen\u0151rz\u0151 k\u00e9p",privacy_and_terms:"Adatv\u00e9delem \u00e9s Szerz\u0151d\u00e9si Felt\u00e9telek"},hy:{visual_challenge:"\u054d\u057f\u0561\u0576\u0561\u056c \u057f\u0565\u057d\u0578\u0572\u0561\u056f\u0561\u0576 \u056d\u0576\u0564\u056b\u0580",audio_challenge:"\u054d\u057f\u0561\u0576\u0561\u056c \u0571\u0561\u0575\u0576\u0561\u0575\u056b\u0576 \u056d\u0576\u0564\u056b\u0580",
-refresh_btn:"\u054d\u057f\u0561\u0576\u0561\u056c \u0576\u0578\u0580 \u056d\u0576\u0564\u056b\u0580",instructions_visual:"\u0544\u0578\u0582\u057f\u0584\u0561\u0563\u0580\u0565\u0584 \u057f\u0565\u0584\u057d\u057f\u0568\u055d",instructions_audio:"\u0544\u0578\u0582\u057f\u0584\u0561\u0563\u0580\u0565\u0584 \u0561\u0575\u0576, \u056b\u0576\u0579 \u056c\u057d\u0578\u0582\u0574 \u0565\u0584\u055d",help_btn:"\u0555\u0563\u0576\u0578\u0582\u0569\u0575\u0578\u0582\u0576",play_again:"\u0546\u057e\u0561\u0563\u0561\u0580\u056f\u0565\u056c \u0571\u0561\u0575\u0576\u0568 \u056f\u0580\u056f\u056b\u0576",
-cant_hear_this:"\u0532\u0565\u057c\u0576\u0565\u056c \u0571\u0561\u0575\u0576\u0568 \u0578\u0580\u057a\u0565\u057d MP3",incorrect_try_again:"\u054d\u056d\u0561\u056c \u0567: \u0553\u0578\u0580\u0571\u0565\u0584 \u056f\u0580\u056f\u056b\u0576:",image_alt_text:"reCAPTCHA \u057a\u0561\u057f\u056f\u0565\u0580\u0578\u057e \u056d\u0576\u0564\u056b\u0580",privacy_and_terms:"\u0533\u0561\u0572\u057f\u0576\u056b\u0578\u0582\u0569\u0575\u0561\u0576 & \u057a\u0561\u0575\u0574\u0561\u0576\u0576\u0565\u0580"},
-id:na,is:{visual_challenge:"F\u00e1 a\u00f0gangspr\u00f3f sem mynd",audio_challenge:"F\u00e1 a\u00f0gangspr\u00f3f sem hlj\u00f3\u00f0skr\u00e1",refresh_btn:"F\u00e1 n\u00fdtt a\u00f0gangspr\u00f3f",instructions_visual:"",instructions_audio:"Sl\u00e1\u00f0u inn \u00fea\u00f0 sem \u00fe\u00fa heyrir:",help_btn:"Hj\u00e1lp",play_again:"Spila hlj\u00f3\u00f0 aftur",cant_hear_this:"S\u00e6kja hlj\u00f3\u00f0 sem MP3",incorrect_try_again:"Rangt. Reyndu aftur.",image_alt_text:"mynd reCAPTCHA a\u00f0gangspr\u00f3fs",
-privacy_and_terms:"Pers\u00f3nuvernd og skilm\u00e1lar"},it:{visual_challenge:"Verifica visiva",audio_challenge:"Verifica audio",refresh_btn:"Nuova verifica",instructions_visual:"Digita il testo:",instructions_audio:"Digita ci\u00f2 che senti:",help_btn:"Guida",play_again:"Riproduci di nuovo audio",cant_hear_this:"Scarica audio in MP3",incorrect_try_again:"Sbagliato. Riprova.",image_alt_text:"Immagine di verifica reCAPTCHA",privacy_and_terms:"Privacy e Termini"},iw:oa,ja:{visual_challenge:"\u753b\u50cf\u3067\u78ba\u8a8d\u3057\u307e\u3059",
-audio_challenge:"\u97f3\u58f0\u3067\u78ba\u8a8d\u3057\u307e\u3059",refresh_btn:"\u5225\u306e\u5358\u8a9e\u3067\u3084\u308a\u76f4\u3057\u307e\u3059",instructions_visual:"\u30c6\u30ad\u30b9\u30c8\u3092\u5165\u529b:",instructions_audio:"\u805e\u3053\u3048\u305f\u5358\u8a9e\u3092\u5165\u529b\u3057\u307e\u3059:",help_btn:"\u30d8\u30eb\u30d7",play_again:"\u3082\u3046\u4e00\u5ea6\u805e\u304f",cant_hear_this:"MP3 \u3067\u97f3\u58f0\u3092\u30c0\u30a6\u30f3\u30ed\u30fc\u30c9",incorrect_try_again:"\u6b63\u3057\u304f\u3042\u308a\u307e\u305b\u3093\u3002\u3082\u3046\u4e00\u5ea6\u3084\u308a\u76f4\u3057\u3066\u304f\u3060\u3055\u3044\u3002",
-image_alt_text:"reCAPTCHA \u78ba\u8a8d\u7528\u753b\u50cf",privacy_and_terms:"\u30d7\u30e9\u30a4\u30d0\u30b7\u30fc\u3068\u5229\u7528\u898f\u7d04"},kn:{visual_challenge:"\u0ca6\u0cc3\u0cb6\u0ccd\u0caf \u0cb8\u0cb5\u0cbe\u0cb2\u0cca\u0c82\u0ca6\u0ca8\u0ccd\u0ca8\u0cc1 \u0cb8\u0ccd\u0cb5\u0cc0\u0c95\u0cb0\u0cbf\u0cb8\u0cbf",audio_challenge:"\u0c86\u0ca1\u0cbf\u0caf\u0ccb \u0cb8\u0cb5\u0cbe\u0cb2\u0cca\u0c82\u0ca6\u0ca8\u0ccd\u0ca8\u0cc1 \u0cb8\u0ccd\u0cb5\u0cc0\u0c95\u0cb0\u0cbf\u0cb8\u0cbf",refresh_btn:"\u0cb9\u0cca\u0cb8 \u0cb8\u0cb5\u0cbe\u0cb2\u0cca\u0c82\u0ca6\u0ca8\u0ccd\u0ca8\u0cc1 \u0caa\u0ca1\u0cc6\u0caf\u0cbf\u0cb0\u0cbf",
-instructions_visual:"",instructions_audio:"\u0ca8\u0cbf\u0cae\u0c97\u0cc6 \u0c95\u0cc7\u0cb3\u0cbf\u0cb8\u0cc1\u0cb5\u0cc1\u0ca6\u0ca8\u0ccd\u0ca8\u0cc1 \u0c9f\u0cc8\u0caa\u0ccd\u200c \u0cae\u0cbe\u0ca1\u0cbf:",help_btn:"\u0cb8\u0cb9\u0cbe\u0caf",play_again:"\u0ca7\u0ccd\u0cb5\u0ca8\u0cbf\u0caf\u0ca8\u0ccd\u0ca8\u0cc1 \u0cae\u0ca4\u0ccd\u0ca4\u0cc6 \u0caa\u0ccd\u0cb2\u0cc7 \u0cae\u0cbe\u0ca1\u0cbf",cant_hear_this:"\u0ca7\u0ccd\u0cb5\u0ca8\u0cbf\u0caf\u0ca8\u0ccd\u0ca8\u0cc1 MP3 \u0cb0\u0cc2\u0caa\u0ca6\u0cb2\u0ccd\u0cb2\u0cbf \u0ca1\u0ccc\u0ca8\u0ccd\u200c\u0cb2\u0ccb\u0ca1\u0ccd \u0cae\u0cbe\u0ca1\u0cbf",
-incorrect_try_again:"\u0ca4\u0caa\u0ccd\u0caa\u0cbe\u0c97\u0cbf\u0ca6\u0cc6. \u0cae\u0ca4\u0ccd\u0ca4\u0cca\u0cae\u0ccd\u0cae\u0cc6 \u0caa\u0ccd\u0cb0\u0caf\u0ca4\u0ccd\u0ca8\u0cbf\u0cb8\u0cbf.",image_alt_text:"reCAPTCHA \u0cb8\u0cb5\u0cbe\u0cb2\u0cc1 \u0c9a\u0cbf\u0ca4\u0ccd\u0cb0",privacy_and_terms:"\u0c97\u0ccc\u0caa\u0ccd\u0caf\u0ca4\u0cc6 \u0cae\u0ca4\u0ccd\u0ca4\u0cc1 \u0ca8\u0cbf\u0caf\u0cae\u0c97\u0cb3\u0cc1"},ko:{visual_challenge:"\uadf8\ub9bc\uc73c\ub85c \ubcf4\uc548\ubb38\uc790 \ubc1b\uae30",
-audio_challenge:"\uc74c\uc131\uc73c\ub85c \ubcf4\uc548\ubb38\uc790 \ubc1b\uae30",refresh_btn:"\ubcf4\uc548\ubb38\uc790 \uc0c8\ub85c \ubc1b\uae30",instructions_visual:"\ud14d\uc2a4\ud2b8 \uc785\ub825:",instructions_audio:"\uc74c\uc131 \ubcf4\uc548\ubb38\uc790 \uc785\ub825:",help_btn:"\ub3c4\uc6c0\ub9d0",play_again:"\uc74c\uc131 \ub2e4\uc2dc \ub4e3\uae30",cant_hear_this:"\uc74c\uc131\uc744 MP3\ub85c \ub2e4\uc6b4\ub85c\ub4dc",incorrect_try_again:"\ud2c0\ub838\uc2b5\ub2c8\ub2e4. \ub2e4\uc2dc \uc2dc\ub3c4\ud574 \uc8fc\uc138\uc694.",
-image_alt_text:"reCAPTCHA \ubcf4\uc548\ubb38\uc790 \uc774\ubbf8\uc9c0",privacy_and_terms:"\uac1c\uc778\uc815\ubcf4 \ubcf4\ud638 \ubc0f \uc57d\uad00"},ln:ma,lt:{visual_challenge:"Gauti vaizdin\u012f atpa\u017einimo test\u0105",audio_challenge:"Gauti garso atpa\u017einimo test\u0105",refresh_btn:"Gauti nauj\u0105 atpa\u017einimo test\u0105",instructions_visual:"\u012eveskite tekst\u0105:",instructions_audio:"\u012eveskite tai, k\u0105 girdite:",help_btn:"Pagalba",play_again:"Dar kart\u0105 paleisti gars\u0105",
-cant_hear_this:"Atsisi\u0173sti gars\u0105 kaip MP3",incorrect_try_again:"Neteisingai. Bandykite dar kart\u0105.",image_alt_text:"Testo \u201ereCAPTCHA\u201c vaizdas",privacy_and_terms:"Privatumas ir s\u0105lygos"},lv:{visual_challenge:"Sa\u0146emt vizu\u0101lu izaicin\u0101jumu",audio_challenge:"Sa\u0146emt audio izaicin\u0101jumu",refresh_btn:"Sa\u0146emt jaunu izaicin\u0101jumu",instructions_visual:"Ievadiet tekstu:",instructions_audio:"Ierakstiet dzirdamo:",help_btn:"Pal\u012bdz\u012bba",play_again:"V\u0113lreiz atska\u0146ot ska\u0146u",
-cant_hear_this:"Lejupiel\u0101d\u0113t ska\u0146u MP3\u00a0form\u0101t\u0101",incorrect_try_again:"Nepareizi. M\u0113\u0123iniet v\u0113lreiz.",image_alt_text:"reCAPTCHA izaicin\u0101juma att\u0113ls",privacy_and_terms:"Konfidencialit\u0101te un noteikumi"},ml:{visual_challenge:"\u0d12\u0d30\u0d41 \u0d26\u0d43\u0d36\u0d4d\u0d2f \u0d1a\u0d32\u0d1e\u0d4d\u0d1a\u0d4d \u0d28\u0d47\u0d1f\u0d41\u0d15",audio_challenge:"\u0d12\u0d30\u0d41 \u0d13\u0d21\u0d3f\u0d2f\u0d4b \u0d1a\u0d32\u0d1e\u0d4d\u0d1a\u0d4d \u0d28\u0d47\u0d1f\u0d41\u0d15",
-refresh_btn:"\u0d12\u0d30\u0d41 \u0d2a\u0d41\u0d24\u0d3f\u0d2f \u0d1a\u0d32\u0d1e\u0d4d\u0d1a\u0d4d \u0d28\u0d47\u0d1f\u0d41\u0d15",instructions_visual:"",instructions_audio:"\u0d15\u0d47\u0d7e\u0d15\u0d4d\u0d15\u0d41\u0d28\u0d4d\u0d28\u0d24\u0d4d \u0d1f\u0d48\u0d2a\u0d4d\u0d2a\u0d4d \u0d1a\u0d46\u0d2f\u0d4d\u0d2f\u0d42:",help_btn:"\u0d38\u0d39\u0d3e\u0d2f\u0d02",play_again:"\u0d36\u0d2c\u0d4d\u200c\u0d26\u0d02 \u0d35\u0d40\u0d23\u0d4d\u0d1f\u0d41\u0d02 \u0d2a\u0d4d\u0d32\u0d47 \u0d1a\u0d46\u0d2f\u0d4d\u0d2f\u0d41\u0d15",
-cant_hear_this:"\u0d36\u0d2c\u0d4d\u200c\u0d26\u0d02 MP3 \u0d06\u0d2f\u0d3f \u0d21\u0d57\u0d7a\u0d32\u0d4b\u0d21\u0d4d \u0d1a\u0d46\u0d2f\u0d4d\u0d2f\u0d41\u0d15",incorrect_try_again:"\u0d24\u0d46\u0d31\u0d4d\u0d31\u0d3e\u0d23\u0d4d. \u0d35\u0d40\u0d23\u0d4d\u0d1f\u0d41\u0d02 \u0d36\u0d4d\u0d30\u0d2e\u0d3f\u0d15\u0d4d\u0d15\u0d41\u0d15.",image_alt_text:"reCAPTCHA \u0d1a\u0d32\u0d1e\u0d4d\u0d1a\u0d4d \u0d07\u0d2e\u0d47\u0d1c\u0d4d",privacy_and_terms:"\u0d38\u0d4d\u0d35\u0d15\u0d3e\u0d30\u0d4d\u0d2f\u0d24\u0d2f\u0d41\u0d02 \u0d28\u0d3f\u0d2c\u0d28\u0d4d\u0d27\u0d28\u0d15\u0d33\u0d41\u0d02"},
-mr:{visual_challenge:"\u0926\u0943\u0936\u094d\u200d\u092f\u092e\u093e\u0928 \u0906\u0935\u094d\u0939\u093e\u0928 \u092a\u094d\u0930\u093e\u092a\u094d\u0924 \u0915\u0930\u093e",audio_challenge:"\u0911\u0921\u0940\u0913 \u0906\u0935\u094d\u0939\u093e\u0928 \u092a\u094d\u0930\u093e\u092a\u094d\u0924 \u0915\u0930\u093e",refresh_btn:"\u090f\u0915 \u0928\u0935\u0940\u0928 \u0906\u0935\u094d\u0939\u093e\u0928 \u092a\u094d\u0930\u093e\u092a\u094d\u0924 \u0915\u0930\u093e",instructions_visual:"",instructions_audio:"\u0906\u092a\u0932\u094d\u092f\u093e\u0932\u093e \u091c\u0947 \u0910\u0915\u0942 \u092f\u0947\u0908\u0932 \u0924\u0947 \u091f\u093e\u0907\u092a \u0915\u0930\u093e:",
-help_btn:"\u092e\u0926\u0924",play_again:"\u0927\u094d\u200d\u0935\u0928\u0940 \u092a\u0941\u0928\u094d\u0939\u093e \u092a\u094d\u200d\u0932\u0947 \u0915\u0930\u093e",cant_hear_this:"MP3 \u0930\u0941\u092a\u093e\u0924 \u0927\u094d\u200d\u0935\u0928\u0940 \u0921\u093e\u0909\u0928\u0932\u094b\u0921 \u0915\u0930\u093e",incorrect_try_again:"\u0905\u092f\u094b\u0917\u094d\u200d\u092f. \u092a\u0941\u0928\u094d\u200d\u0939\u093e \u092a\u094d\u0930\u092f\u0924\u094d\u200d\u0928 \u0915\u0930\u093e.",image_alt_text:"reCAPTCHA \u0906\u0935\u094d\u200d\u0939\u093e\u0928 \u092a\u094d\u0930\u0924\u093f\u092e\u093e",
-privacy_and_terms:"\u0917\u094b\u092a\u0928\u0940\u092f\u0924\u093e \u0906\u0923\u093f \u0905\u091f\u0940"},ms:{visual_challenge:"Dapatkan cabaran visual",audio_challenge:"Dapatkan cabaran audio",refresh_btn:"Dapatkan cabaran baru",instructions_visual:"Taipkan teksnya:",instructions_audio:"Taip apa yang didengari:",help_btn:"Bantuan",play_again:"Mainkan bunyi sekali lagi",cant_hear_this:"Muat turun bunyi sebagai MP3",incorrect_try_again:"Tidak betul. Cuba lagi.",image_alt_text:"Imej cabaran reCAPTCHA",
-privacy_and_terms:"Privasi & Syarat"},nl:{visual_challenge:"Een visuele uitdaging proberen",audio_challenge:"Een audio-uitdaging proberen",refresh_btn:"Een nieuwe uitdaging proberen",instructions_visual:"Typ de tekst:",instructions_audio:"Typ wat u hoort:",help_btn:"Help",play_again:"Geluid opnieuw afspelen",cant_hear_this:"Geluid downloaden als MP3",incorrect_try_again:"Onjuist. Probeer het opnieuw.",image_alt_text:"reCAPTCHA-uitdagingsafbeelding",privacy_and_terms:"Privacy en voorwaarden"},no:{visual_challenge:"F\u00e5 en bildeutfordring",
-audio_challenge:"F\u00e5 en lydutfordring",refresh_btn:"F\u00e5 en ny utfordring",instructions_visual:"Skriv inn teksten:",instructions_audio:"Skriv inn det du h\u00f8rer:",help_btn:"Hjelp",play_again:"Spill av lyd p\u00e5 nytt",cant_hear_this:"Last ned lyd som MP3",incorrect_try_again:"Feil. Pr\u00f8v p\u00e5 nytt.",image_alt_text:"reCAPTCHA-utfordringsbilde",privacy_and_terms:"Personvern og vilk\u00e5r"},pl:{visual_challenge:"Poka\u017c podpowied\u017a wizualn\u0105",audio_challenge:"Odtw\u00f3rz podpowied\u017a d\u017awi\u0119kow\u0105",
-refresh_btn:"Nowa podpowied\u017a",instructions_visual:"Przepisz tekst:",instructions_audio:"Wpisz us\u0142yszane s\u0142owa:",help_btn:"Pomoc",play_again:"Odtw\u00f3rz d\u017awi\u0119k ponownie",cant_hear_this:"Pobierz d\u017awi\u0119k jako plik MP3",incorrect_try_again:"Nieprawid\u0142owo. Spr\u00f3buj ponownie.",image_alt_text:"Zadanie obrazkowe reCAPTCHA",privacy_and_terms:"Prywatno\u015b\u0107 i warunki"},pt:pa,"pt-BR":pa,"pt-PT":{visual_challenge:"Obter um desafio visual",audio_challenge:"Obter um desafio de \u00e1udio",
-refresh_btn:"Obter um novo desafio",instructions_visual:"Introduza o texto:",instructions_audio:"Escreva o que ouvir:",help_btn:"Ajuda",play_again:"Reproduzir som novamente",cant_hear_this:"Transferir som como MP3",incorrect_try_again:"Incorreto. Tente novamente.",image_alt_text:"Imagem de teste reCAPTCHA",privacy_and_terms:"Privacidade e Termos de Utiliza\u00e7\u00e3o"},ro:qa,ru:{visual_challenge:"\u0412\u0438\u0437\u0443\u0430\u043b\u044c\u043d\u0430\u044f \u043f\u0440\u043e\u0432\u0435\u0440\u043a\u0430",
-audio_challenge:"\u0417\u0432\u0443\u043a\u043e\u0432\u0430\u044f \u043f\u0440\u043e\u0432\u0435\u0440\u043a\u0430",refresh_btn:"\u041e\u0431\u043d\u043e\u0432\u0438\u0442\u044c",instructions_visual:"\u0412\u0432\u0435\u0434\u0438\u0442\u0435 \u0442\u0435\u043a\u0441\u0442:",instructions_audio:"\u0412\u0432\u0435\u0434\u0438\u0442\u0435 \u0442\u043e, \u0447\u0442\u043e \u0441\u043b\u044b\u0448\u0438\u0442\u0435:",help_btn:"\u0421\u043f\u0440\u0430\u0432\u043a\u0430",play_again:"\u041f\u0440\u043e\u0441\u043b\u0443\u0448\u0430\u0442\u044c \u0435\u0449\u0435 \u0440\u0430\u0437",
-cant_hear_this:"\u0417\u0430\u0433\u0440\u0443\u0437\u0438\u0442\u044c MP3-\u0444\u0430\u0439\u043b",incorrect_try_again:"\u041d\u0435\u043f\u0440\u0430\u0432\u0438\u043b\u044c\u043d\u043e. \u041f\u043e\u0432\u0442\u043e\u0440\u0438\u0442\u0435 \u043f\u043e\u043f\u044b\u0442\u043a\u0443.",image_alt_text:"\u041f\u0440\u043e\u0432\u0435\u0440\u043a\u0430 \u043f\u043e \u0441\u043b\u043e\u0432\u0443 reCAPTCHA",privacy_and_terms:"\u041f\u0440\u0430\u0432\u0438\u043b\u0430 \u0438 \u043f\u0440\u0438\u043d\u0446\u0438\u043f\u044b"},
-sk:{visual_challenge:"Zobrazi\u0165 vizu\u00e1lnu podobu",audio_challenge:"Prehra\u0165 zvukov\u00fa podobu",refresh_btn:"Zobrazi\u0165 nov\u00fd v\u00fdraz",instructions_visual:"Zadajte text:",instructions_audio:"Zadajte, \u010do po\u010dujete:",help_btn:"Pomocn\u00edk",play_again:"Znova prehra\u0165 zvuk",cant_hear_this:"Prevzia\u0165 zvuk v podobe s\u00faboru MP3",incorrect_try_again:"Nespr\u00e1vne. Sk\u00faste to znova.",image_alt_text:"Obr\u00e1zok zadania reCAPTCHA",privacy_and_terms:"Ochrana osobn\u00fdch \u00fadajov a Zmluvn\u00e9 podmienky"},
-sl:{visual_challenge:"Vizualni preskus",audio_challenge:"Zvo\u010dni preskus",refresh_btn:"Nov preskus",instructions_visual:"Vnesite besedilo:",instructions_audio:"Natipkajte, kaj sli\u0161ite:",help_btn:"Pomo\u010d",play_again:"Znova predvajaj zvok",cant_hear_this:"Prenesi zvok kot MP3",incorrect_try_again:"Napa\u010dno. Poskusite znova.",image_alt_text:"Slika izziva reCAPTCHA",privacy_and_terms:"Zasebnost in pogoji"},sr:{visual_challenge:"\u041f\u0440\u0438\u043c\u0438\u0442\u0435 \u0432\u0438\u0437\u0443\u0435\u043b\u043d\u0438 \u0443\u043f\u0438\u0442",
-audio_challenge:"\u041f\u0440\u0438\u043c\u0438\u0442\u0435 \u0430\u0443\u0434\u0438\u043e \u0443\u043f\u0438\u0442",refresh_btn:"\u041f\u0440\u0438\u043c\u0438\u0442\u0435 \u043d\u043e\u0432\u0438 \u0443\u043f\u0438\u0442",instructions_visual:"\u0423\u043d\u0435\u0441\u0438\u0442\u0435 \u0442\u0435\u043a\u0441\u0442:",instructions_audio:"\u041e\u0442\u043a\u0443\u0446\u0430\u0458\u0442\u0435 \u043e\u043d\u043e \u0448\u0442\u043e \u0447\u0443\u0458\u0435\u0442\u0435:",help_btn:"\u041f\u043e\u043c\u043e\u045b",
-play_again:"\u041f\u043e\u043d\u043e\u0432\u043e \u043f\u0443\u0441\u0442\u0438 \u0437\u0432\u0443\u043a",cant_hear_this:"\u041f\u0440\u0435\u0443\u0437\u043c\u0438 \u0437\u0432\u0443\u043a \u043a\u0430\u043e MP3 \u0441\u043d\u0438\u043c\u0430\u043a",incorrect_try_again:"\u041d\u0435\u0442\u0430\u0447\u043d\u043e. \u041f\u043e\u043a\u0443\u0448\u0430\u0458\u0442\u0435 \u043f\u043e\u043d\u043e\u0432\u043e.",image_alt_text:"\u0421\u043b\u0438\u043a\u0430 reCAPTCHA \u043f\u0440\u043e\u0432\u0435\u0440\u0435",
-privacy_and_terms:"\u041f\u0440\u0438\u0432\u0430\u0442\u043d\u043e\u0441\u0442 \u0438 \u0443\u0441\u043b\u043e\u0432\u0438"},sv:{visual_challenge:"H\u00e4mta captcha i bildformat",audio_challenge:"H\u00e4mta captcha i ljudformat",refresh_btn:"H\u00e4mta ny captcha",instructions_visual:"Skriv texten:",instructions_audio:"Skriv det du h\u00f6r:",help_btn:"Hj\u00e4lp",play_again:"Spela upp ljudet igen",cant_hear_this:"H\u00e4mta ljud som MP3",incorrect_try_again:"Fel. F\u00f6rs\u00f6k igen.",image_alt_text:"reCAPTCHA-bild",
-privacy_and_terms:"Sekretess och villkor"},sw:{visual_challenge:"Pata herufi za kusoma",audio_challenge:"Pata herufi za kusikiliza",refresh_btn:"Pata herufi mpya",instructions_visual:"",instructions_audio:"Charaza unachosikia:",help_btn:"Usaidizi",play_again:"Cheza sauti tena",cant_hear_this:"Pakua sauti kama MP3",incorrect_try_again:"Sio sahihi. Jaribu tena.",image_alt_text:"picha ya changamoto ya reCAPTCHA",privacy_and_terms:"Faragha & Masharti"},ta:{visual_challenge:"\u0baa\u0bbe\u0bb0\u0bcd\u0bb5\u0bc8 \u0b9a\u0bc7\u0bb2\u0b9e\u0bcd\u0b9a\u0bc8\u0baa\u0bcd \u0baa\u0bc6\u0bb1\u0bc1\u0b95",
-audio_challenge:"\u0b86\u0b9f\u0bbf\u0baf\u0bcb \u0b9a\u0bc7\u0bb2\u0b9e\u0bcd\u0b9a\u0bc8\u0baa\u0bcd \u0baa\u0bc6\u0bb1\u0bc1\u0b95",refresh_btn:"\u0baa\u0bc1\u0ba4\u0bbf\u0baf \u0b9a\u0bc7\u0bb2\u0b9e\u0bcd\u0b9a\u0bc8\u0baa\u0bcd \u0baa\u0bc6\u0bb1\u0bc1\u0b95",instructions_visual:"",instructions_audio:"\u0b95\u0bc7\u0b9f\u0bcd\u0baa\u0ba4\u0bc8 \u0b9f\u0bc8\u0baa\u0bcd \u0b9a\u0bc6\u0baf\u0bcd\u0b95:",help_btn:"\u0b89\u0ba4\u0bb5\u0bbf",play_again:"\u0b92\u0bb2\u0bbf\u0baf\u0bc8 \u0bae\u0bc0\u0ba3\u0bcd\u0b9f\u0bc1\u0bae\u0bcd \u0b87\u0baf\u0b95\u0bcd\u0b95\u0bc1",
-cant_hear_this:"\u0b92\u0bb2\u0bbf\u0baf\u0bc8 MP3 \u0b86\u0b95 \u0baa\u0ba4\u0bbf\u0bb5\u0bbf\u0bb1\u0b95\u0bcd\u0b95\u0bc1\u0b95",incorrect_try_again:"\u0ba4\u0bb5\u0bb1\u0bbe\u0ba9\u0ba4\u0bc1. \u0bae\u0bc0\u0ba3\u0bcd\u0b9f\u0bc1\u0bae\u0bcd \u0bae\u0bc1\u0baf\u0bb2\u0bb5\u0bc1\u0bae\u0bcd.",image_alt_text:"reCAPTCHA \u0b9a\u0bc7\u0bb2\u0b9e\u0bcd\u0b9a\u0bcd \u0baa\u0b9f\u0bae\u0bcd",privacy_and_terms:"\u0ba4\u0ba9\u0bbf\u0baf\u0bc1\u0bb0\u0bbf\u0bae\u0bc8 & \u0bb5\u0bbf\u0ba4\u0bbf\u0bae\u0bc1\u0bb1\u0bc8\u0b95\u0bb3\u0bcd"},
-te:{visual_challenge:"\u0c12\u0c15 \u0c26\u0c43\u0c36\u0c4d\u0c2f\u0c2e\u0c3e\u0c28 \u0c38\u0c35\u0c3e\u0c32\u0c41\u0c28\u0c41 \u0c38\u0c4d\u0c35\u0c40\u0c15\u0c30\u0c3f\u0c02\u0c1a\u0c02\u0c21\u0c3f",audio_challenge:"\u0c12\u0c15 \u0c06\u0c21\u0c3f\u0c2f\u0c4b \u0c38\u0c35\u0c3e\u0c32\u0c41\u0c28\u0c41 \u0c38\u0c4d\u0c35\u0c40\u0c15\u0c30\u0c3f\u0c02\u0c1a\u0c02\u0c21\u0c3f",refresh_btn:"\u0c15\u0c4d\u0c30\u0c4a\u0c24\u0c4d\u0c24 \u0c38\u0c35\u0c3e\u0c32\u0c41\u0c28\u0c41 \u0c38\u0c4d\u0c35\u0c40\u0c15\u0c30\u0c3f\u0c02\u0c1a\u0c02\u0c21\u0c3f",
-instructions_visual:"",instructions_audio:"\u0c2e\u0c40\u0c30\u0c41 \u0c35\u0c3f\u0c28\u0c4d\u0c28\u0c26\u0c3f \u0c1f\u0c48\u0c2a\u0c4d \u0c1a\u0c47\u0c2f\u0c02\u0c21\u0c3f:",help_btn:"\u0c38\u0c39\u0c3e\u0c2f\u0c02",play_again:"\u0c27\u0c4d\u0c35\u0c28\u0c3f\u0c28\u0c3f \u0c2e\u0c33\u0c4d\u0c32\u0c40 \u0c2a\u0c4d\u0c32\u0c47 \u0c1a\u0c47\u0c2f\u0c3f",cant_hear_this:"\u0c27\u0c4d\u0c35\u0c28\u0c3f\u0c28\u0c3f MP3 \u0c35\u0c32\u0c46 \u0c21\u0c4c\u0c28\u0c4d\u200c\u0c32\u0c4b\u0c21\u0c4d \u0c1a\u0c47\u0c2f\u0c3f",
-incorrect_try_again:"\u0c24\u0c2a\u0c4d\u0c2a\u0c41. \u0c2e\u0c33\u0c4d\u0c32\u0c40 \u0c2a\u0c4d\u0c30\u0c2f\u0c24\u0c4d\u0c28\u0c3f\u0c02\u0c1a\u0c02\u0c21\u0c3f.",image_alt_text:"reCAPTCHA \u0c38\u0c35\u0c3e\u0c32\u0c41 \u0c1a\u0c3f\u0c24\u0c4d\u0c30\u0c02",privacy_and_terms:"\u0c17\u0c4b\u0c2a\u0c4d\u0c2f\u0c24 & \u0c28\u0c3f\u0c2c\u0c02\u0c27\u0c28\u0c32\u0c41"},th:{visual_challenge:"\u0e23\u0e31\u0e1a\u0e04\u0e27\u0e32\u0e21\u0e17\u0e49\u0e32\u0e17\u0e32\u0e22\u0e14\u0e49\u0e32\u0e19\u0e20\u0e32\u0e1e",
-audio_challenge:"\u0e23\u0e31\u0e1a\u0e04\u0e27\u0e32\u0e21\u0e17\u0e49\u0e32\u0e17\u0e32\u0e22\u0e14\u0e49\u0e32\u0e19\u0e40\u0e2a\u0e35\u0e22\u0e07",refresh_btn:"\u0e23\u0e31\u0e1a\u0e04\u0e27\u0e32\u0e21\u0e17\u0e49\u0e32\u0e17\u0e32\u0e22\u0e43\u0e2b\u0e21\u0e48",instructions_visual:"\u0e1e\u0e34\u0e21\u0e1e\u0e4c\u0e02\u0e49\u0e2d\u0e04\u0e27\u0e32\u0e21\u0e19\u0e35\u0e49:",instructions_audio:"\u0e1e\u0e34\u0e21\u0e1e\u0e4c\u0e2a\u0e34\u0e48\u0e07\u0e17\u0e35\u0e48\u0e04\u0e38\u0e13\u0e44\u0e14\u0e49\u0e22\u0e34\u0e19:",
-help_btn:"\u0e04\u0e27\u0e32\u0e21\u0e0a\u0e48\u0e27\u0e22\u0e40\u0e2b\u0e25\u0e37\u0e2d",play_again:"\u0e40\u0e25\u0e48\u0e19\u0e40\u0e2a\u0e35\u0e22\u0e07\u0e2d\u0e35\u0e01\u0e04\u0e23\u0e31\u0e49\u0e07",cant_hear_this:"\u0e14\u0e32\u0e27\u0e42\u0e2b\u0e25\u0e14\u0e40\u0e2a\u0e35\u0e22\u0e07\u0e40\u0e1b\u0e47\u0e19 MP3",incorrect_try_again:"\u0e44\u0e21\u0e48\u0e16\u0e39\u0e01\u0e15\u0e49\u0e2d\u0e07 \u0e25\u0e2d\u0e07\u0e2d\u0e35\u0e01\u0e04\u0e23\u0e31\u0e49\u0e07",image_alt_text:"\u0e23\u0e2b\u0e31\u0e2a\u0e20\u0e32\u0e1e reCAPTCHA",
-privacy_and_terms:"\u0e19\u0e42\u0e22\u0e1a\u0e32\u0e22\u0e2a\u0e48\u0e27\u0e19\u0e1a\u0e38\u0e04\u0e04\u0e25\u0e41\u0e25\u0e30\u0e02\u0e49\u0e2d\u0e01\u0e33\u0e2b\u0e19\u0e14"},tr:{visual_challenge:"G\u00f6rsel sorgu al",audio_challenge:"Sesli sorgu al",refresh_btn:"Yeniden y\u00fckle",instructions_visual:"Metni yaz\u0131n:",instructions_audio:"Duydu\u011funuzu yaz\u0131n:",help_btn:"Yard\u0131m",play_again:"Sesi tekrar \u00e7al",cant_hear_this:"Sesi MP3 olarak indir",incorrect_try_again:"Yanl\u0131\u015f. Tekrar deneyin.",
-image_alt_text:"reCAPTCHA sorusu resmi",privacy_and_terms:"Gizlilik ve \u015eartlar"},uk:{visual_challenge:"\u041e\u0442\u0440\u0438\u043c\u0430\u0442\u0438 \u0432\u0456\u0437\u0443\u0430\u043b\u044c\u043d\u0438\u0439 \u0442\u0435\u043a\u0441\u0442",audio_challenge:"\u041e\u0442\u0440\u0438\u043c\u0430\u0442\u0438 \u0430\u0443\u0434\u0456\u043e\u0437\u0430\u043f\u0438\u0441",refresh_btn:"\u041e\u043d\u043e\u0432\u0438\u0442\u0438 \u0442\u0435\u043a\u0441\u0442",instructions_visual:"\u0412\u0432\u0435\u0434\u0456\u0442\u044c \u0442\u0435\u043a\u0441\u0442:",
-instructions_audio:"\u0412\u0432\u0435\u0434\u0456\u0442\u044c \u043f\u043e\u0447\u0443\u0442\u0435:",help_btn:"\u0414\u043e\u0432\u0456\u0434\u043a\u0430",play_again:"\u0412\u0456\u0434\u0442\u0432\u043e\u0440\u0438\u0442\u0438 \u0437\u0430\u043f\u0438\u0441 \u0449\u0435 \u0440\u0430\u0437",cant_hear_this:"\u0417\u0430\u0432\u0430\u043d\u0442\u0430\u0436\u0438\u0442\u0438 \u0437\u0430\u043f\u0438\u0441 \u044f\u043a MP3",incorrect_try_again:"\u041d\u0435\u043f\u0440\u0430\u0432\u0438\u043b\u044c\u043d\u043e. \u0421\u043f\u0440\u043e\u0431\u0443\u0439\u0442\u0435 \u0449\u0435 \u0440\u0430\u0437.",
-image_alt_text:"\u0417\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u043d\u044f \u0437\u0430\u0432\u0434\u0430\u043d\u043d\u044f reCAPTCHA",privacy_and_terms:"\u041a\u043e\u043d\u0444\u0456\u0434\u0435\u043d\u0446\u0456\u0439\u043d\u0456\u0441\u0442\u044c \u0456 \u0443\u043c\u043e\u0432\u0438"},ur:{visual_challenge:"\u0627\u06cc\u06a9 \u0645\u0631\u0626\u06cc \u0686\u06cc\u0644\u0646\u062c \u062d\u0627\u0635\u0644 \u06a9\u0631\u06cc\u06ba",audio_challenge:"\u0627\u06cc\u06a9 \u0622\u0688\u06cc\u0648 \u0686\u06cc\u0644\u0646\u062c \u062d\u0627\u0635\u0644 \u06a9\u0631\u06cc\u06ba",
-refresh_btn:"\u0627\u06cc\u06a9 \u0646\u06cc\u0627 \u0686\u06cc\u0644\u0646\u062c \u062d\u0627\u0635\u0644 \u06a9\u0631\u06cc\u06ba",instructions_visual:"",instructions_audio:"\u062c\u0648 \u0633\u0646\u0627\u0626\u06cc \u062f\u06cc\u062a\u0627 \u06c1\u06d2 \u0648\u06c1 \u0679\u0627\u0626\u067e \u06a9\u0631\u06cc\u06ba:",help_btn:"\u0645\u062f\u062f",play_again:"\u0622\u0648\u0627\u0632 \u062f\u0648\u0628\u0627\u0631\u06c1 \u0686\u0644\u0627\u0626\u06cc\u06ba",cant_hear_this:"\u0622\u0648\u0627\u0632 \u06a9\u0648 MP3 \u06a9\u06d2 \u0628\u0637\u0648\u0631 \u0688\u0627\u0624\u0646 \u0644\u0648\u0688 \u06a9\u0631\u06cc\u06ba",
-incorrect_try_again:"\u063a\u0644\u0637\u06d4 \u062f\u0648\u0628\u0627\u0631\u06c1 \u06a9\u0648\u0634\u0634 \u06a9\u0631\u06cc\u06ba\u06d4",image_alt_text:"reCAPTCHA \u0686\u06cc\u0644\u0646\u062c \u0648\u0627\u0644\u06cc \u0634\u0628\u06cc\u06c1",privacy_and_terms:"\u0631\u0627\u0632\u062f\u0627\u0631\u06cc \u0648 \u0634\u0631\u0627\u0626\u0637"},vi:{visual_challenge:"Nh\u1eadn th\u1eed th\u00e1ch h\u00ecnh \u1ea3nh",audio_challenge:"Nh\u1eadn th\u1eed th\u00e1ch \u00e2m thanh",refresh_btn:"Nh\u1eadn th\u1eed th\u00e1ch m\u1edbi",
-instructions_visual:"Nh\u1eadp v\u0103n b\u1ea3n:",instructions_audio:"Nh\u1eadp n\u1ed9i dung b\u1ea1n nghe th\u1ea5y:",help_btn:"Tr\u1ee3 gi\u00fap",play_again:"Ph\u00e1t l\u1ea1i \u00e2m thanh",cant_hear_this:"T\u1ea3i \u00e2m thanh xu\u1ed1ng d\u01b0\u1edbi d\u1ea1ng MP3",incorrect_try_again:"Kh\u00f4ng ch\u00ednh x\u00e1c. H\u00e3y th\u1eed l\u1ea1i.",image_alt_text:"H\u00ecnh x\u00e1c th\u1ef1c reCAPTCHA",privacy_and_terms:"B\u1ea3o m\u1eadt v\u00e0 \u0111i\u1ec1u kho\u1ea3n"},"zh-CN":ra,"zh-HK":{visual_challenge:"\u56de\u7b54\u5716\u50cf\u9a57\u8b49\u554f\u984c",
-audio_challenge:"\u53d6\u5f97\u8a9e\u97f3\u9a57\u8b49\u554f\u984c",refresh_btn:"\u63db\u4e00\u500b\u9a57\u8b49\u554f\u984c",instructions_visual:"\u8f38\u5165\u6587\u5b57\uff1a",instructions_audio:"\u9375\u5165\u60a8\u6240\u807d\u5230\u7684\uff1a",help_btn:"\u8aaa\u660e",play_again:"\u518d\u6b21\u64ad\u653e\u8072\u97f3",cant_hear_this:"\u5c07\u8072\u97f3\u4e0b\u8f09\u70ba MP3",incorrect_try_again:"\u4e0d\u6b63\u78ba\uff0c\u518d\u8a66\u4e00\u6b21\u3002",image_alt_text:"reCAPTCHA \u9a57\u8b49\u6587\u5b57\u5716\u7247",
-privacy_and_terms:"\u79c1\u96b1\u6b0a\u8207\u689d\u6b3e"},"zh-TW":{visual_challenge:"\u53d6\u5f97\u5716\u7247\u9a57\u8b49\u554f\u984c",audio_challenge:"\u53d6\u5f97\u8a9e\u97f3\u9a57\u8b49\u554f\u984c",refresh_btn:"\u53d6\u5f97\u65b0\u7684\u9a57\u8b49\u554f\u984c",instructions_visual:"\u8acb\u8f38\u5165\u5716\u7247\u4e2d\u7684\u6587\u5b57\uff1a",instructions_audio:"\u8acb\u8f38\u5165\u8a9e\u97f3\u5167\u5bb9\uff1a",help_btn:"\u8aaa\u660e",play_again:"\u518d\u6b21\u64ad\u653e",cant_hear_this:"\u4ee5 MP3 \u683c\u5f0f\u4e0b\u8f09\u8072\u97f3",
-incorrect_try_again:"\u9a57\u8b49\u78bc\u6709\u8aa4\uff0c\u8acb\u518d\u8a66\u4e00\u6b21\u3002",image_alt_text:"reCAPTCHA \u9a57\u8b49\u6587\u5b57\u5716\u7247",privacy_and_terms:"\u96b1\u79c1\u6b0a\u8207\u689d\u6b3e"},zu:{visual_challenge:"Thola inselelo ebonakalayo",audio_challenge:"Thola inselelo yokulalelwayo",refresh_btn:"Thola inselelo entsha",instructions_visual:"",instructions_audio:"Bhala okuzwayo:",help_btn:"Usizo",play_again:"Phinda udlale okulalelwayo futhi",cant_hear_this:"Layisha umsindo njenge-MP3",
-incorrect_try_again:"Akulungile. Zama futhi.",image_alt_text:"umfanekiso oyinselelo we-reCAPTCHA",privacy_and_terms:"Okwangasese kanye nemigomo"},tl:la,he:oa,"in":na,mo:qa,zh:ra};var x=function(a){if(Error.captureStackTrace)Error.captureStackTrace(this,x);else{var b=Error().stack;b&&(this.stack=b)}a&&(this.message=String(a))};u(x,Error);x.prototype.name="CustomError";var ta;var ua=function(a,b){for(var c=a.split("%s"),d="",e=Array.prototype.slice.call(arguments,1);e.length&&1<c.length;)d+=c.shift()+e.shift();return d+c.join("%s")},va=String.prototype.trim?function(a){return a.trim()}:function(a){return a.replace(/^[\s\xa0]+|[\s\xa0]+$/g,"")},Da=function(a){if(!wa.test(a))return a;-1!=a.indexOf("&")&&(a=a.replace(xa,"&amp;"));-1!=a.indexOf("<")&&(a=a.replace(ya,"&lt;"));-1!=a.indexOf(">")&&(a=a.replace(za,"&gt;"));-1!=a.indexOf('"')&&(a=a.replace(Aa,"&quot;"));-1!=a.indexOf("'")&&
-(a=a.replace(Ba,"&#39;"));-1!=a.indexOf("\x00")&&(a=a.replace(Ca,"&#0;"));return a},xa=/&/g,ya=/</g,za=/>/g,Aa=/"/g,Ba=/'/g,Ca=/\x00/g,wa=/[\x00&<>"']/,Ea=function(a,b){return a<b?-1:a>b?1:0},Fa=function(a){return String(a).replace(/\-([a-z])/g,function(a,c){return c.toUpperCase()})},Ga=function(a){var b=q(void 0)?"undefined".replace(/([-()\[\]{}+?*.$\^|,:#<!\\])/g,"\\$1").replace(/\x08/g,"\\x08"):"\\s";return a.replace(new RegExp("(^"+(b?"|["+b+"]+":"")+")([a-z])","g"),function(a,b,e){return b+e.toUpperCase()})};var Ha=function(a,b){b.unshift(a);x.call(this,ua.apply(null,b));b.shift()};u(Ha,x);Ha.prototype.name="AssertionError";
-var Ia=function(a,b,c,d){var e="Assertion failed";if(c)var e=e+(": "+c),g=d;else a&&(e+=": "+a,g=b);throw new Ha(""+e,g||[]);},y=function(a,b,c){a||Ia("",null,b,Array.prototype.slice.call(arguments,2))},Ja=function(a,b){throw new Ha("Failure"+(a?": "+a:""),Array.prototype.slice.call(arguments,1));},Ka=function(a,b,c){q(a)||Ia("Expected string but got %s: %s.",[n(a),a],b,Array.prototype.slice.call(arguments,2));return a},La=function(a,b,c){r(a)||Ia("Expected function but got %s: %s.",[n(a),a],b,Array.prototype.slice.call(arguments,
-2))};var z=Array.prototype,Ma=z.indexOf?function(a,b,c){y(null!=a.length);return z.indexOf.call(a,b,c)}:function(a,b,c){c=null==c?0:0>c?Math.max(0,a.length+c):c;if(q(a))return q(b)&&1==b.length?a.indexOf(b,c):-1;for(;c<a.length;c++)if(c in a&&a[c]===b)return c;return-1},Na=z.forEach?function(a,b,c){y(null!=a.length);z.forEach.call(a,b,c)}:function(a,b,c){for(var d=a.length,e=q(a)?a.split(""):a,g=0;g<d;g++)g in e&&b.call(c,e[g],g,a)},Oa=z.map?function(a,b,c){y(null!=a.length);return z.map.call(a,b,c)}:
-function(a,b,c){for(var d=a.length,e=Array(d),g=q(a)?a.split(""):a,f=0;f<d;f++)f in g&&(e[f]=b.call(c,g[f],f,a));return e},Pa=z.some?function(a,b,c){y(null!=a.length);return z.some.call(a,b,c)}:function(a,b,c){for(var d=a.length,e=q(a)?a.split(""):a,g=0;g<d;g++)if(g in e&&b.call(c,e[g],g,a))return!0;return!1},Qa=function(a,b){var c=Ma(a,b),d;if(d=0<=c)y(null!=a.length),z.splice.call(a,c,1);return d},Ra=function(a){var b=a.length;if(0<b){for(var c=Array(b),d=0;d<b;d++)c[d]=a[d];return c}return[]},
-Sa=function(a,b,c){y(null!=a.length);return 2>=arguments.length?z.slice.call(a,b):z.slice.call(a,b,c)};var Ta=function(a,b){for(var c in a)b.call(void 0,a[c],c,a)},Ua=function(a){var b=[],c=0,d;for(d in a)b[c++]=d;return b},Va=function(a){for(var b in a)return!1;return!0},Xa=function(){var a=Wa()?k.google_ad:null,b={},c;for(c in a)b[c]=a[c];return b},Ya="constructor hasOwnProperty isPrototypeOf propertyIsEnumerable toLocaleString toString valueOf".split(" "),Za=function(a,b){for(var c,d,e=1;e<arguments.length;e++){d=arguments[e];for(c in d)a[c]=d[c];for(var g=0;g<Ya.length;g++)c=Ya[g],Object.prototype.hasOwnProperty.call(d,
-c)&&(a[c]=d[c])}},$a=function(a){var b=arguments.length;if(1==b&&p(arguments[0]))return $a.apply(null,arguments[0]);for(var c={},d=0;d<b;d++)c[arguments[d]]=!0;return c};var A;t:{var ab=k.navigator;if(ab){var bb=ab.userAgent;if(bb){A=bb;break t}}A=""}var B=function(a){return-1!=A.indexOf(a)};var cb=B("Opera")||B("OPR"),C=B("Trident")||B("MSIE"),D=B("Gecko")&&-1==A.toLowerCase().indexOf("webkit")&&!(B("Trident")||B("MSIE")),E=-1!=A.toLowerCase().indexOf("webkit"),db=function(){var a=k.document;return a?a.documentMode:void 0},eb=function(){var a="",b;if(cb&&k.opera)return a=k.opera.version,r(a)?a():a;D?b=/rv\:([^\);]+)(\)|;)/:C?b=/\b(?:MSIE|rv)[: ]([^\);]+)(\)|;)/:E&&(b=/WebKit\/(\S+)/);b&&(a=(a=b.exec(A))?a[1]:"");return C&&(b=db(),b>parseFloat(a))?String(b):a}(),fb={},F=function(a){var b;
-if(!(b=fb[a])){b=0;for(var c=va(String(eb)).split("."),d=va(String(a)).split("."),e=Math.max(c.length,d.length),g=0;0==b&&g<e;g++){var f=c[g]||"",m=d[g]||"",$=RegExp("(\\d*)(\\D*)","g"),K=RegExp("(\\d*)(\\D*)","g");do{var G=$.exec(f)||["","",""],aa=K.exec(m)||["","",""];if(0==G[0].length&&0==aa[0].length)break;b=Ea(0==G[1].length?0:parseInt(G[1],10),0==aa[1].length?0:parseInt(aa[1],10))||Ea(0==G[2].length,0==aa[2].length)||Ea(G[2],aa[2])}while(0==b)}b=fb[a]=0<=b}return b},gb=k.document,hb=gb&&C?db()||
-("CSS1Compat"==gb.compatMode?parseInt(eb,10):5):void 0;var ib=function(a){if(8192>a.length)return String.fromCharCode.apply(null,a);for(var b="",c=0;c<a.length;c+=8192)var d=Sa(a,c,c+8192),b=b+String.fromCharCode.apply(null,d);return b},jb=function(a){return Oa(a,function(a){a=a.toString(16);return 1<a.length?a:"0"+a}).join("")};var kb=null,lb=null,mb=function(a){if(!kb){kb={};lb={};for(var b=0;65>b;b++)kb[b]="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=".charAt(b),lb[kb[b]]=b,62<=b&&(lb["ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.".charAt(b)]=b)}for(var b=lb,c=[],d=0;d<a.length;){var e=b[a.charAt(d++)],g=d<a.length?b[a.charAt(d)]:0;++d;var f=d<a.length?b[a.charAt(d)]:64;++d;var m=d<a.length?b[a.charAt(d)]:64;++d;if(null==e||null==g||null==f||null==m)throw Error();c.push(e<<2|g>>
-4);64!=f&&(c.push(g<<4&240|f>>2),64!=m&&c.push(f<<6&192|m))}return c};var H=function(){this.disposed_=this.disposed_;this.onDisposeCallbacks_=this.onDisposeCallbacks_};H.prototype.disposed_=!1;H.prototype.dispose=function(){this.disposed_||(this.disposed_=!0,this.disposeInternal())};var nb=function(a,b){a.onDisposeCallbacks_||(a.onDisposeCallbacks_=[]);a.onDisposeCallbacks_.push(l(void 0)?s(b,void 0):b)};H.prototype.disposeInternal=function(){if(this.onDisposeCallbacks_)for(;this.onDisposeCallbacks_.length;)this.onDisposeCallbacks_.shift()()};
-var ob=function(a){a&&"function"==typeof a.dispose&&a.dispose()};var pb=!C||C&&9<=hb;!D&&!C||C&&C&&9<=hb||D&&F("1.9.1");C&&F("9");var sb=function(a){return a?new qb(rb(a)):ta||(ta=new qb)},tb=function(a,b){return q(b)?a.getElementById(b):b},vb=function(a,b){Ta(b,function(b,d){"style"==d?a.style.cssText=b:"class"==d?a.className=b:"for"==d?a.htmlFor=b:d in ub?a.setAttribute(ub[d],b):0==d.lastIndexOf("aria-",0)||0==d.lastIndexOf("data-",0)?a.setAttribute(d,b):a[d]=b})},ub={cellpadding:"cellPadding",cellspacing:"cellSpacing",colspan:"colSpan",frameborder:"frameBorder",height:"height",maxlength:"maxLength",role:"role",rowspan:"rowSpan",
-type:"type",usemap:"useMap",valign:"vAlign",width:"width"},xb=function(a,b,c){return wb(document,arguments)},wb=function(a,b){var c=b[0],d=b[1];if(!pb&&d&&(d.name||d.type)){c=["<",c];d.name&&c.push(' name="',Da(d.name),'"');if(d.type){c.push(' type="',Da(d.type),'"');var e={};Za(e,d);delete e.type;d=e}c.push(">");c=c.join("")}c=a.createElement(c);d&&(q(d)?c.className=d:p(d)?c.className=d.join(" "):vb(c,d));2<b.length&&yb(a,c,b);return c},yb=function(a,b,c){function d(c){c&&b.appendChild(q(c)?a.createTextNode(c):
-c)}for(var e=2;e<c.length;e++){var g=c[e];!ca(g)||da(g)&&0<g.nodeType?d(g):Na(zb(g)?Ra(g):g,d)}},Ab=function(a){for(var b;b=a.firstChild;)a.removeChild(b)},Bb=function(a){a&&a.parentNode&&a.parentNode.removeChild(a)},rb=function(a){y(a,"Node cannot be null or undefined.");return 9==a.nodeType?a:a.ownerDocument||a.document},zb=function(a){if(a&&"number"==typeof a.length){if(da(a))return"function"==typeof a.item||"string"==typeof a.item;if(r(a))return"function"==typeof a.item}return!1},qb=function(a){this.document_=
-a||k.document||document};h=qb.prototype;h.getDomHelper=sb;h.getElement=function(a){return tb(this.document_,a)};h.$=qb.prototype.getElement;h.createDom=function(a,b,c){return wb(this.document_,arguments)};h.createElement=function(a){return this.document_.createElement(a)};h.createTextNode=function(a){return this.document_.createTextNode(String(a))};h.appendChild=function(a,b){a.appendChild(b)};var Cb=function(a){k.setTimeout(function(){throw a;},0)},Db,Eb=function(){var a=k.MessageChannel;"undefined"===typeof a&&"undefined"!==typeof window&&window.postMessage&&window.addEventListener&&(a=function(){var a=document.createElement("iframe");a.style.display="none";a.src="";document.documentElement.appendChild(a);var b=a.contentWindow,a=b.document;a.open();a.write("");a.close();var c="callImmediate"+Math.random(),d="file:"==b.location.protocol?"*":b.location.protocol+"//"+b.location.host,a=s(function(a){if(("*"==
-d||a.origin==d)&&a.data==c)this.port1.onmessage()},this);b.addEventListener("message",a,!1);this.port1={};this.port2={postMessage:function(){b.postMessage(c,d)}}});if("undefined"!==typeof a&&!B("Trident")&&!B("MSIE")){var b=new a,c={},d=c;b.port1.onmessage=function(){if(l(c.next)){c=c.next;var a=c.cb;c.cb=null;a()}};return function(a){d.next={cb:a};d=d.next;b.port2.postMessage(0)}}return"undefined"!==typeof document&&"onreadystatechange"in document.createElement("script")?function(a){var b=document.createElement("script");
-b.onreadystatechange=function(){b.onreadystatechange=null;b.parentNode.removeChild(b);b=null;a();a=null};document.documentElement.appendChild(b)}:function(a){k.setTimeout(a,0)}};var Kb=function(a,b){Fb||Gb();Hb||(Fb(),Hb=!0);Ib.push(new Jb(a,b))},Fb,Gb=function(){if(k.Promise&&k.Promise.resolve){var a=k.Promise.resolve();Fb=function(){a.then(Lb)}}else Fb=function(){var a=Lb;!r(k.setImmediate)||k.Window&&k.Window.prototype.setImmediate==k.setImmediate?(Db||(Db=Eb()),Db(a)):k.setImmediate(a)}},Hb=!1,Ib=[],Lb=function(){for(;Ib.length;){var a=Ib;Ib=[];for(var b=0;b<a.length;b++){var c=a[b];try{c.fn.call(c.scope)}catch(d){Cb(d)}}}Hb=!1},Jb=function(a,b){this.fn=a;this.scope=
-b};var Mb=function(a){a.prototype.then=a.prototype.then;a.prototype.$goog_Thenable=!0},Nb=function(a){if(!a)return!1;try{return!!a.$goog_Thenable}catch(b){return!1}};var L=function(a,b){this.state_=0;this.result_=void 0;this.callbackEntries_=this.parent_=null;this.hadUnhandledRejection_=this.executing_=!1;try{var c=this;a.call(b,function(a){I(c,2,a)},function(a){if(!(a instanceof J))try{if(a instanceof Error)throw a;throw Error("Promise rejected.");}catch(b){}I(c,3,a)})}catch(d){I(this,3,d)}};
-L.prototype.then=function(a,b,c){null!=a&&La(a,"opt_onFulfilled should be a function.");null!=b&&La(b,"opt_onRejected should be a function. Did you pass opt_context as the second argument instead of the third?");return Ob(this,r(a)?a:null,r(b)?b:null,c)};Mb(L);L.prototype.cancel=function(a){0==this.state_&&Kb(function(){var b=new J(a);Pb(this,b)},this)};
-var Pb=function(a,b){if(0==a.state_)if(a.parent_){var c=a.parent_;if(c.callbackEntries_){for(var d=0,e=-1,g=0,f;f=c.callbackEntries_[g];g++)if(f=f.child)if(d++,f==a&&(e=g),0<=e&&1<d)break;0<=e&&(0==c.state_&&1==d?Pb(c,b):(d=c.callbackEntries_.splice(e,1)[0],d.child&&Qb(c),d.onRejected(b)))}}else I(a,3,b)},Sb=function(a,b){a.callbackEntries_&&a.callbackEntries_.length||2!=a.state_&&3!=a.state_||Rb(a);a.callbackEntries_||(a.callbackEntries_=[]);a.callbackEntries_.push(b)},Ob=function(a,b,c,d){var e=
-{child:null,onFulfilled:null,onRejected:null};e.child=new L(function(a,f){e.onFulfilled=b?function(c){try{var e=b.call(d,c);a(e)}catch(K){f(K)}}:a;e.onRejected=c?function(b){try{var e=c.call(d,b);!l(e)&&b instanceof J?f(b):a(e)}catch(K){f(K)}}:f});e.child.parent_=a;Sb(a,e);return e.child};L.prototype.unblockAndFulfill_=function(a){y(1==this.state_);this.state_=0;I(this,2,a)};L.prototype.unblockAndReject_=function(a){y(1==this.state_);this.state_=0;I(this,3,a)};
-var I=function(a,b,c){if(0==a.state_){if(a==c)b=3,c=new TypeError("Promise cannot resolve to itself");else{if(Nb(c)){a.state_=1;c.then(a.unblockAndFulfill_,a.unblockAndReject_,a);return}if(da(c))try{var d=c.then;if(r(d)){Tb(a,c,d);return}}catch(e){b=3,c=e}}a.result_=c;a.state_=b;Rb(a);3!=b||c instanceof J||Ub(a,c)}},Tb=function(a,b,c){a.state_=1;var d=!1,e=function(b){d||(d=!0,a.unblockAndFulfill_(b))},g=function(b){d||(d=!0,a.unblockAndReject_(b))};try{c.call(b,e,g)}catch(f){g(f)}},Rb=function(a){a.executing_||
-(a.executing_=!0,Kb(a.executeCallbacks_,a))};L.prototype.executeCallbacks_=function(){for(;this.callbackEntries_&&this.callbackEntries_.length;){var a=this.callbackEntries_;this.callbackEntries_=[];for(var b=0;b<a.length;b++){var c=a[b],d=this.result_;if(2==this.state_)c.onFulfilled(d);else c.child&&Qb(this),c.onRejected(d)}}this.executing_=!1};
-var Qb=function(a){for(;a&&a.hadUnhandledRejection_;a=a.parent_)a.hadUnhandledRejection_=!1},Ub=function(a,b){a.hadUnhandledRejection_=!0;Kb(function(){a.hadUnhandledRejection_&&Vb.call(null,b)})},Vb=Cb,J=function(a){x.call(this,a)};u(J,x);J.prototype.name="cancel";/*
- Portions of this code are from MochiKit, received by
- The Closure Authors under the MIT license. All other code is Copyright
- 2005-2009 The Closure Authors. All Rights Reserved.
-*/
-var M=function(a,b){this.sequence_=[];this.onCancelFunction_=a;this.defaultScope_=b||null;this.hadError_=this.fired_=!1;this.result_=void 0;this.silentlyCanceled_=this.blocking_=this.blocked_=!1;this.unhandledErrorId_=0;this.parent_=null;this.branches_=0};
-M.prototype.cancel=function(a){if(this.fired_)this.result_ instanceof M&&this.result_.cancel();else{if(this.parent_){var b=this.parent_;delete this.parent_;a?b.cancel(a):(b.branches_--,0>=b.branches_&&b.cancel())}this.onCancelFunction_?this.onCancelFunction_.call(this.defaultScope_,this):this.silentlyCanceled_=!0;this.fired_||Wb(this,new Xb)}};M.prototype.continue_=function(a,b){this.blocked_=!1;Yb(this,a,b)};
-var Yb=function(a,b,c){a.fired_=!0;a.result_=c;a.hadError_=!b;Zb(a)},ac=function(a){if(a.fired_){if(!a.silentlyCanceled_)throw new $b;a.silentlyCanceled_=!1}};M.prototype.callback=function(a){ac(this);bc(a);Yb(this,!0,a)};var Wb=function(a,b){ac(a);bc(b);Yb(a,!1,b)},bc=function(a){y(!(a instanceof M),"An execution sequence may not be initiated with a blocking Deferred.")},cc=function(a,b,c,d){y(!a.blocking_,"Blocking Deferreds can not be re-used");a.sequence_.push([b,c,d]);a.fired_&&Zb(a)};
-M.prototype.then=function(a,b,c){var d,e,g=new L(function(a,b){d=a;e=b});cc(this,d,function(a){a instanceof Xb?g.cancel():e(a)});return g.then(a,b,c)};Mb(M);
-var dc=function(a){return Pa(a.sequence_,function(a){return r(a[1])})},Zb=function(a){if(a.unhandledErrorId_&&a.fired_&&dc(a)){var b=a.unhandledErrorId_,c=ec[b];c&&(k.clearTimeout(c.id_),delete ec[b]);a.unhandledErrorId_=0}a.parent_&&(a.parent_.branches_--,delete a.parent_);for(var b=a.result_,d=c=!1;a.sequence_.length&&!a.blocked_;){var e=a.sequence_.shift(),g=e[0],f=e[1],e=e[2];if(g=a.hadError_?f:g)try{var m=g.call(e||a.defaultScope_,b);l(m)&&(a.hadError_=a.hadError_&&(m==b||m instanceof Error),
-a.result_=b=m);Nb(b)&&(d=!0,a.blocked_=!0)}catch($){b=$,a.hadError_=!0,dc(a)||(c=!0)}}a.result_=b;d&&(m=s(a.continue_,a,!0),d=s(a.continue_,a,!1),b instanceof M?(cc(b,m,d),b.blocking_=!0):b.then(m,d));c&&(b=new fc(b),ec[b.id_]=b,a.unhandledErrorId_=b.id_)},$b=function(){x.call(this)};u($b,x);$b.prototype.message="Deferred has already fired";$b.prototype.name="AlreadyCalledError";var Xb=function(){x.call(this)};u(Xb,x);Xb.prototype.message="Deferred was canceled";Xb.prototype.name="CanceledError";
-var fc=function(a){this.id_=k.setTimeout(s(this.throwError,this),0);this.error_=a};fc.prototype.throwError=function(){y(ec[this.id_],"Cannot throw an error that is not scheduled.");delete ec[this.id_];throw this.error_;};var ec={};var kc=function(a){var b={},c=b.document||document,d=document.createElement("SCRIPT"),e={script_:d,timeout_:void 0},g=new M(gc,e),f=null,m=null!=b.timeout?b.timeout:5E3;0<m&&(f=window.setTimeout(function(){hc(d,!0);Wb(g,new ic(1,"Timeout reached for loading script "+a))},m),e.timeout_=f);d.onload=d.onreadystatechange=function(){d.readyState&&"loaded"!=d.readyState&&"complete"!=d.readyState||(hc(d,b.cleanupWhenDone||!1,f),g.callback(null))};d.onerror=function(){hc(d,!0,f);Wb(g,new ic(0,"Error while loading script "+
-a))};vb(d,{type:"text/javascript",charset:"UTF-8",src:a});jc(c).appendChild(d);return g},jc=function(a){var b=a.getElementsByTagName("HEAD");return b&&0!=b.length?b[0]:a.documentElement},gc=function(){if(this&&this.script_){var a=this.script_;a&&"SCRIPT"==a.tagName&&hc(a,!0,this.timeout_)}},hc=function(a,b,c){null!=c&&k.clearTimeout(c);a.onload=ba;a.onerror=ba;a.onreadystatechange=ba;b&&window.setTimeout(function(){Bb(a)},0)},ic=function(a,b){var c="Jsloader error (code #"+a+")";b&&(c+=": "+b);x.call(this,
-c);this.code=a};u(ic,x);var lc=function(a){lc[" "](a);return a};lc[" "]=ba;var mc=!C||C&&9<=hb,nc=C&&!F("9");!E||F("528");D&&F("1.9b")||C&&F("8")||cb&&F("9.5")||E&&F("528");D&&!F("8")||C&&F("9");var N=function(a,b){this.type=a;this.currentTarget=this.target=b;this.defaultPrevented=this.propagationStopped_=!1;this.returnValue_=!0};N.prototype.disposeInternal=function(){};N.prototype.dispose=function(){};N.prototype.preventDefault=function(){this.defaultPrevented=!0;this.returnValue_=!1};var O=function(a,b){N.call(this,a?a.type:"");this.relatedTarget=this.currentTarget=this.target=null;this.charCode=this.keyCode=this.button=this.screenY=this.screenX=this.clientY=this.clientX=this.offsetY=this.offsetX=0;this.metaKey=this.shiftKey=this.altKey=this.ctrlKey=!1;this.event_=this.state=null;if(a){var c=this.type=a.type;this.target=a.target||a.srcElement;this.currentTarget=b;var d=a.relatedTarget;if(d){if(D){var e;t:{try{lc(d.nodeName);e=!0;break t}catch(g){}e=!1}e||(d=null)}}else"mouseover"==
-c?d=a.fromElement:"mouseout"==c&&(d=a.toElement);this.relatedTarget=d;this.offsetX=E||void 0!==a.offsetX?a.offsetX:a.layerX;this.offsetY=E||void 0!==a.offsetY?a.offsetY:a.layerY;this.clientX=void 0!==a.clientX?a.clientX:a.pageX;this.clientY=void 0!==a.clientY?a.clientY:a.pageY;this.screenX=a.screenX||0;this.screenY=a.screenY||0;this.button=a.button;this.keyCode=a.keyCode||0;this.charCode=a.charCode||("keypress"==c?a.keyCode:0);this.ctrlKey=a.ctrlKey;this.altKey=a.altKey;this.shiftKey=a.shiftKey;this.metaKey=
-a.metaKey;this.state=a.state;this.event_=a;a.defaultPrevented&&this.preventDefault()}};u(O,N);O.prototype.preventDefault=function(){O.superClass_.preventDefault.call(this);var a=this.event_;if(a.preventDefault)a.preventDefault();else if(a.returnValue=!1,nc)try{if(a.ctrlKey||112<=a.keyCode&&123>=a.keyCode)a.keyCode=-1}catch(b){}};O.prototype.disposeInternal=function(){};var oc="closure_listenable_"+(1E6*Math.random()|0),pc=0;var qc=function(a,b,c,d,e){this.listener=a;this.proxy=null;this.src=b;this.type=c;this.capture=!!d;this.handler=e;this.key=++pc;this.removed=this.callOnce=!1},rc=function(a){a.removed=!0;a.listener=null;a.proxy=null;a.src=null;a.handler=null};var P=function(a){this.src=a;this.listeners={};this.typeCount_=0};P.prototype.add=function(a,b,c,d,e){var g=a.toString();a=this.listeners[g];a||(a=this.listeners[g]=[],this.typeCount_++);var f=sc(a,b,d,e);-1<f?(b=a[f],c||(b.callOnce=!1)):(b=new qc(b,this.src,g,!!d,e),b.callOnce=c,a.push(b));return b};
-P.prototype.remove=function(a,b,c,d){a=a.toString();if(!(a in this.listeners))return!1;var e=this.listeners[a];b=sc(e,b,c,d);return-1<b?(rc(e[b]),y(null!=e.length),z.splice.call(e,b,1),0==e.length&&(delete this.listeners[a],this.typeCount_--),!0):!1};var tc=function(a,b){var c=b.type;if(!(c in a.listeners))return!1;var d=Qa(a.listeners[c],b);d&&(rc(b),0==a.listeners[c].length&&(delete a.listeners[c],a.typeCount_--));return d};
-P.prototype.removeAll=function(a){a=a&&a.toString();var b=0,c;for(c in this.listeners)if(!a||c==a){for(var d=this.listeners[c],e=0;e<d.length;e++)++b,rc(d[e]);delete this.listeners[c];this.typeCount_--}return b};P.prototype.getListener=function(a,b,c,d){a=this.listeners[a.toString()];var e=-1;a&&(e=sc(a,b,c,d));return-1<e?a[e]:null};var sc=function(a,b,c,d){for(var e=0;e<a.length;++e){var g=a[e];if(!g.removed&&g.listener==b&&g.capture==!!c&&g.handler==d)return e}return-1};var uc="closure_lm_"+(1E6*Math.random()|0),vc={},wc=0,xc=function(a,b,c,d,e){if(p(b)){for(var g=0;g<b.length;g++)xc(a,b[g],c,d,e);return null}c=yc(c);if(a&&a[oc])a=a.listen(b,c,d,e);else{if(!b)throw Error("Invalid event type");var g=!!d,f=zc(a);f||(a[uc]=f=new P(a));c=f.add(b,c,!1,d,e);c.proxy||(d=Ac(),c.proxy=d,d.src=a,d.listener=c,a.addEventListener?a.addEventListener(b.toString(),d,g):a.attachEvent(Bc(b.toString()),d),wc++);a=c}return a},Ac=function(){var a=Cc,b=mc?function(c){return a.call(b.src,
-b.listener,c)}:function(c){c=a.call(b.src,b.listener,c);if(!c)return c};return b},Dc=function(a,b,c,d,e){if(p(b))for(var g=0;g<b.length;g++)Dc(a,b[g],c,d,e);else c=yc(c),a&&a[oc]?a.unlisten(b,c,d,e):a&&(a=zc(a))&&(b=a.getListener(b,c,!!d,e))&&Ec(b)},Ec=function(a){if("number"==typeof a||!a||a.removed)return!1;var b=a.src;if(b&&b[oc])return tc(b.eventTargetListeners_,a);var c=a.type,d=a.proxy;b.removeEventListener?b.removeEventListener(c,d,a.capture):b.detachEvent&&b.detachEvent(Bc(c),d);wc--;(c=zc(b))?
-(tc(c,a),0==c.typeCount_&&(c.src=null,b[uc]=null)):rc(a);return!0},Bc=function(a){return a in vc?vc[a]:vc[a]="on"+a},Gc=function(a,b,c,d){var e=1;if(a=zc(a))if(b=a.listeners[b.toString()])for(b=b.concat(),a=0;a<b.length;a++){var g=b[a];g&&g.capture==c&&!g.removed&&(e&=!1!==Fc(g,d))}return Boolean(e)},Fc=function(a,b){var c=a.listener,d=a.handler||a.src;a.callOnce&&Ec(a);return c.call(d,b)},Cc=function(a,b){if(a.removed)return!0;if(!mc){var c;if(!(c=b))t:{c=["window","event"];for(var d=k,e;e=c.shift();)if(null!=
-d[e])d=d[e];else{c=null;break t}c=d}e=c;c=new O(e,this);d=!0;if(!(0>e.keyCode||void 0!=e.returnValue)){t:{var g=!1;if(0==e.keyCode)try{e.keyCode=-1;break t}catch(f){g=!0}if(g||void 0==e.returnValue)e.returnValue=!0}e=[];for(g=c.currentTarget;g;g=g.parentNode)e.push(g);for(var g=a.type,m=e.length-1;!c.propagationStopped_&&0<=m;m--)c.currentTarget=e[m],d&=Gc(e[m],g,!0,c);for(m=0;!c.propagationStopped_&&m<e.length;m++)c.currentTarget=e[m],d&=Gc(e[m],g,!1,c)}return d}return Fc(a,new O(b,this))},zc=function(a){a=
-a[uc];return a instanceof P?a:null},Hc="__closure_events_fn_"+(1E9*Math.random()>>>0),yc=function(a){y(a,"Listener can not be null.");if(r(a))return a;y(a.handleEvent,"An object listener must have handleEvent method.");a[Hc]||(a[Hc]=function(b){return a.handleEvent(b)});return a[Hc]};var Q=function(a){H.call(this);this.handler_=a;this.keys_={}};u(Q,H);var Ic=[];h=Q.prototype;h.listen=function(a,b,c,d){p(b)||(b&&(Ic[0]=b.toString()),b=Ic);for(var e=0;e<b.length;e++){var g=xc(a,b[e],c||this.handleEvent,d||!1,this.handler_||this);if(!g)break;this.keys_[g.key]=g}return this};
-h.unlisten=function(a,b,c,d,e){if(p(b))for(var g=0;g<b.length;g++)this.unlisten(a,b[g],c,d,e);else c=c||this.handleEvent,e=e||this.handler_||this,c=yc(c),d=!!d,b=a&&a[oc]?a.getListener(b,c,d,e):a?(a=zc(a))?a.getListener(b,c,d,e):null:null,b&&(Ec(b),delete this.keys_[b.key]);return this};h.removeAll=function(){Ta(this.keys_,Ec);this.keys_={}};h.disposeInternal=function(){Q.superClass_.disposeInternal.call(this);this.removeAll()};
-h.handleEvent=function(){throw Error("EventHandler.handleEvent not implemented");};var R=function(){H.call(this);this.eventTargetListeners_=new P(this);this.actualEventTarget_=this;this.parentEventTarget_=null};u(R,H);R.prototype[oc]=!0;h=R.prototype;h.setParentEventTarget=function(a){this.parentEventTarget_=a};h.addEventListener=function(a,b,c,d){xc(this,a,b,c,d)};h.removeEventListener=function(a,b,c,d){Dc(this,a,b,c,d)};
-h.dispatchEvent=function(a){Jc(this);var b,c=this.parentEventTarget_;if(c){b=[];for(var d=1;c;c=c.parentEventTarget_)b.push(c),y(1E3>++d,"infinite loop")}c=this.actualEventTarget_;d=a.type||a;if(q(a))a=new N(a,c);else if(a instanceof N)a.target=a.target||c;else{var e=a;a=new N(d,c);Za(a,e)}var e=!0,g;if(b)for(var f=b.length-1;!a.propagationStopped_&&0<=f;f--)g=a.currentTarget=b[f],e=Kc(g,d,!0,a)&&e;a.propagationStopped_||(g=a.currentTarget=c,e=Kc(g,d,!0,a)&&e,a.propagationStopped_||(e=Kc(g,d,!1,a)&&
-e));if(b)for(f=0;!a.propagationStopped_&&f<b.length;f++)g=a.currentTarget=b[f],e=Kc(g,d,!1,a)&&e;return e};h.disposeInternal=function(){R.superClass_.disposeInternal.call(this);this.eventTargetListeners_&&this.eventTargetListeners_.removeAll(void 0);this.parentEventTarget_=null};h.listen=function(a,b,c,d){Jc(this);return this.eventTargetListeners_.add(String(a),b,!1,c,d)};h.unlisten=function(a,b,c,d){return this.eventTargetListeners_.remove(String(a),b,c,d)};
-var Kc=function(a,b,c,d){b=a.eventTargetListeners_.listeners[String(b)];if(!b)return!0;b=b.concat();for(var e=!0,g=0;g<b.length;++g){var f=b[g];if(f&&!f.removed&&f.capture==c){var m=f.listener,$=f.handler||f.src;f.callOnce&&tc(a.eventTargetListeners_,f);e=!1!==m.call($,d)&&e}}return e&&0!=d.returnValue_};R.prototype.getListener=function(a,b,c,d){return this.eventTargetListeners_.getListener(String(a),b,c,d)};var Jc=function(a){y(a.eventTargetListeners_,"Event target is not initialized. Did you call the superclass (goog.events.EventTarget) constructor?")};var S=function(a){R.call(this);this.imageIdToRequestMap_={};this.imageIdToImageMap_={};this.handler_=new Q(this);this.parent_=a};u(S,R);var Lc=[C&&!F("11")?"readystatechange":"load","abort","error"],Mc=function(a,b,c){(c=q(c)?c:c.src)&&(a.imageIdToRequestMap_[b]={src:c,corsRequestType:l(void 0)?void 0:null})};
-S.prototype.start=function(){var a=this.imageIdToRequestMap_;Na(Ua(a),function(b){var c=a[b];if(c&&(delete a[b],!this.disposed_)){var d;d=this.parent_?sb(this.parent_).createDom("img"):new Image;c.corsRequestType&&(d.crossOrigin=c.corsRequestType);this.handler_.listen(d,Lc,this.onNetworkEvent_);this.imageIdToImageMap_[b]=d;d.id=b;d.src=c.src}},this)};
-S.prototype.onNetworkEvent_=function(a){var b=a.currentTarget;if(b){if("readystatechange"==a.type)if("complete"==b.readyState)a.type="load";else return;"undefined"==typeof b.naturalWidth&&("load"==a.type?(b.naturalWidth=b.width,b.naturalHeight=b.height):(b.naturalWidth=0,b.naturalHeight=0));this.dispatchEvent({type:a.type,target:b});!this.disposed_&&(a=b.id,delete this.imageIdToRequestMap_[a],b=this.imageIdToImageMap_[a])&&(delete this.imageIdToImageMap_[a],this.handler_.unlisten(b,Lc,this.onNetworkEvent_),
-Va(this.imageIdToImageMap_)&&Va(this.imageIdToRequestMap_)&&this.dispatchEvent("complete"))}};S.prototype.disposeInternal=function(){delete this.imageIdToRequestMap_;delete this.imageIdToImageMap_;ob(this.handler_);S.superClass_.disposeInternal.call(this)};var T=function(){};T.getInstance=function(){return T.instance_?T.instance_:T.instance_=new T};T.prototype.nextId_=0;var U=function(a){R.call(this);this.dom_=a||sb();this.id_=null;this.inDocument_=!1;this.element_=null;this.googUiComponentHandler_=void 0;this.childIndex_=this.children_=this.parent_=null;this.wasDecorated_=!1};u(U,R);h=U.prototype;h.idGenerator_=T.getInstance();h.getElement=function(){return this.element_};h.setParentEventTarget=function(a){if(this.parent_&&this.parent_!=a)throw Error("Method not supported");U.superClass_.setParentEventTarget.call(this,a)};h.getDomHelper=function(){return this.dom_};
-h.createDom=function(){this.element_=this.dom_.createElement("div")};
-var Oc=function(a,b){if(a.inDocument_)throw Error("Component already rendered");a.element_||a.createDom();b?b.insertBefore(a.element_,null):a.dom_.document_.body.appendChild(a.element_);a.parent_&&!a.parent_.inDocument_||Nc(a)},Nc=function(a){a.inDocument_=!0;Pc(a,function(a){!a.inDocument_&&a.getElement()&&Nc(a)})},Qc=function(a){Pc(a,function(a){a.inDocument_&&Qc(a)});a.googUiComponentHandler_&&a.googUiComponentHandler_.removeAll();a.inDocument_=!1};
-U.prototype.disposeInternal=function(){this.inDocument_&&Qc(this);this.googUiComponentHandler_&&(this.googUiComponentHandler_.dispose(),delete this.googUiComponentHandler_);Pc(this,function(a){a.dispose()});!this.wasDecorated_&&this.element_&&Bb(this.element_);this.parent_=this.element_=this.childIndex_=this.children_=null;U.superClass_.disposeInternal.call(this)};var Pc=function(a,b){a.children_&&Na(a.children_,b,void 0)};
-U.prototype.removeChild=function(a,b){if(a){var c=q(a)?a:a.id_||(a.id_=":"+(a.idGenerator_.nextId_++).toString(36)),d;this.childIndex_&&c?(d=this.childIndex_,d=(c in d?d[c]:void 0)||null):d=null;a=d;if(c&&a){d=this.childIndex_;c in d&&delete d[c];Qa(this.children_,a);b&&(Qc(a),a.element_&&Bb(a.element_));c=a;if(null==c)throw Error("Unable to set parent component");c.parent_=null;U.superClass_.setParentEventTarget.call(c,null)}}if(!a)throw Error("Child is not in parent component");return a};var V=function(a,b,c){U.call(this,c);this.captchaImage_=a;this.adImage_=b&&300==b.naturalWidth&&57==b.naturalHeight?b:null};u(V,U);V.prototype.createDom=function(){V.superClass_.createDom.call(this);var a=this.getElement();this.captchaImage_.alt=W.image_alt_text;this.getDomHelper().appendChild(a,this.captchaImage_);this.adImage_&&(this.adImage_.alt=W.image_alt_text,this.getDomHelper().appendChild(a,this.adImage_),this.adImage_&&Rc(this.adImage_)&&(a.innerHTML+='<div id="recaptcha-ad-choices"><div class="recaptcha-ad-choices-collapsed"><img height="15" width="30" alt="AdChoices" border="0" src="//www.gstatic.com/recaptcha/api/img/adicon.png"/></div><div class="recaptcha-ad-choices-expanded"><a href="https://support.google.com/adsense/troubleshooter/1631343" target="_blank"><img height="15" width="75" alt="AdChoices" border="0" src="//www.gstatic.com/recaptcha/api/img/adchoices.png"/></a></div></div>'))};
-var Rc=function(a){var b=Sc(a,"visibility");a=Sc(a,"display");return"hidden"!=b&&"none"!=a},Sc=function(a,b){var c;t:{c=rb(a);if(c.defaultView&&c.defaultView.getComputedStyle&&(c=c.defaultView.getComputedStyle(a,null))){c=c[b]||c.getPropertyValue(b)||"";break t}c=""}if(!(c=c||(a.currentStyle?a.currentStyle[b]:null))&&(c=a.style[Fa(b)],"undefined"===typeof c)){c=a.style;var d;t:if(d=Fa(b),void 0===a.style[d]){var e=(E?"Webkit":D?"Moz":C?"ms":cb?"O":null)+Ga(d);if(void 0!==a.style[e]){d=e;break t}}c=
-c[d]||""}return c};V.prototype.disposeInternal=function(){delete this.captchaImage_;delete this.adImage_;V.superClass_.disposeInternal.call(this)};var Tc=function(a,b,c){H.call(this);this.listener_=a;this.interval_=b||0;this.handler_=c;this.callback_=s(this.doAction_,this)};u(Tc,H);h=Tc.prototype;h.id_=0;h.disposeInternal=function(){Tc.superClass_.disposeInternal.call(this);this.stop();delete this.listener_;delete this.handler_};
-h.start=function(a){this.stop();var b=this.callback_;a=l(a)?a:this.interval_;if(!r(b))if(b&&"function"==typeof b.handleEvent)b=s(b.handleEvent,b);else throw Error("Invalid listener argument");this.id_=2147483647<a?-1:k.setTimeout(b,a||0)};h.stop=function(){this.isActive()&&k.clearTimeout(this.id_);this.id_=0};h.isActive=function(){return 0!=this.id_};h.doAction_=function(){this.id_=0;this.listener_&&this.listener_.call(this.handler_)};var Uc=function(a,b){H.call(this);this.listener_=a;this.handler_=b;this.delay_=new Tc(s(this.onTick_,this),0,this)};u(Uc,H);h=Uc.prototype;h.interval_=0;h.runUntil_=0;h.disposeInternal=function(){this.delay_.dispose();delete this.listener_;delete this.handler_;Uc.superClass_.disposeInternal.call(this)};h.start=function(a,b){this.stop();var c=b||0;this.interval_=Math.max(a||0,0);this.runUntil_=0>c?-1:ha()+c;this.delay_.start(0>c?this.interval_:Math.min(this.interval_,c))};h.stop=function(){this.delay_.stop()};
-h.isActive=function(){return this.delay_.isActive()};h.onSuccess=function(){};h.onFailure=function(){};h.onTick_=function(){if(this.listener_.call(this.handler_))this.onSuccess();else if(0>this.runUntil_)this.delay_.start(this.interval_);else{var a=this.runUntil_-ha();if(0>=a)this.onFailure();else this.delay_.start(Math.min(this.interval_,a))}};$a("area base br col command embed hr img input keygen link meta param source track wbr".split(" "));$a("action","cite","data","formaction","href","manifest","poster","src");$a("link","script","style");var Vc={sanitizedContentKindHtml:!0},Wc={sanitizedContentKindText:!0},Xc=function(){throw Error("Do not instantiate directly");};Xc.prototype.contentDir=null;Xc.prototype.toString=function(){return this.content};var bd=function(a){var b=Yc;y(b,"Soy template may not be null.");var c=sb().createElement("DIV");a=Zc(b(a||$c,void 0,void 0));b=a.match(ad);y(!b,"This template starts with a %s, which cannot be a child of a <div>, as required by soy internals. Consider using goog.soy.renderElement instead.\nTemplate output: %s",b&&b[0],a);c.innerHTML=a;return 1==c.childNodes.length&&(a=c.firstChild,1==a.nodeType)?a:c},Zc=function(a){if(!da(a))return String(a);if(a instanceof Xc){if(a.contentKind===Vc)return Ka(a.content);
-if(a.contentKind===Wc)return Da(a.content)}Ja("Soy template output is unsafe for use as HTML: "+a);return"zSoyz"},ad=/^<(body|caption|col|colgroup|head|html|tr|td|tbody|thead|tfoot)>/i,$c={};C&&F(8);var cd=function(){Xc.call(this)};u(cd,Xc);cd.prototype.contentKind=Vc;var dd=function(a){function b(a){this.content=a}b.prototype=a.prototype;return function(a,d){var e=new b(String(a));void 0!==d&&(e.contentDir=d);return e}}(cd);(function(a){function b(a){this.content=a}b.prototype=a.prototype;return function(a,d){var e=String(a);if(!e)return"";e=new b(e);void 0!==d&&(e.contentDir=d);return e}})(cd);
-var ed={"\x00":"\\x00","\b":"\\x08","\t":"\\t","\n":"\\n","\x0B":"\\x0b","\f":"\\f","\r":"\\r",'"':"\\x22",$:"\\x24","&":"\\x26","'":"\\x27","(":"\\x28",")":"\\x29","*":"\\x2a","+":"\\x2b",",":"\\x2c","-":"\\x2d",".":"\\x2e","/":"\\/",":":"\\x3a","<":"\\x3c","=":"\\x3d",">":"\\x3e","?":"\\x3f","[":"\\x5b","\\":"\\\\","]":"\\x5d","^":"\\x5e","{":"\\x7b","|":"\\x7c","}":"\\x7d","\u0085":"\\x85","\u2028":"\\u2028","\u2029":"\\u2029"},fd=function(a){return ed[a]},gd=/[\x00\x08-\x0d\x22\x26\x27\/\x3c-\x3e\\\x85\u2028\u2029]/g;var Yc=function(a){return dd('<script type="text/javascript">var challenge = \''+String(a.challenge).replace(gd,fd)+"'; var publisherId = '"+String(a.publisherId).replace(gd,fd)+"';"+("ca-mongoogle"==a.publisherId?'google_page_url = "3pcerttesting.com/dab/recaptcha.html";':"")+"\n    google_ad_client = publisherId;\n    google_ad_type = 'html';\n    google_ad_output = 'js';\n    google_image_size = '300x57';\n    google_captcha_token = challenge;\n    google_ad_request_done = function(ad) {\n      window.parent.recaptcha.ads.adutils.googleAdRequestDone(ad);\n    };\n    \x3c/script><script type=\"text/javascript\" src=\"//pagead2.googlesyndication.com/pagead/show_ads.js\">\x3c/script>")};
-Yc.soyTemplateName="recaptcha.soy.ads.iframeAdsLoader.main";var Wa=function(){var a=k.google_ad;return!!(a&&a.token&&a.imageAdUrl&&a.hashedAnswer&&a.salt&&a.delayedImpressionUrl&&a.engagementUrl)},hd=function(){k.google_ad&&(k.google_ad=null)},id=function(a){a=a||document.body;var b=k.google_ad;b&&b.searchUpliftUrl&&(b=xb("iframe",{src:'data:text/html;charset=utf-8,<body><img src="https://'+b.searchUpliftUrl+'"></img></body>',style:"display:none"}),a.appendChild(b))},jd=0,kd=function(a){var b=new S;Mc(b,"recaptcha-url-"+jd++,a);b.start()},ld=function(a,b){var c=
-RecaptchaState.publisher_id;hd();var d=xb("iframe",{id:"recaptcha-loader-"+jd++,style:"display: none"});document.body.appendChild(d);var e=d.contentWindow?d.contentWindow.document:d.contentDocument;e.open("text/html","replace");e.write(bd({challenge:a,publisherId:c}).innerHTML);e.close();c=new Uc(function(){return!!k.google_ad});c.onSuccess=function(){Bb(d);b()};c.onFailure=function(){Bb(d);b()};c.start(50,2E3)};t("recaptcha.ads.adutils.googleAdRequestDone",function(a){k.google_ad=a});var md=function(){this.blockSize=-1};var nd=function(){this.blockSize=-1;this.blockSize=64;this.chain_=Array(4);this.block_=Array(this.blockSize);this.totalLength_=this.blockLength_=0;this.reset()};u(nd,md);nd.prototype.reset=function(){this.chain_[0]=1732584193;this.chain_[1]=4023233417;this.chain_[2]=2562383102;this.chain_[3]=271733878;this.totalLength_=this.blockLength_=0};
-var od=function(a,b,c){c||(c=0);var d=Array(16);if(q(b))for(var e=0;16>e;++e)d[e]=b.charCodeAt(c++)|b.charCodeAt(c++)<<8|b.charCodeAt(c++)<<16|b.charCodeAt(c++)<<24;else for(e=0;16>e;++e)d[e]=b[c++]|b[c++]<<8|b[c++]<<16|b[c++]<<24;b=a.chain_[0];c=a.chain_[1];var e=a.chain_[2],g=a.chain_[3],f=0,f=b+(g^c&(e^g))+d[0]+3614090360&4294967295;b=c+(f<<7&4294967295|f>>>25);f=g+(e^b&(c^e))+d[1]+3905402710&4294967295;g=b+(f<<12&4294967295|f>>>20);f=e+(c^g&(b^c))+d[2]+606105819&4294967295;e=g+(f<<17&4294967295|
-f>>>15);f=c+(b^e&(g^b))+d[3]+3250441966&4294967295;c=e+(f<<22&4294967295|f>>>10);f=b+(g^c&(e^g))+d[4]+4118548399&4294967295;b=c+(f<<7&4294967295|f>>>25);f=g+(e^b&(c^e))+d[5]+1200080426&4294967295;g=b+(f<<12&4294967295|f>>>20);f=e+(c^g&(b^c))+d[6]+2821735955&4294967295;e=g+(f<<17&4294967295|f>>>15);f=c+(b^e&(g^b))+d[7]+4249261313&4294967295;c=e+(f<<22&4294967295|f>>>10);f=b+(g^c&(e^g))+d[8]+1770035416&4294967295;b=c+(f<<7&4294967295|f>>>25);f=g+(e^b&(c^e))+d[9]+2336552879&4294967295;g=b+(f<<12&4294967295|
-f>>>20);f=e+(c^g&(b^c))+d[10]+4294925233&4294967295;e=g+(f<<17&4294967295|f>>>15);f=c+(b^e&(g^b))+d[11]+2304563134&4294967295;c=e+(f<<22&4294967295|f>>>10);f=b+(g^c&(e^g))+d[12]+1804603682&4294967295;b=c+(f<<7&4294967295|f>>>25);f=g+(e^b&(c^e))+d[13]+4254626195&4294967295;g=b+(f<<12&4294967295|f>>>20);f=e+(c^g&(b^c))+d[14]+2792965006&4294967295;e=g+(f<<17&4294967295|f>>>15);f=c+(b^e&(g^b))+d[15]+1236535329&4294967295;c=e+(f<<22&4294967295|f>>>10);f=b+(e^g&(c^e))+d[1]+4129170786&4294967295;b=c+(f<<
-5&4294967295|f>>>27);f=g+(c^e&(b^c))+d[6]+3225465664&4294967295;g=b+(f<<9&4294967295|f>>>23);f=e+(b^c&(g^b))+d[11]+643717713&4294967295;e=g+(f<<14&4294967295|f>>>18);f=c+(g^b&(e^g))+d[0]+3921069994&4294967295;c=e+(f<<20&4294967295|f>>>12);f=b+(e^g&(c^e))+d[5]+3593408605&4294967295;b=c+(f<<5&4294967295|f>>>27);f=g+(c^e&(b^c))+d[10]+38016083&4294967295;g=b+(f<<9&4294967295|f>>>23);f=e+(b^c&(g^b))+d[15]+3634488961&4294967295;e=g+(f<<14&4294967295|f>>>18);f=c+(g^b&(e^g))+d[4]+3889429448&4294967295;c=
-e+(f<<20&4294967295|f>>>12);f=b+(e^g&(c^e))+d[9]+568446438&4294967295;b=c+(f<<5&4294967295|f>>>27);f=g+(c^e&(b^c))+d[14]+3275163606&4294967295;g=b+(f<<9&4294967295|f>>>23);f=e+(b^c&(g^b))+d[3]+4107603335&4294967295;e=g+(f<<14&4294967295|f>>>18);f=c+(g^b&(e^g))+d[8]+1163531501&4294967295;c=e+(f<<20&4294967295|f>>>12);f=b+(e^g&(c^e))+d[13]+2850285829&4294967295;b=c+(f<<5&4294967295|f>>>27);f=g+(c^e&(b^c))+d[2]+4243563512&4294967295;g=b+(f<<9&4294967295|f>>>23);f=e+(b^c&(g^b))+d[7]+1735328473&4294967295;
-e=g+(f<<14&4294967295|f>>>18);f=c+(g^b&(e^g))+d[12]+2368359562&4294967295;c=e+(f<<20&4294967295|f>>>12);f=b+(c^e^g)+d[5]+4294588738&4294967295;b=c+(f<<4&4294967295|f>>>28);f=g+(b^c^e)+d[8]+2272392833&4294967295;g=b+(f<<11&4294967295|f>>>21);f=e+(g^b^c)+d[11]+1839030562&4294967295;e=g+(f<<16&4294967295|f>>>16);f=c+(e^g^b)+d[14]+4259657740&4294967295;c=e+(f<<23&4294967295|f>>>9);f=b+(c^e^g)+d[1]+2763975236&4294967295;b=c+(f<<4&4294967295|f>>>28);f=g+(b^c^e)+d[4]+1272893353&4294967295;g=b+(f<<11&4294967295|
-f>>>21);f=e+(g^b^c)+d[7]+4139469664&4294967295;e=g+(f<<16&4294967295|f>>>16);f=c+(e^g^b)+d[10]+3200236656&4294967295;c=e+(f<<23&4294967295|f>>>9);f=b+(c^e^g)+d[13]+681279174&4294967295;b=c+(f<<4&4294967295|f>>>28);f=g+(b^c^e)+d[0]+3936430074&4294967295;g=b+(f<<11&4294967295|f>>>21);f=e+(g^b^c)+d[3]+3572445317&4294967295;e=g+(f<<16&4294967295|f>>>16);f=c+(e^g^b)+d[6]+76029189&4294967295;c=e+(f<<23&4294967295|f>>>9);f=b+(c^e^g)+d[9]+3654602809&4294967295;b=c+(f<<4&4294967295|f>>>28);f=g+(b^c^e)+d[12]+
-3873151461&4294967295;g=b+(f<<11&4294967295|f>>>21);f=e+(g^b^c)+d[15]+530742520&4294967295;e=g+(f<<16&4294967295|f>>>16);f=c+(e^g^b)+d[2]+3299628645&4294967295;c=e+(f<<23&4294967295|f>>>9);f=b+(e^(c|~g))+d[0]+4096336452&4294967295;b=c+(f<<6&4294967295|f>>>26);f=g+(c^(b|~e))+d[7]+1126891415&4294967295;g=b+(f<<10&4294967295|f>>>22);f=e+(b^(g|~c))+d[14]+2878612391&4294967295;e=g+(f<<15&4294967295|f>>>17);f=c+(g^(e|~b))+d[5]+4237533241&4294967295;c=e+(f<<21&4294967295|f>>>11);f=b+(e^(c|~g))+d[12]+1700485571&
-4294967295;b=c+(f<<6&4294967295|f>>>26);f=g+(c^(b|~e))+d[3]+2399980690&4294967295;g=b+(f<<10&4294967295|f>>>22);f=e+(b^(g|~c))+d[10]+4293915773&4294967295;e=g+(f<<15&4294967295|f>>>17);f=c+(g^(e|~b))+d[1]+2240044497&4294967295;c=e+(f<<21&4294967295|f>>>11);f=b+(e^(c|~g))+d[8]+1873313359&4294967295;b=c+(f<<6&4294967295|f>>>26);f=g+(c^(b|~e))+d[15]+4264355552&4294967295;g=b+(f<<10&4294967295|f>>>22);f=e+(b^(g|~c))+d[6]+2734768916&4294967295;e=g+(f<<15&4294967295|f>>>17);f=c+(g^(e|~b))+d[13]+1309151649&
-4294967295;c=e+(f<<21&4294967295|f>>>11);f=b+(e^(c|~g))+d[4]+4149444226&4294967295;b=c+(f<<6&4294967295|f>>>26);f=g+(c^(b|~e))+d[11]+3174756917&4294967295;g=b+(f<<10&4294967295|f>>>22);f=e+(b^(g|~c))+d[2]+718787259&4294967295;e=g+(f<<15&4294967295|f>>>17);f=c+(g^(e|~b))+d[9]+3951481745&4294967295;a.chain_[0]=a.chain_[0]+b&4294967295;a.chain_[1]=a.chain_[1]+(e+(f<<21&4294967295|f>>>11))&4294967295;a.chain_[2]=a.chain_[2]+e&4294967295;a.chain_[3]=a.chain_[3]+g&4294967295};
-nd.prototype.update=function(a,b){l(b)||(b=a.length);for(var c=b-this.blockSize,d=this.block_,e=this.blockLength_,g=0;g<b;){if(0==e)for(;g<=c;)od(this,a,g),g+=this.blockSize;if(q(a))for(;g<b;){if(d[e++]=a.charCodeAt(g++),e==this.blockSize){od(this,d);e=0;break}}else for(;g<b;)if(d[e++]=a[g++],e==this.blockSize){od(this,d);e=0;break}}this.blockLength_=e;this.totalLength_+=b};var X=function(){Q.call(this);this.callback_=this.element_=null;this.md5_=new nd};u(X,Q);var pd=function(a,b,c,d,e){a.unwatch();a.element_=b;a.callback_=e;a.listen(b,"keyup",s(a.onChanged_,a,c,d))};X.prototype.unwatch=function(){this.element_&&this.callback_&&(this.removeAll(),this.callback_=this.element_=null)};
-X.prototype.onChanged_=function(a,b){var c;c=(c=this.element_.value)?c.replace(/[\s\xa0]+/g,"").toLowerCase():"";this.md5_.reset();this.md5_.update(c+"."+b);c=this.md5_;var d=Array((56>c.blockLength_?c.blockSize:2*c.blockSize)-c.blockLength_);d[0]=128;for(var e=1;e<d.length-8;++e)d[e]=0;for(var g=8*c.totalLength_,e=d.length-8;e<d.length;++e)d[e]=g&255,g/=256;c.update(d);d=Array(16);for(e=g=0;4>e;++e)for(var f=0;32>f;f+=8)d[g++]=c.chain_[e]>>>f&255;jb(d).toLowerCase()==a.toLowerCase()&&this.callback_()};
-X.prototype.disposeInternal=function(){this.element_=null;X.superClass_.disposeInternal.call(this)};var rd=function(a,b,c){this.adObject_=a;this.captchaImageUrl_=b;this.opt_successCallback_=c||null;qd(this)};u(rd,H);var qd=function(a){var b=new S;nb(a,ga(ob,b));Mc(b,"recaptcha_challenge_image",a.captchaImageUrl_);Mc(b,"recaptcha_ad_image",a.adObject_.imageAdUrl);var c={};xc(b,"load",s(function(a,b){a[b.target.id]=b.target},a,c));xc(b,"complete",s(a.handleImagesLoaded_,a,c));b.start()};
-rd.prototype.handleImagesLoaded_=function(a){a=new V(a.recaptcha_challenge_image,a.recaptcha_ad_image);nb(this,ga(ob,a));var b=tb(document,"recaptcha_image");Ab(b);Oc(a,b);a.adImage_&&Rc(a.adImage_)&&(kd(this.adObject_.delayedImpressionUrl),a=new X,nb(this,ga(ob,a)),pd(a,tb(document,"recaptcha_response_field"),this.adObject_.hashedAnswer,this.adObject_.salt,s(function(a,b){a.unwatch();kd(b)},this,a,this.adObject_.engagementUrl)),this.opt_successCallback_&&this.opt_successCallback_("04"+this.adObject_.token))};var W=w;t("RecaptchaStr",W);var Y=k.RecaptchaOptions;t("RecaptchaOptions",Y);var sd={tabindex:0,theme:"red",callback:null,lang:null,custom_theme_widget:null,custom_translations:null};t("RecaptchaDefaultOptions",sd);
-var Z={widget:null,timer_id:-1,style_set:!1,theme:null,type:"image",ajax_verify_cb:null,th1:null,th2:null,th3:null,element:"",ad_captcha_plugin:null,reload_timeout:-1,force_reload:!1,$:function(a){return"string"==typeof a?document.getElementById(a):a},attachEvent:function(a,b,c){a&&a.addEventListener?a.addEventListener(b,c,!1):a&&a.attachEvent&&a.attachEvent("on"+b,c)},create:function(a,b,c){Z.destroy();b&&(Z.widget=Z.$(b),Z.element=b);Z._init_options(c);Z._call_challenge(a)},destroy:function(){var a=
-Z.$("recaptcha_challenge_field");a&&a.parentNode.removeChild(a);-1!=Z.timer_id&&clearInterval(Z.timer_id);Z.timer_id=-1;if(a=Z.$("recaptcha_image"))a.innerHTML="";Z.update_widget();Z.widget&&("custom"!=Z.theme?Z.widget.innerHTML="":Z.widget.style.display="none",Z.widget=null)},focus_response_field:function(){var a=Z.$("recaptcha_response_field");a&&a.focus()},get_challenge:function(){return"undefined"==typeof RecaptchaState?null:RecaptchaState.challenge},get_response:function(){var a=Z.$("recaptcha_response_field");
-return a?a.value:null},ajax_verify:function(a){Z.ajax_verify_cb=a;a=Z.get_challenge()||"";var b=Z.get_response()||"";a=Z._get_api_server()+"/ajaxverify?c="+encodeURIComponent(a)+"&response="+encodeURIComponent(b);Z._add_script(a)},_ajax_verify_callback:function(a){Z.ajax_verify_cb(a)},_get_overridable_url:function(a){var b=window.location.protocol;if("undefined"!=typeof _RecaptchaOverrideApiServer)a=_RecaptchaOverrideApiServer;else if("undefined"!=typeof RecaptchaState&&"string"==typeof RecaptchaState.server&&
-0<RecaptchaState.server.length)return RecaptchaState.server.replace(/\/+$/,"");return b+"//"+a},_get_api_server:function(){return Z._get_overridable_url("www.google.com/recaptcha/api")},_get_static_url_root:function(){return Z._get_overridable_url("www.gstatic.com/recaptcha/api")},_call_challenge:function(a){a=Z._get_api_server()+"/challenge?k="+a+"&ajax=1&cachestop="+Math.random();Z.getLang_()&&(a+="&lang="+Z.getLang_());"undefined"!=typeof Y.extra_challenge_params&&(a+="&"+Y.extra_challenge_params);
-Z._add_script(a)},_add_script:function(a){var b=document.createElement("script");b.type="text/javascript";b.src=a;Z._get_script_area().appendChild(b)},_get_script_area:function(){var a=document.getElementsByTagName("head");return a=!a||1>a.length?document.body:a[0]},_hash_merge:function(a){for(var b={},c=0;c<a.length;c++)for(var d in a[c])b[d]=a[c][d];return b},_init_options:function(a){Y=Z._hash_merge([sd,a||{}])},challenge_callback_internal:function(){Z.update_widget();Z._reset_timer();W=Z._hash_merge([w,
-sa[Z.getLang_()]||{},Y.custom_translations||{}]);window.addEventListener&&window.addEventListener("unload",function(){Z.destroy()},!1);Z._is_ie()&&window.attachEvent&&window.attachEvent("onbeforeunload",function(){});if(0<navigator.userAgent.indexOf("KHTML")){var a=document.createElement("iframe");a.src="about:blank";a.style.height="0px";a.style.width="0px";a.style.visibility="hidden";a.style.border="none";a.appendChild(document.createTextNode("This frame prevents back/forward cache problems in Safari."));
-document.body.appendChild(a)}Z._finish_widget()},_add_css:function(a){if(-1!=navigator.appVersion.indexOf("MSIE 5"))document.write('<style type="text/css">'+a+"</style>");else{var b=document.createElement("style");b.type="text/css";b.styleSheet?b.styleSheet.cssText=a:b.appendChild(document.createTextNode(a));Z._get_script_area().appendChild(b)}},_set_style:function(a){Z.style_set||(Z.style_set=!0,Z._add_css(a+"\n\n.recaptcha_is_showing_audio .recaptcha_only_if_image,.recaptcha_isnot_showing_audio .recaptcha_only_if_audio,.recaptcha_had_incorrect_sol .recaptcha_only_if_no_incorrect_sol,.recaptcha_nothad_incorrect_sol .recaptcha_only_if_incorrect_sol{display:none !important}"))},
-_init_builtin_theme:function(){var a=Z.$,b=Z._get_static_url_root(),c=v.VertCss,d=v.VertHtml,e=b+"/img/"+Z.theme,g="gif",b=Z.theme;"clean"==b&&(c=v.CleanCss,d=v.CleanHtml,g="png");c=c.replace(/IMGROOT/g,e);Z._set_style(c);Z.update_widget();Z.widget.innerHTML='<div id="recaptcha_area">'+d+"</div>";c=Z.getLang_();a("recaptcha_privacy")&&null!=c&&"en"==c.substring(0,2).toLowerCase()&&null!=W.privacy_and_terms&&0<W.privacy_and_terms.length&&(c=document.createElement("a"),c.href="http://www.google.com/intl/en/policies/",
-c.target="_blank",c.innerHTML=W.privacy_and_terms,a("recaptcha_privacy").appendChild(c));c=function(b,c,d,K){var G=a(b);G.src=e+"/"+c+"."+g;c=W[d];G.alt=c;b=a(b+"_btn");b.title=c;Z.attachEvent(b,"click",K)};c("recaptcha_reload","refresh","refresh_btn",function(){Z.reload_internal("r")});c("recaptcha_switch_audio","audio","audio_challenge",function(){Z.switch_type("audio")});c("recaptcha_switch_img","text","visual_challenge",function(){Z.switch_type("image")});c("recaptcha_whatsthis","help","help_btn",
-Z.showhelp);"clean"==b&&(a("recaptcha_logo").src=e+"/logo."+g);a("recaptcha_table").className="recaptchatable recaptcha_theme_"+Z.theme;b=function(b,c){var d=a(b);d&&(RecaptchaState.rtl&&"span"==d.tagName.toLowerCase()&&(d.dir="rtl"),d.appendChild(document.createTextNode(W[c])))};b("recaptcha_instructions_image","instructions_visual");b("recaptcha_instructions_audio","instructions_audio");b("recaptcha_instructions_error","incorrect_try_again");a("recaptcha_instructions_image")||a("recaptcha_instructions_audio")||
-(b="audio"==Z.type?W.instructions_audio:W.instructions_visual,b=b.replace(/:$/,""),a("recaptcha_response_field").setAttribute("placeholder",b))},_finish_widget:function(){var a=Z.$,b=Y,c=b.theme;c in{blackglass:1,clean:1,custom:1,red:1,white:1}||(c="red");Z.theme||(Z.theme=c);"custom"!=Z.theme?Z._init_builtin_theme():Z._set_style("");c=document.createElement("span");c.id="recaptcha_challenge_field_holder";c.style.display="none";a("recaptcha_response_field").parentNode.insertBefore(c,a("recaptcha_response_field"));
-a("recaptcha_response_field").setAttribute("autocomplete","off");a("recaptcha_image").style.width="300px";a("recaptcha_image").style.height="57px";a("recaptcha_challenge_field_holder").innerHTML='<input type="hidden" name="recaptcha_challenge_field" id="recaptcha_challenge_field" value=""/>';Z.th_init();Z.should_focus=!1;Z.th3||Z.force_reload?(Z._set_challenge(RecaptchaState.challenge,"image",!0),setTimeout(function(){Z.reload_internal("i")},100)):Z._set_challenge(RecaptchaState.challenge,"image",
-!1);Z.updateTabIndexes_();Z.update_widget();Z.widget&&(Z.widget.style.display="");b.callback&&b.callback()},updateTabIndexes_:function(){var a=Z.$,b=Y;b.tabindex&&(b=b.tabindex,a("recaptcha_response_field").tabIndex=b++,"audio"==Z.type&&a("recaptcha_audio_play_again")&&(a("recaptcha_audio_play_again").tabIndex=b++,a("recaptcha_audio_download"),a("recaptcha_audio_download").tabIndex=b++),"custom"!=Z.theme&&(a("recaptcha_reload_btn").tabIndex=b++,a("recaptcha_switch_audio_btn").tabIndex=b++,a("recaptcha_switch_img_btn").tabIndex=
-b++,a("recaptcha_whatsthis_btn").tabIndex=b,a("recaptcha_privacy").tabIndex=b++))},switch_type:function(a){if(!((new Date).getTime()<Z.reload_timeout)&&(Z.type=a,Z.reload_internal("audio"==Z.type?"a":"v"),"custom"!=Z.theme)){a=Z.$;var b="audio"==Z.type?W.instructions_audio:W.instructions_visual,b=b.replace(/:$/,"");a("recaptcha_response_field").setAttribute("placeholder",b)}},reload:function(){Z.reload_internal("r")},reload_internal:function(a){var b=Y,c=RecaptchaState,d=(new Date).getTime();d<Z.reload_timeout||
-(Z.reload_timeout=d+1E3,"undefined"==typeof a&&(a="r"),d=Z._get_api_server()+"/reload?c="+c.challenge+"&k="+c.site+"&reason="+a+"&type="+Z.type,Z.getLang_()&&(d+="&lang="+Z.getLang_()),"undefined"!=typeof b.extra_challenge_params&&(d+="&"+b.extra_challenge_params),Z.th_callback_invoke(),Z.th1&&(d+="&th="+Z.th1,Z.th1=""),"audio"==Z.type&&(d=b.audio_beta_12_08?d+"&audio_beta_12_08=1":d+"&new_audio_default=1"),Z.should_focus="t"!=a&&"i"!=a,Z._add_script(d),ob(Z.ad_captcha_plugin),c.publisher_id=null)},
-th_callback_invoke:function(){if(Z.th3)try{var a=Z.th3.exec();a&&1600>a.length&&(Z.th1=a)}catch(b){Z.th1=""}},finish_reload:function(a,b,c,d){RecaptchaState.payload_url=c;RecaptchaState.is_incorrect=!1;RecaptchaState.publisher_id=d;Z._set_challenge(a,b,!1);Z.updateTabIndexes_()},_set_challenge:function(a,b,c){"image"==b&&RecaptchaState.publisher_id?ld(a,function(){Z._set_challenge_internal(a,b,c)}):Z._set_challenge_internal(a,b,c)},_set_challenge_internal:function(a,b,c){var d=Z.$,e=RecaptchaState;
-e.challenge=a;Z.type=b;d("recaptcha_challenge_field").value=e.challenge;c||("audio"==b?(d("recaptcha_image").innerHTML=Z.getAudioCaptchaHtml(),Z._loop_playback()):"image"==b&&(a=e.payload_url,a||(a=Z._get_api_server()+"/image?c="+e.challenge,Z.th_callback_invoke(),Z.th1&&(a+="&th="+Z.th1,Z.th1="")),id(d("recaptcha_widget_div")),Wa()?Z.ad_captcha_plugin=new rd(Xa(),a,function(a){RecaptchaState.challenge=a;d("recaptcha_challenge_field").value=a}):d("recaptcha_image").innerHTML='<img id="recaptcha_challenge_image" alt="'+
-W.image_alt_text+'" height="57" width="300" src="'+a+'" />',hd()));Z._css_toggle("recaptcha_had_incorrect_sol","recaptcha_nothad_incorrect_sol",e.is_incorrect);Z._css_toggle("recaptcha_is_showing_audio","recaptcha_isnot_showing_audio","audio"==b);Z._clear_input();Z.should_focus&&Z.focus_response_field();Z._reset_timer()},_reset_timer:function(){clearInterval(Z.timer_id);var a=Math.max(1E3*(RecaptchaState.timeout-60),6E4);Z.timer_id=setInterval(function(){Z.reload_internal("t")},a);return a},showhelp:function(){window.open(Z._get_help_link(),
-"recaptcha_popup","width=460,height=580,location=no,menubar=no,status=no,toolbar=no,scrollbars=yes,resizable=yes")},_clear_input:function(){Z.$("recaptcha_response_field").value=""},_displayerror:function(a){var b=Z.$;b("recaptcha_image").innerHTML="";b("recaptcha_image").appendChild(document.createTextNode(a))},reloaderror:function(a){Z._displayerror(a)},_is_ie:function(){return 0<navigator.userAgent.indexOf("MSIE")&&!window.opera},_css_toggle:function(a,b,c){Z.update_widget();var d=Z.widget;d||
-(d=document.body);var e=d.className,e=e.replace(new RegExp("(^|\\s+)"+a+"(\\s+|$)")," "),e=e.replace(new RegExp("(^|\\s+)"+b+"(\\s+|$)")," ");d.className=e+(" "+(c?a:b))},_get_help_link:function(){var a="https://support.google.com/recaptcha/";Z.getLang_()&&(a+="?hl="+Z.getLang_());return a},playAgain:function(){Z.$("recaptcha_image").innerHTML=Z.getAudioCaptchaHtml();Z._loop_playback()},_loop_playback:function(){var a=Z.$("recaptcha_audio_play_again");a&&Z.attachEvent(a,"click",function(){Z.playAgain();
-return!1})},getAudioCaptchaHtml:function(){var a=RecaptchaState.payload_url;a||(a=Z._get_api_server()+"/audio.mp3?c="+RecaptchaState.challenge,Z.th_callback_invoke(),Z.th1&&(a+="&th="+Z.th1,Z.th1=""));var b=Z._get_api_server()+"/swf/audiocaptcha.swf?v2",b=Z._is_ie()?'<object classid="clsid:D27CDB6E-AE6D-11cf-96B8-444553540000" id="audiocaptcha" width="0" height="0" codebase="https://fpdownload.macromedia.com/get/flashplayer/current/swflash.cab"><param name="movie" value="'+b+'" /><param name="quality" value="high" /><param name="bgcolor" value="#869ca7" /><param name="allowScriptAccess" value="always" /></object><br/>':
-'<embed src="'+b+'" quality="high" bgcolor="#869ca7" width="0" height="0" name="audiocaptcha" align="middle" play="true" loop="false" quality="high" allowScriptAccess="always" type="application/x-shockwave-flash" pluginspage="http://www.adobe.com/go/getflashplayer" /></embed>',c="";Z.checkFlashVer()&&(c="<br/>"+Z.getSpan_('<a id="recaptcha_audio_play_again" class="recaptcha_audio_cant_hear_link">'+W.play_again+"</a>"));c+="<br/>"+Z.getSpan_('<a id="recaptcha_audio_download" class="recaptcha_audio_cant_hear_link" target="_blank" href="'+
-a+'">'+W.cant_hear_this+"</a>");return b+c},getSpan_:function(a){return"<span"+(RecaptchaState&&RecaptchaState.rtl?' dir="rtl"':"")+">"+a+"</span>"},gethttpwavurl:function(){if("audio"!=Z.type)return"";var a=RecaptchaState.payload_url;a||(a=Z._get_api_server()+"/image?c="+RecaptchaState.challenge,Z.th_callback_invoke(),Z.th1&&(a+="&th="+Z.th1,Z.th1=""));return a},checkFlashVer:function(){var a=-1!=navigator.appVersion.indexOf("MSIE"),b=-1!=navigator.appVersion.toLowerCase().indexOf("win"),c=-1!=navigator.userAgent.indexOf("Opera"),
-d=-1;if(null!=navigator.plugins&&0<navigator.plugins.length){if(navigator.plugins["Shockwave Flash 2.0"]||navigator.plugins["Shockwave Flash"])d=navigator.plugins["Shockwave Flash"+(navigator.plugins["Shockwave Flash 2.0"]?" 2.0":"")].description.split(" ")[2].split(".")[0]}else if(a&&b&&!c)try{d=(new ActiveXObject("ShockwaveFlash.ShockwaveFlash.7")).GetVariable("$version").split(" ")[1].split(",")[0]}catch(e){}return 9<=d},getLang_:function(){return Y.lang?Y.lang:"undefined"!=typeof RecaptchaState&&
-RecaptchaState.lang?RecaptchaState.lang:null},challenge_callback:function(){Z.force_reload=!!RecaptchaState.force_reload;if(RecaptchaState.t3){var a=RecaptchaState.t1?ib(mb(RecaptchaState.t1)):"",b=RecaptchaState.t2?ib(mb(RecaptchaState.t2)):"",c=RecaptchaState.t3?ib(mb(RecaptchaState.t3)):"";Z.th2=c;if(a)b=kc(a),cc(b,Z.challenge_callback_internal,null,void 0),cc(b,null,Z.challenge_callback_internal,void 0);else{if(k.execScript)k.execScript(b,"JavaScript");else if(k.eval)null==ia&&(k.eval("var _et_ = 1;"),
-"undefined"!=typeof k._et_?(delete k._et_,ia=!0):ia=!1),ia?k.eval(b):(a=k.document,c=a.createElement("script"),c.type="text/javascript",c.defer=!1,c.appendChild(a.createTextNode(b)),a.body.appendChild(c),a.body.removeChild(c));else throw Error("goog.globalEval not available");Z.challenge_callback_internal()}}else Z.challenge_callback_internal()},th_init:function(){try{k.thintinel&&k.thintinel.th&&(Z.th3=new k.thintinel.th(Z.th2),Z.th2="")}catch(a){}},update_widget:function(){Z.element&&(Z.widget=
-Z.$(Z.element))}};t("Recaptcha",Z);})()
diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py
index 979fa22438..23b4a8d76d 100644
--- a/synapse/storage/__init__.py
+++ b/synapse/storage/__init__.py
@@ -15,50 +15,49 @@
 # limitations under the License.
 
 import datetime
-from dateutil import tz
-import time
 import logging
+import time
 
+from dateutil import tz
+
+from synapse.api.constants import PresenceState
 from synapse.storage.devices import DeviceStore
-from .appservice import (
-    ApplicationServiceStore, ApplicationServiceTransactionStore
-)
+from synapse.storage.user_erasure_store import UserErasureStore
+from synapse.util.caches.stream_change_cache import StreamChangeCache
+
+from .account_data import AccountDataStore
+from .appservice import ApplicationServiceStore, ApplicationServiceTransactionStore
+from .client_ips import ClientIpStore
+from .deviceinbox import DeviceInboxStore
 from .directory import DirectoryStore
+from .end_to_end_keys import EndToEndKeyStore
+from .engines import PostgresEngine
+from .event_federation import EventFederationStore
+from .event_push_actions import EventPushActionsStore
 from .events import EventsStore
+from .filtering import FilteringStore
+from .group_server import GroupServerStore
+from .keys import KeyStore
+from .media_repository import MediaRepositoryStore
+from .monthly_active_users import MonthlyActiveUsersStore
+from .openid import OpenIdStore
 from .presence import PresenceStore, UserPresenceState
 from .profile import ProfileStore
+from .push_rule import PushRuleStore
+from .pusher import PusherStore
+from .receipts import ReceiptsStore
 from .registration import RegistrationStore
+from .rejections import RejectionsStore
 from .room import RoomStore
 from .roommember import RoomMemberStore
-from .stream import StreamStore
-from .transactions import TransactionStore
-from .keys import KeyStore
-from .event_federation import EventFederationStore
-from .pusher import PusherStore
-from .push_rule import PushRuleStore
-from .media_repository import MediaRepositoryStore
-from .rejections import RejectionsStore
-from .event_push_actions import EventPushActionsStore
-from .deviceinbox import DeviceInboxStore
-from .group_server import GroupServerStore
-from .state import StateStore
-from .signatures import SignatureStore
-from .filtering import FilteringStore
-from .end_to_end_keys import EndToEndKeyStore
-
-from .receipts import ReceiptsStore
 from .search import SearchStore
+from .signatures import SignatureStore
+from .state import StateStore
+from .stream import StreamStore
 from .tags import TagsStore
-from .account_data import AccountDataStore
-from .openid import OpenIdStore
-from .client_ips import ClientIpStore
+from .transactions import TransactionStore
 from .user_directory import UserDirectoryStore
-
-from .util.id_generators import IdGenerator, StreamIdGenerator, ChainedIdGenerator
-from .engines import PostgresEngine
-
-from synapse.api.constants import PresenceState
-from synapse.util.caches.stream_change_cache import StreamChangeCache
+from .util.id_generators import ChainedIdGenerator, IdGenerator, StreamIdGenerator
 
 logger = logging.getLogger(__name__)
 
@@ -68,6 +67,7 @@ class DataStore(RoomMemberStore, RoomStore,
                 PresenceStore, TransactionStore,
                 DirectoryStore, KeyStore, StateStore, SignatureStore,
                 ApplicationServiceStore,
+                EventsStore,
                 EventFederationStore,
                 MediaRepositoryStore,
                 RejectionsStore,
@@ -75,7 +75,6 @@ class DataStore(RoomMemberStore, RoomStore,
                 PusherStore,
                 PushRuleStore,
                 ApplicationServiceTransactionStore,
-                EventsStore,
                 ReceiptsStore,
                 EndToEndKeyStore,
                 SearchStore,
@@ -88,6 +87,8 @@ class DataStore(RoomMemberStore, RoomStore,
                 DeviceInboxStore,
                 UserDirectoryStore,
                 GroupServerStore,
+                UserErasureStore,
+                MonthlyActiveUsersStore,
                 ):
 
     def __init__(self, db_conn, hs):
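
The base-class reshuffle above is forced by Python's C3 linearization: EventsStore now inherits from EventFederationStore (see the synapse/storage/events.py hunk further down), and a subclass must be listed before its own base in any bases tuple. A minimal sketch of the constraint, using hypothetical stand-in classes rather than the real stores:

    class Base(object):
        pass

    class Derived(Base):
        pass

    class Ok(Derived, Base):  # fine: the subclass comes first
        pass

    try:
        Bad = type("Bad", (Base, Derived), {})  # base listed first
    except TypeError as e:
        print("inconsistent MRO: %s" % (e,))
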
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index 22d6257a9f..be61147b9b 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -13,22 +13,22 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
+import sys
+import threading
+import time
 
-from synapse.api.errors import StoreError
-from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
-from synapse.util.caches.descriptors import Cache
-from synapse.storage.engines import PostgresEngine
+from six import PY2, iteritems, iterkeys, itervalues
+from six.moves import intern, range
 
+from canonicaljson import json
 from prometheus_client import Histogram
 
 from twisted.internet import defer
 
-import sys
-import time
-import threading
-
-from six import itervalues, iterkeys, iteritems
-from six.moves import intern, range
+from synapse.api.errors import StoreError
+from synapse.storage.engines import PostgresEngine
+from synapse.util.caches.descriptors import Cache
+from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
 
 logger = logging.getLogger(__name__)
 
@@ -221,7 +221,7 @@ class SQLBaseStore(object):
         self._clock.looping_call(loop, 10000)
 
     def _new_transaction(self, conn, desc, after_callbacks, exception_callbacks,
-                         logging_context, func, *args, **kwargs):
+                         func, *args, **kwargs):
         start = time.time()
         txn_id = self._TXN_ID
 
@@ -285,8 +285,7 @@ class SQLBaseStore(object):
             end = time.time()
             duration = end - start
 
-            if logging_context is not None:
-                logging_context.add_database_transaction(duration)
+            LoggingContext.current_context().add_database_transaction(duration)
 
             transaction_logger.debug("[TXN END] {%s} %f sec", name, duration)
 
@@ -310,19 +309,21 @@ class SQLBaseStore(object):
         Returns:
             Deferred: The result of func
         """
-        current_context = LoggingContext.current_context()
-
         after_callbacks = []
         exception_callbacks = []
 
-        def inner_func(conn, *args, **kwargs):
-            return self._new_transaction(
-                conn, desc, after_callbacks, exception_callbacks, current_context,
-                func, *args, **kwargs
+        if LoggingContext.current_context() == LoggingContext.sentinel:
+            logger.warn(
+                "Starting db txn '%s' from sentinel context",
+                desc,
             )
 
         try:
-            result = yield self.runWithConnection(inner_func, *args, **kwargs)
+            result = yield self.runWithConnection(
+                self._new_transaction,
+                desc, after_callbacks, exception_callbacks, func,
+                *args, **kwargs
+            )
 
             for after_callback, after_args, after_kwargs in after_callbacks:
                 after_callback(*after_args, **after_kwargs)
@@ -347,22 +348,25 @@ class SQLBaseStore(object):
         Returns:
             Deferred: The result of func
         """
-        current_context = LoggingContext.current_context()
+        parent_context = LoggingContext.current_context()
+        if parent_context == LoggingContext.sentinel:
+            logger.warn(
+                "Starting db connection from sentinel context: metrics will be lost",
+            )
+            parent_context = None
 
         start_time = time.time()
 
         def inner_func(conn, *args, **kwargs):
-            with LoggingContext("runWithConnection") as context:
+            with LoggingContext("runWithConnection", parent_context) as context:
                 sched_duration_sec = time.time() - start_time
                 sql_scheduling_timer.observe(sched_duration_sec)
-                current_context.add_database_scheduled(sched_duration_sec)
+                context.add_database_scheduled(sched_duration_sec)
 
                 if self.database_engine.is_connection_closed(conn):
                     logger.debug("Reconnecting closed database connection")
                     conn.reconnect()
 
-                current_context.copy_to(context)
-
                 return func(conn, *args, **kwargs)
 
         with PreserveLoggingContext():
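
Both hunks above follow the same idea: capture the calling logcontext up front, warn when it is the sentinel (work started outside any request context would otherwise lose its database metrics silently), and attribute timings to a child context that knows its parent. A toy model of the sentinel check, with a simplified stand-in for LoggingContext:

    class LogCtx(object):
        def __init__(self, name, parent=None):
            self.name, self.parent = name, parent

    SENTINEL = LogCtx("sentinel")
    current_context = SENTINEL  # stand-in for LoggingContext.current_context()

    def start_db_connection():
        parent = current_context
        if parent is SENTINEL:
            print("Starting db connection from sentinel context: "
                  "metrics will be lost")
            parent = None
        # the real code runs the work under
        # LoggingContext("runWithConnection", parent)
        return LogCtx("runWithConnection", parent)

    start_db_connection()  # prints the warning
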
@@ -1147,17 +1151,16 @@ class SQLBaseStore(object):
         defer.returnValue(retval)
 
     def get_user_count_txn(self, txn):
-        """Get a total number of registerd users in the users list.
+        """Get a total number of registered users in the users list.
 
         Args:
             txn : Transaction object
         Returns:
-            defer.Deferred: resolves to int
+            int : number of users
         """
         sql_count = "SELECT COUNT(*) FROM users WHERE is_guest = 0;"
         txn.execute(sql_count)
-        count = txn.fetchone()[0]
-        defer.returnValue(count)
+        return txn.fetchone()[0]
 
     def _simple_search_list(self, table, term, col, retcols,
                             desc="_simple_search_list"):
@@ -1214,3 +1217,32 @@ class _RollbackButIsFineException(Exception):
     something went wrong.
     """
     pass
+
+
+def db_to_json(db_content):
+    """
+    Take some data from a database row and return a JSON-decoded object.
+
+    Args:
+        db_content (memoryview|buffer|bytes|bytearray|unicode)
+    """
+    # psycopg2 on Python 3 returns memoryview objects, which we need to
+    # cast to bytes to decode
+    if isinstance(db_content, memoryview):
+        db_content = db_content.tobytes()
+
+    # psycopg2 on Python 2 returns buffer objects, which we need to cast to
+    # bytes to decode
+    if PY2 and isinstance(db_content, buffer):
+        db_content = bytes(db_content)
+
+    # Decode it to a Unicode string before feeding it to json.loads, so we
+    # consistently get a Unicode-containing object out.
+    if isinstance(db_content, (bytes, bytearray)):
+        db_content = db_content.decode('utf8')
+
+    try:
+        return json.loads(db_content)
+    except Exception:
+        logging.warning("Tried to decode '%r' as JSON and failed", db_content)
+        raise
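
db_to_json exists because the drivers in play return different types for the same column: psycopg2 hands back memoryview on Python 3 and buffer on Python 2, while sqlite3 returns text. A condensed restatement for illustration, using the stdlib json module in place of canonicaljson (which exposes the same loads interface) and omitting the Python 2 buffer branch:

    import json

    def db_to_json(db_content):
        if isinstance(db_content, memoryview):
            db_content = db_content.tobytes()
        if isinstance(db_content, (bytes, bytearray)):
            db_content = db_content.decode('utf8')
        return json.loads(db_content)

    # all three shapes decode to the same object:
    assert db_to_json('{"a": 1}') == {"a": 1}
    assert db_to_json(b'{"a": 1}') == {"a": 1}
    assert db_to_json(memoryview(b'{"a": 1}')) == {"a": 1}
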
diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py
index f83ff0454a..bbc3355c73 100644
--- a/synapse/storage/account_data.py
+++ b/synapse/storage/account_data.py
@@ -14,17 +14,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import abc
+import logging
+
+from canonicaljson import json
+
 from twisted.internet import defer
 
 from synapse.storage._base import SQLBaseStore
 from synapse.storage.util.id_generators import StreamIdGenerator
-
+from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
 from synapse.util.caches.stream_change_cache import StreamChangeCache
-from synapse.util.caches.descriptors import cached, cachedList, cachedInlineCallbacks
-
-import abc
-import simplejson as json
-import logging
 
 logger = logging.getLogger(__name__)
 
@@ -114,25 +114,6 @@ class AccountDataWorkerStore(SQLBaseStore):
         else:
             defer.returnValue(None)
 
-    @cachedList(cached_method_name="get_global_account_data_by_type_for_user",
-                num_args=2, list_name="user_ids", inlineCallbacks=True)
-    def get_global_account_data_by_type_for_users(self, data_type, user_ids):
-        rows = yield self._simple_select_many_batch(
-            table="account_data",
-            column="user_id",
-            iterable=user_ids,
-            keyvalues={
-                "account_data_type": data_type,
-            },
-            retcols=("user_id", "content",),
-            desc="get_global_account_data_by_type_for_users",
-        )
-
-        defer.returnValue({
-            row["user_id"]: json.loads(row["content"]) if row["content"] else None
-            for row in rows
-        })
-
     @cached(num_args=2)
     def get_account_data_for_room(self, user_id, room_id):
         """Get all the client account_data for a user for a room.
diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py
index 12ea8a158c..31248d5e06 100644
--- a/synapse/storage/appservice.py
+++ b/synapse/storage/appservice.py
@@ -15,14 +15,16 @@
 # limitations under the License.
 import logging
 import re
-import simplejson as json
+
+from canonicaljson import json
+
 from twisted.internet import defer
 
 from synapse.appservice import AppServiceTransaction
 from synapse.config.appservice import load_appservices
-from synapse.storage.events import EventsWorkerStore
-from ._base import SQLBaseStore
+from synapse.storage.events_worker import EventsWorkerStore
 
+from ._base import SQLBaseStore
 
 logger = logging.getLogger(__name__)
 
diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py
index 8af325a9f5..5fe1ca2de7 100644
--- a/synapse/storage/background_updates.py
+++ b/synapse/storage/background_updates.py
@@ -12,15 +12,17 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import synapse.util.async
 
-from ._base import SQLBaseStore
-from . import engines
+import logging
+
+from canonicaljson import json
 
 from twisted.internet import defer
 
-import simplejson as json
-import logging
+from synapse.metrics.background_process_metrics import run_as_background_process
+
+from . import engines
+from ._base import SQLBaseStore
 
 logger = logging.getLogger(__name__)
 
@@ -87,12 +89,16 @@ class BackgroundUpdateStore(SQLBaseStore):
         self._background_update_handlers = {}
         self._all_done = False
 
-    @defer.inlineCallbacks
     def start_doing_background_updates(self):
-        logger.info("Starting background schema updates")
+        run_as_background_process(
+            "background_updates", self._run_background_updates,
+        )
 
+    @defer.inlineCallbacks
+    def _run_background_updates(self):
+        logger.info("Starting background schema updates")
         while True:
-            yield synapse.util.async.sleep(
+            yield self.hs.get_clock().sleep(
                 self.BACKGROUND_UPDATE_INTERVAL_MS / 1000.)
 
             try:
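
The recurring pattern in this commit is to move long-running loops off the caller's logcontext and into run_as_background_process, which names the task for metrics and gives it its own context. A rough sketch of the shape of that wrapper, not synapse's actual implementation:

    import logging

    logger = logging.getLogger(__name__)

    def run_as_background_process(desc, func, *args, **kwargs):
        # hypothetical simplification: name the task, run it, and make
        # sure a failure is logged rather than silently dropped
        logger.info("Starting background process %r", desc)
        try:
            return func(*args, **kwargs)
        except Exception:
            logger.exception("Background process %r failed", desc)

    # usage mirroring the hunk above:
    # run_as_background_process("background_updates", store._run_background_updates)
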
diff --git a/synapse/storage/client_ips.py b/synapse/storage/client_ips.py
index ce338514e8..8fc678fa67 100644
--- a/synapse/storage/client_ips.py
+++ b/synapse/storage/client_ips.py
@@ -15,15 +15,15 @@
 
 import logging
 
-from twisted.internet import defer, reactor
+from six import iteritems
 
-from ._base import Cache
-from . import background_updates
+from twisted.internet import defer
 
+from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.util.caches import CACHE_SIZE_FACTOR
 
-from six import iteritems
-
+from . import background_updates
+from ._base import Cache
 
 logger = logging.getLogger(__name__)
 
@@ -35,6 +35,7 @@ LAST_SEEN_GRANULARITY = 120 * 1000
 
 class ClientIpStore(background_updates.BackgroundUpdateStore):
     def __init__(self, db_conn, hs):
+
         self.client_ip_last_seen = Cache(
             name="client_ip_last_seen",
             keylen=4,
@@ -70,8 +71,11 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
         self._client_ip_looper = self._clock.looping_call(
             self._update_client_ips_batch, 5 * 1000
         )
-        reactor.addSystemEventTrigger("before", "shutdown", self._update_client_ips_batch)
+        self.hs.get_reactor().addSystemEventTrigger(
+            "before", "shutdown", self._update_client_ips_batch
+        )
 
+    @defer.inlineCallbacks
     def insert_client_ip(self, user_id, access_token, ip, user_agent, device_id,
                          now=None):
         if not now:
@@ -82,7 +86,7 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
             last_seen = self.client_ip_last_seen.get(key)
         except KeyError:
             last_seen = None
-
+        yield self.populate_monthly_active_users(user_id)
         # Rate-limited inserts
         if last_seen is not None and (now - last_seen) < LAST_SEEN_GRANULARITY:
             return
@@ -92,10 +96,21 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
         self._batch_row_update[key] = (user_agent, device_id, now)
 
     def _update_client_ips_batch(self):
-        to_update = self._batch_row_update
-        self._batch_row_update = {}
-        return self.runInteraction(
-            "_update_client_ips_batch", self._update_client_ips_batch_txn, to_update
+
+        # If the DB pool has already terminated, don't try updating
+        if not self.hs.get_db_pool().running:
+            return
+
+        def update():
+            to_update = self._batch_row_update
+            self._batch_row_update = {}
+            return self.runInteraction(
+                "_update_client_ips_batch", self._update_client_ips_batch_txn,
+                to_update,
+            )
+
+        return run_as_background_process(
+            "update_client_ips", update,
         )
 
     def _update_client_ips_batch_txn(self, txn, to_update):
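
insert_client_ip deliberately never writes a row directly: updates land in an in-memory batch flushed by the five-second looping call above, and a per-(user, token, ip) timestamp cache drops updates arriving within LAST_SEEN_GRANULARITY of the previous one. The buffering logic in sketch form:

    import time

    LAST_SEEN_GRANULARITY = 120 * 1000  # ms, as above

    class ClientIpBuffer(object):
        def __init__(self):
            self._last_seen = {}         # (user, token, ip) -> ms timestamp
            self._batch_row_update = {}

        def insert(self, user_id, access_token, ip, user_agent, device_id):
            now = int(time.time() * 1000)
            key = (user_id, access_token, ip)
            last = self._last_seen.get(key)
            if last is not None and (now - last) < LAST_SEEN_GRANULARITY:
                return  # rate-limited; nothing to write
            self._last_seen[key] = now
            self._batch_row_update[key] = (user_agent, device_id, now)

        def flush(self):
            to_update, self._batch_row_update = self._batch_row_update, {}
            return to_update  # the real code upserts these in one txn
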
diff --git a/synapse/storage/deviceinbox.py b/synapse/storage/deviceinbox.py
index a879e5bfc1..e06b0bc56d 100644
--- a/synapse/storage/deviceinbox.py
+++ b/synapse/storage/deviceinbox.py
@@ -14,14 +14,14 @@
 # limitations under the License.
 
 import logging
-import simplejson
 
-from twisted.internet import defer
+from canonicaljson import json
 
-from .background_updates import BackgroundUpdateStore
+from twisted.internet import defer
 
 from synapse.util.caches.expiringcache import ExpiringCache
 
+from .background_updates import BackgroundUpdateStore
 
 logger = logging.getLogger(__name__)
 
@@ -85,7 +85,7 @@ class DeviceInboxStore(BackgroundUpdateStore):
             )
             rows = []
             for destination, edu in remote_messages_by_destination.items():
-                edu_json = simplejson.dumps(edu)
+                edu_json = json.dumps(edu)
                 rows.append((destination, stream_id, now_ms, edu_json))
             txn.executemany(sql, rows)
 
@@ -169,7 +169,7 @@ class DeviceInboxStore(BackgroundUpdateStore):
         local_by_user_then_device = {}
         for user_id, messages_by_device in messages_by_user_then_device.items():
             messages_json_for_user = {}
-            devices = messages_by_device.keys()
+            devices = list(messages_by_device.keys())
             if len(devices) == 1 and devices[0] == "*":
                 # Handle wildcard device_ids.
                 sql = (
@@ -177,7 +177,7 @@ class DeviceInboxStore(BackgroundUpdateStore):
                     " WHERE user_id = ?"
                 )
                 txn.execute(sql, (user_id,))
-                message_json = simplejson.dumps(messages_by_device["*"])
+                message_json = json.dumps(messages_by_device["*"])
                 for row in txn:
                     # Add the message for all devices for this user on this
                     # server.
@@ -199,7 +199,7 @@ class DeviceInboxStore(BackgroundUpdateStore):
                     # Only insert into the local inbox if the device exists on
                     # this server
                     device = row[0]
-                    message_json = simplejson.dumps(messages_by_device[device])
+                    message_json = json.dumps(messages_by_device[device])
                     messages_json_for_user[device] = message_json
 
             if messages_json_for_user:
@@ -253,7 +253,7 @@ class DeviceInboxStore(BackgroundUpdateStore):
             messages = []
             for row in txn:
                 stream_pos = row[0]
-                messages.append(simplejson.loads(row[1]))
+                messages.append(json.loads(row[1]))
             if len(messages) < limit:
                 stream_pos = current_stream_id
             return (messages, stream_pos)
@@ -389,7 +389,7 @@ class DeviceInboxStore(BackgroundUpdateStore):
             messages = []
             for row in txn:
                 stream_pos = row[0]
-                messages.append(simplejson.loads(row[1]))
+                messages.append(json.loads(row[1]))
             if len(messages) < limit:
                 stream_pos = current_stream_id
             return (messages, stream_pos)
diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py
index d149d8392e..d10ff9e4b9 100644
--- a/synapse/storage/devices.py
+++ b/synapse/storage/devices.py
@@ -13,15 +13,18 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
-import simplejson as json
+
+from six import iteritems, itervalues
+
+from canonicaljson import json
 
 from twisted.internet import defer
 
 from synapse.api.errors import StoreError
-from ._base import SQLBaseStore, Cache
-from synapse.util.caches.descriptors import cached, cachedList, cachedInlineCallbacks
+from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.util.caches.descriptors import cached, cachedInlineCallbacks, cachedList
 
-from six import itervalues, iteritems
+from ._base import Cache, SQLBaseStore, db_to_json
 
 logger = logging.getLogger(__name__)
 
@@ -246,17 +249,31 @@ class DeviceStore(SQLBaseStore):
 
     def _update_remote_device_list_cache_entry_txn(self, txn, user_id, device_id,
                                                    content, stream_id):
-        self._simple_upsert_txn(
-            txn,
-            table="device_lists_remote_cache",
-            keyvalues={
-                "user_id": user_id,
-                "device_id": device_id,
-            },
-            values={
-                "content": json.dumps(content),
-            }
-        )
+        if content.get("deleted"):
+            self._simple_delete_txn(
+                txn,
+                table="device_lists_remote_cache",
+                keyvalues={
+                    "user_id": user_id,
+                    "device_id": device_id,
+                },
+            )
+
+            txn.call_after(
+                self.device_id_exists_cache.invalidate, (user_id, device_id,)
+            )
+        else:
+            self._simple_upsert_txn(
+                txn,
+                table="device_lists_remote_cache",
+                keyvalues={
+                    "user_id": user_id,
+                    "device_id": device_id,
+                },
+                values={
+                    "content": json.dumps(content),
+                }
+            )
 
         txn.call_after(self._get_cached_user_device.invalidate, (user_id, device_id,))
         txn.call_after(self._get_cached_devices_for_user.invalidate, (user_id,))
@@ -364,7 +381,7 @@ class DeviceStore(SQLBaseStore):
             now_stream_id = max(stream_id for stream_id in itervalues(query_map))
 
         devices = self._get_e2e_device_keys_txn(
-            txn, query_map.keys(), include_all_devices=True
+            txn, query_map.keys(), include_all_devices=True, include_deleted_devices=True
         )
 
         prev_sent_id_sql = """
@@ -391,12 +408,15 @@ class DeviceStore(SQLBaseStore):
 
                 prev_id = stream_id
 
-                key_json = device.get("key_json", None)
-                if key_json:
-                    result["keys"] = json.loads(key_json)
-                device_display_name = device.get("device_display_name", None)
-                if device_display_name:
-                    result["device_display_name"] = device_display_name
+                if device is not None:
+                    key_json = device.get("key_json", None)
+                    if key_json:
+                        result["keys"] = db_to_json(key_json)
+                    device_display_name = device.get("device_display_name", None)
+                    if device_display_name:
+                        result["device_display_name"] = device_display_name
+                else:
+                    result["deleted"] = True
 
                 results.append(result)
 
@@ -446,7 +466,7 @@ class DeviceStore(SQLBaseStore):
             retcol="content",
             desc="_get_cached_user_device",
         )
-        defer.returnValue(json.loads(content))
+        defer.returnValue(db_to_json(content))
 
     @cachedInlineCallbacks()
     def _get_cached_devices_for_user(self, user_id):
@@ -459,7 +479,7 @@ class DeviceStore(SQLBaseStore):
             desc="_get_cached_devices_for_user",
         )
         defer.returnValue({
-            device["device_id"]: json.loads(device["content"])
+            device["device_id"]: db_to_json(device["content"])
             for device in devices
         })
 
@@ -491,7 +511,7 @@ class DeviceStore(SQLBaseStore):
 
                 key_json = device.get("key_json", None)
                 if key_json:
-                    result["keys"] = json.loads(key_json)
+                    result["keys"] = db_to_json(key_json)
                 device_display_name = device.get("device_display_name", None)
                 if device_display_name:
                     result["device_display_name"] = device_display_name
@@ -692,6 +712,9 @@ class DeviceStore(SQLBaseStore):
 
             logger.info("Pruned %d device list outbound pokes", txn.rowcount)
 
-        return self.runInteraction(
-            "_prune_old_outbound_device_pokes", _prune_txn
+        return run_as_background_process(
+            "prune_old_outbound_device_pokes",
+            self.runInteraction,
+            "_prune_old_outbound_device_pokes",
+            _prune_txn,
         )
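
The behavioural change in this file is the tombstone handling: a remote device-list entry with "deleted": true now removes the device_lists_remote_cache row (and invalidates device_id_exists_cache) instead of upserting stale content. The dispatch, reduced to a dict standing in for the table:

    def update_remote_device_cache(table, user_id, device_id, content):
        key = (user_id, device_id)
        if content.get("deleted"):
            table.pop(key, None)   # tombstone: drop the cached device
        else:
            table[key] = content   # normal update: upsert the content

    table = {}
    update_remote_device_cache(table, "@alice:hs", "DEV1", {"keys": {}})
    update_remote_device_cache(table, "@alice:hs", "DEV1", {"deleted": True})
    assert ("@alice:hs", "DEV1") not in table
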
diff --git a/synapse/storage/directory.py b/synapse/storage/directory.py
index d0c0059757..808194236a 100644
--- a/synapse/storage/directory.py
+++ b/synapse/storage/directory.py
@@ -13,15 +13,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import SQLBaseStore
-from synapse.util.caches.descriptors import cached
-
-from synapse.api.errors import SynapseError
+from collections import namedtuple
 
 from twisted.internet import defer
 
-from collections import namedtuple
+from synapse.api.errors import SynapseError
+from synapse.util.caches.descriptors import cached
 
+from ._base import SQLBaseStore
 
 RoomAliasMapping = namedtuple(
     "RoomAliasMapping",
diff --git a/synapse/storage/end_to_end_keys.py b/synapse/storage/end_to_end_keys.py
index b146487943..1f1721e820 100644
--- a/synapse/storage/end_to_end_keys.py
+++ b/synapse/storage/end_to_end_keys.py
@@ -12,16 +12,15 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from twisted.internet import defer
-
-from synapse.util.caches.descriptors import cached
+from six import iteritems
 
 from canonicaljson import encode_canonical_json
-import simplejson as json
 
-from ._base import SQLBaseStore
+from twisted.internet import defer
 
-from six import iteritems
+from synapse.util.caches.descriptors import cached
+
+from ._base import SQLBaseStore, db_to_json
 
 
 class EndToEndKeyStore(SQLBaseStore):
@@ -65,12 +64,18 @@ class EndToEndKeyStore(SQLBaseStore):
         )
 
     @defer.inlineCallbacks
-    def get_e2e_device_keys(self, query_list, include_all_devices=False):
+    def get_e2e_device_keys(
+        self, query_list, include_all_devices=False,
+        include_deleted_devices=False,
+    ):
         """Fetch a list of device keys.
         Args:
             query_list(list): List of pairs of user_ids and device_ids.
             include_all_devices (bool): whether to include entries for devices
                 that don't have device keys
+            include_deleted_devices (bool): whether to include null entries for
+                devices which no longer exist (but were in the query_list).
+                This option only takes effect if include_all_devices is true.
         Returns:
             Dict mapping from user-id to dict mapping from device_id to
             dict containing "key_json", "device_display_name".
@@ -80,19 +85,28 @@ class EndToEndKeyStore(SQLBaseStore):
 
         results = yield self.runInteraction(
             "get_e2e_device_keys", self._get_e2e_device_keys_txn,
-            query_list, include_all_devices,
+            query_list, include_all_devices, include_deleted_devices,
         )
 
         for user_id, device_keys in iteritems(results):
             for device_id, device_info in iteritems(device_keys):
-                device_info["keys"] = json.loads(device_info.pop("key_json"))
+                device_info["keys"] = db_to_json(device_info.pop("key_json"))
 
         defer.returnValue(results)
 
-    def _get_e2e_device_keys_txn(self, txn, query_list, include_all_devices):
+    def _get_e2e_device_keys_txn(
+        self, txn, query_list, include_all_devices=False,
+        include_deleted_devices=False,
+    ):
         query_clauses = []
         query_params = []
 
+        if include_all_devices is False:
+            include_deleted_devices = False
+
+        if include_deleted_devices:
+            deleted_devices = set(query_list)
+
         for (user_id, device_id) in query_list:
             query_clause = "user_id = ?"
             query_params.append(user_id)
@@ -120,8 +134,14 @@ class EndToEndKeyStore(SQLBaseStore):
 
         result = {}
         for row in rows:
+            if include_deleted_devices:
+                deleted_devices.remove((row["user_id"], row["device_id"]))
             result.setdefault(row["user_id"], {})[row["device_id"]] = row
 
+        if include_deleted_devices:
+            for user_id, device_id in deleted_devices:
+                result.setdefault(user_id, {})[device_id] = None
+
         return result
 
     @defer.inlineCallbacks
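
include_deleted_devices is what lets the devices.py hunk above mark missing devices with "deleted": True: every queried (user, device) pair that produces no row comes back as an explicit None entry. The bookkeeping in sketch form:

    def query_device_keys(rows, query_list, include_deleted_devices=True):
        # `rows` stands in for the SQL results: dicts with user_id/device_id
        deleted = set(query_list) if include_deleted_devices else set()
        result = {}
        for row in rows:
            deleted.discard((row["user_id"], row["device_id"]))
            result.setdefault(row["user_id"], {})[row["device_id"]] = row
        for user_id, device_id in deleted:
            result.setdefault(user_id, {})[device_id] = None
        return result

    rows = [{"user_id": "@alice:hs", "device_id": "DEV1"}]
    out = query_device_keys(rows, [("@alice:hs", "DEV1"), ("@alice:hs", "GONE")])
    assert out["@alice:hs"]["GONE"] is None
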
diff --git a/synapse/storage/engines/__init__.py b/synapse/storage/engines/__init__.py
index 8c868ece75..e2f9de8451 100644
--- a/synapse/storage/engines/__init__.py
+++ b/synapse/storage/engines/__init__.py
@@ -13,13 +13,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import IncorrectDatabaseSetup
-from .postgres import PostgresEngine
-from .sqlite3 import Sqlite3Engine
-
 import importlib
 import platform
 
+from ._base import IncorrectDatabaseSetup
+from .postgres import PostgresEngine
+from .sqlite3 import Sqlite3Engine
 
 SUPPORTED_MODULE = {
     "sqlite3": Sqlite3Engine,
diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py
index 8a0386c1a4..42225f8a2a 100644
--- a/synapse/storage/engines/postgres.py
+++ b/synapse/storage/engines/postgres.py
@@ -41,13 +41,18 @@ class PostgresEngine(object):
         db_conn.set_isolation_level(
             self.module.extensions.ISOLATION_LEVEL_REPEATABLE_READ
         )
+
+        # Set the bytea output to escape, vs the default of hex
+        cursor = db_conn.cursor()
+        cursor.execute("SET bytea_output TO escape")
+
         # Asynchronous commit, don't wait for the server to call fsync before
         # ending the transaction.
         # https://www.postgresql.org/docs/current/static/wal-async-commit.html
         if not self.synchronous_commit:
-            cursor = db_conn.cursor()
             cursor.execute("SET synchronous_commit TO OFF")
-            cursor.close()
+
+        cursor.close()
 
     def is_deadlock(self, error):
         if isinstance(error, self.module.DatabaseError):
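
bytea_output only changes how the server renders bytea values as text ('escape' is the pre-9.0 format; 'hex' has been the default since PostgreSQL 9.0), so setting it per-connection keeps textual reads of binary columns in the older format. A quick illustration, assuming an open psycopg2 connection:

    def show_bytea_formats(conn):
        cur = conn.cursor()
        cur.execute("SET bytea_output TO hex")
        cur.execute("SELECT 'abc'::bytea::text")
        print(cur.fetchone()[0])  # \x616263  (hex, the server default)
        cur.execute("SET bytea_output TO escape")
        cur.execute("SELECT 'abc'::bytea::text")
        print(cur.fetchone()[0])  # abc       (escape)
        cur.close()
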
diff --git a/synapse/storage/engines/sqlite3.py b/synapse/storage/engines/sqlite3.py
index 60f0fa7fb3..19949fc474 100644
--- a/synapse/storage/engines/sqlite3.py
+++ b/synapse/storage/engines/sqlite3.py
@@ -13,11 +13,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.storage.prepare_database import prepare_database
-
 import struct
 import threading
 
+from synapse.storage.prepare_database import prepare_database
+
 
 class Sqlite3Engine(object):
     single_threaded = True
diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py
index 8fbf7ffba7..24345b20a6 100644
--- a/synapse/storage/event_federation.py
+++ b/synapse/storage/event_federation.py
@@ -12,23 +12,22 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import logging
 import random
 
+from six.moves import range
+from six.moves.queue import Empty, PriorityQueue
+
+from unpaddedbase64 import encode_base64
+
 from twisted.internet import defer
 
+from synapse.api.errors import StoreError
+from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage._base import SQLBaseStore
-from synapse.storage.events import EventsWorkerStore
+from synapse.storage.events_worker import EventsWorkerStore
 from synapse.storage.signatures import SignatureWorkerStore
-
-from synapse.api.errors import StoreError
 from synapse.util.caches.descriptors import cached
-from unpaddedbase64 import encode_base64
-
-import logging
-from six.moves.queue import PriorityQueue, Empty
-
-from six.moves import range
-
 
 logger = logging.getLogger(__name__)
 
@@ -115,9 +114,9 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore,
         sql = (
             "SELECT b.event_id, MAX(e.depth) FROM events as e"
             " INNER JOIN event_edges as g"
-            " ON g.event_id = e.event_id AND g.room_id = e.room_id"
+            " ON g.event_id = e.event_id"
             " INNER JOIN event_backward_extremities as b"
-            " ON g.prev_event_id = b.event_id AND g.room_id = b.room_id"
+            " ON g.prev_event_id = b.event_id"
             " WHERE b.room_id = ? AND g.is_state is ?"
             " GROUP BY b.event_id"
         )
@@ -331,8 +330,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore,
             "SELECT depth, prev_event_id FROM event_edges"
             " INNER JOIN events"
             " ON prev_event_id = events.event_id"
-            " AND event_edges.room_id = events.room_id"
-            " WHERE event_edges.room_id = ? AND event_edges.event_id = ?"
+            " WHERE event_edges.event_id = ?"
             " AND event_edges.is_state = ?"
             " LIMIT ?"
         )
@@ -345,6 +343,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore,
                 table="events",
                 keyvalues={
                     "event_id": event_id,
+                    "room_id": room_id,
                 },
                 retcol="depth",
                 allow_none=True,
@@ -366,7 +365,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore,
 
             txn.execute(
                 query,
-                (room_id, event_id, False, limit - len(event_results))
+                (event_id, False, limit - len(event_results))
             )
 
             for row in txn:
@@ -403,7 +402,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore,
 
         query = (
             "SELECT prev_event_id FROM event_edges "
-            "WHERE room_id = ? AND event_id = ? AND is_state = ? "
+            "WHERE event_id = ? AND is_state = ? "
             "LIMIT ?"
         )
 
@@ -412,7 +411,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore,
             for event_id in front:
                 txn.execute(
                     query,
-                    (room_id, event_id, False, limit - len(event_results))
+                    (event_id, False, limit - len(event_results))
                 )
 
                 for e_id, in txn:
@@ -448,7 +447,7 @@ class EventFederationStore(EventFederationWorkerStore):
         )
 
         hs.get_clock().looping_call(
-            self._delete_old_forward_extrem_cache, 60 * 60 * 1000
+            self._delete_old_forward_extrem_cache, 60 * 60 * 1000,
         )
 
     def _update_min_depth_for_room_txn(self, txn, room_id, depth):
@@ -550,9 +549,11 @@ class EventFederationStore(EventFederationWorkerStore):
                 sql,
                 (self.stream_ordering_month_ago, self.stream_ordering_month_ago,)
             )
-        return self.runInteraction(
+        return run_as_background_process(
+            "delete_old_forward_extrem_cache",
+            self.runInteraction,
             "_delete_old_forward_extrem_cache",
-            _delete_old_forward_extrem_cache_txn
+            _delete_old_forward_extrem_cache_txn,
         )
 
     def clean_room_for_join(self, room_id):
diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py
index d0350ee5fe..6840320641 100644
--- a/synapse/storage/event_push_actions.py
+++ b/synapse/storage/event_push_actions.py
@@ -14,16 +14,18 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.storage._base import SQLBaseStore, LoggingTransaction
-from twisted.internet import defer
-from synapse.util.async import sleep
-from synapse.util.caches.descriptors import cachedInlineCallbacks
-
 import logging
-import simplejson as json
 
 from six import iteritems
 
+from canonicaljson import json
+
+from twisted.internet import defer
+
+from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.storage._base import LoggingTransaction, SQLBaseStore
+from synapse.util.caches.descriptors import cachedInlineCallbacks
+
 logger = logging.getLogger(__name__)
 
 
@@ -84,6 +86,8 @@ class EventPushActionsWorkerStore(SQLBaseStore):
         self.find_stream_orderings_looping_call = self._clock.looping_call(
             self._find_stream_orderings_for_times, 10 * 60 * 1000
         )
+        self._rotate_delay = 3
+        self._rotate_count = 10000
 
     @cachedInlineCallbacks(num_args=3, tree=True, max_entries=5000)
     def get_unread_event_push_actions_by_room_for_user(
@@ -455,11 +459,12 @@ class EventPushActionsWorkerStore(SQLBaseStore):
                 "Error removing push actions after event persistence failure",
             )
 
-    @defer.inlineCallbacks
     def _find_stream_orderings_for_times(self):
-        yield self.runInteraction(
+        return run_as_background_process(
+            "event_push_action_stream_orderings",
+            self.runInteraction,
             "_find_stream_orderings_for_times",
-            self._find_stream_orderings_for_times_txn
+            self._find_stream_orderings_for_times_txn,
         )
 
     def _find_stream_orderings_for_times_txn(self, txn):
@@ -601,7 +606,7 @@ class EventPushActionsStore(EventPushActionsWorkerStore):
 
         self._doing_notif_rotation = False
         self._rotate_notif_loop = self._clock.looping_call(
-            self._rotate_notifs, 30 * 60 * 1000
+            self._start_rotate_notifs, 30 * 60 * 1000,
         )
 
     def _set_push_actions_for_event_and_users_txn(self, txn, events_and_contexts,
@@ -784,6 +789,9 @@ class EventPushActionsStore(EventPushActionsWorkerStore):
             WHERE room_id = ? AND user_id = ? AND stream_ordering <= ?
         """, (room_id, user_id, stream_ordering))
 
+    def _start_rotate_notifs(self):
+        return run_as_background_process("rotate_notifs", self._rotate_notifs)
+
     @defer.inlineCallbacks
     def _rotate_notifs(self):
         if self._doing_notif_rotation or self.stream_ordering_day_ago is None:
@@ -800,7 +808,7 @@ class EventPushActionsStore(EventPushActionsWorkerStore):
                 )
                 if caught_up:
                     break
-                yield sleep(5)
+                yield self.hs.get_clock().sleep(self._rotate_delay)
         finally:
             self._doing_notif_rotation = False
 
@@ -821,8 +829,8 @@ class EventPushActionsStore(EventPushActionsWorkerStore):
         txn.execute("""
             SELECT stream_ordering FROM event_push_actions
             WHERE stream_ordering > ?
-            ORDER BY stream_ordering ASC LIMIT 1 OFFSET 50000
-        """, (old_rotate_stream_ordering,))
+            ORDER BY stream_ordering ASC LIMIT 1 OFFSET ?
+        """, (old_rotate_stream_ordering, self._rotate_count))
         stream_row = txn.fetchone()
         if stream_row:
             offset_stream_ordering, = stream_row
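
Replacing the literal OFFSET 50000 and the five-second sleep with _rotate_count and _rotate_delay makes the rotation loop tunable (and fast to exercise in tests) without editing SQL strings. The loop's overall shape, sketched:

    import time

    def rotate_notifs(rotate_once, delay=3):
        # rotate_once stands in for one _rotate_notifs_txn interaction and
        # returns True once the rotation has caught up
        while True:
            if rotate_once():
                break
            time.sleep(delay)

    batches = iter([False, False, True])  # two partial batches, then done
    rotate_notifs(lambda: next(batches), delay=0)
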
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index cb1082e864..8bf87f38f7 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -14,36 +14,36 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from collections import OrderedDict, deque, namedtuple
-from functools import wraps
 import itertools
 import logging
+from collections import OrderedDict, deque, namedtuple
+from functools import wraps
+
+from six import iteritems, text_type
+from six.moves import range
+
+from canonicaljson import json
+from prometheus_client import Counter
 
-import simplejson as json
 from twisted.internet import defer
 
+import synapse.metrics
+from synapse.api.constants import EventTypes
+from synapse.api.errors import SynapseError
+# these are only included to make the type annotations work
+from synapse.events import EventBase  # noqa: F401
+from synapse.events.snapshot import EventContext  # noqa: F401
+from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.storage.background_updates import BackgroundUpdateStore
+from synapse.storage.event_federation import EventFederationStore
 from synapse.storage.events_worker import EventsWorkerStore
-from synapse.util.async import ObservableDeferred
+from synapse.types import RoomStreamToken, get_domain_from_id
+from synapse.util.async_helpers import ObservableDeferred
+from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
 from synapse.util.frozenutils import frozendict_json_encoder
-from synapse.util.logcontext import (
-    PreserveLoggingContext, make_deferred_yieldable,
-)
+from synapse.util.logcontext import PreserveLoggingContext, make_deferred_yieldable
 from synapse.util.logutils import log_function
 from synapse.util.metrics import Measure
-from synapse.api.constants import EventTypes
-from synapse.api.errors import SynapseError
-from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
-from synapse.types import get_domain_from_id, RoomStreamToken
-import synapse.metrics
-
-# these are only included to make the type annotations work
-from synapse.events import EventBase    # noqa: F401
-from synapse.events.snapshot import EventContext   # noqa: F401
-
-from six.moves import range
-from six import itervalues, iteritems
-
-from prometheus_client import Counter
 
 logger = logging.getLogger(__name__)
 
@@ -67,7 +67,13 @@ state_delta_reuse_delta_counter = Counter(
 
 
 def encode_json(json_object):
-    return frozendict_json_encoder.encode(json_object)
+    """
+    Encode a Python object as JSON and return it in a Unicode string.
+    """
+    out = frozendict_json_encoder.encode(json_object)
+    if isinstance(out, bytes):
+        out = out.decode('utf8')
+    return out
 
 
 class _EventPeristenceQueue(object):
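
The decode in encode_json papers over a Python 2/3 difference: on Python 2 the stdlib JSON encoder returns a byte string for ASCII-only output, while on Python 3 it always returns str, so normalising to Unicode gives callers one type on both. Demonstrated with the stdlib encoder standing in for frozendict_json_encoder:

    import json

    def encode_json(json_object):
        out = json.JSONEncoder().encode(json_object)
        if isinstance(out, bytes):  # Python 2, ASCII-only output
            out = out.decode('utf8')
        return out

    assert isinstance(encode_json({"msgtype": "m.text"}), type(u""))
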
@@ -144,25 +150,22 @@ class _EventPeristenceQueue(object):
             try:
                 queue = self._get_drainining_queue(room_id)
                 for item in queue:
-                    # handle_queue_loop runs in the sentinel logcontext, so
-                    # there is no need to preserve_fn when running the
-                    # callbacks on the deferred.
                     try:
                         ret = yield per_item_callback(item)
-                        item.deferred.callback(ret)
                     except Exception:
-                        item.deferred.errback()
+                        with PreserveLoggingContext():
+                            item.deferred.errback()
+                    else:
+                        with PreserveLoggingContext():
+                            item.deferred.callback(ret)
             finally:
                 queue = self._event_persist_queues.pop(room_id, None)
                 if queue:
                     self._event_persist_queues[room_id] = queue
                 self._currently_persisting_rooms.discard(room_id)
 
-        # set handle_queue_loop off on the background. We don't want to
-        # attribute work done in it to the current request, so we drop the
-        # logcontext altogether.
-        with PreserveLoggingContext():
-            handle_queue_loop()
+        # set handle_queue_loop off in the background
+        run_as_background_process("persist_events", handle_queue_loop)
 
     def _get_drainining_queue(self, room_id):
         queue = self._event_persist_queues.setdefault(room_id, deque())
@@ -198,7 +201,9 @@ def _retry_on_integrity_error(func):
     return f
 
 
-class EventsStore(EventsWorkerStore):
+# inherits from EventFederationStore so that we can call _update_backward_extremities
+# and _handle_mult_prev_events (though arguably those could both be moved in here)
+class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore):
     EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts"
     EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url"
 
@@ -236,12 +241,18 @@ class EventsStore(EventsWorkerStore):
 
         self._state_resolution_handler = hs.get_state_resolution_handler()
 
+    @defer.inlineCallbacks
     def persist_events(self, events_and_contexts, backfilled=False):
         """
         Write events to the database
         Args:
             events_and_contexts: list of tuples of (event, context)
-            backfilled: ?
+            backfilled (bool): Whether the results are retrieved from federation
+                via backfill or not. Used to determine if they're "new" events
+                which might update the current state etc.
+
+        Returns:
+            Deferred[int]: the stream ordering of the latest persisted event
         """
         partitioned = {}
         for event, ctx in events_and_contexts:
@@ -258,10 +269,14 @@ class EventsStore(EventsWorkerStore):
         for room_id in partitioned:
             self._maybe_start_persisting(room_id)
 
-        return make_deferred_yieldable(
+        yield make_deferred_yieldable(
             defer.gatherResults(deferreds, consumeErrors=True)
         )
 
+        max_persisted_id = yield self._stream_id_gen.get_current_token()
+
+        defer.returnValue(max_persisted_id)
+
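With this change a caller can simply wait on persist_events and feed the result into stream notification; a hedged usage sketch (store and events_and_contexts are assumed to be in scope):

    from twisted.internet import defer

    @defer.inlineCallbacks
    def persist_and_get_position(store, events_and_contexts):
        # persist_events now resolves to the stream ordering of the newest
        # persisted event rather than to None.
        max_stream_id = yield store.persist_events(
            events_and_contexts, backfilled=False,
        )
        defer.returnValue(max_stream_id)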
     @defer.inlineCallbacks
     @log_function
     def persist_event(self, event, context, backfilled=False):
@@ -348,11 +363,14 @@ class EventsStore(EventsWorkerStore):
                 new_forward_extremeties = {}
 
                 # map room_id->(type,state_key)->event_id tracking the full
-                # state in each room after adding these events
+                # state in each room after adding these events.
+                # This is simply used to prefill the get_current_state_ids
+                # cache
                 current_state_for_room = {}
 
-                # map room_id->(to_delete, to_insert) where each entry is
-                # a map (type,key)->event_id giving the state delta in each
+                # map room_id->(to_delete, to_insert) where to_delete is a list
+                # of type/state keys to remove from current state, and to_insert
+                # is a map (type,key)->event_id giving the state delta in each
                 # room
                 state_delta_for_room = {}
 
@@ -422,19 +440,40 @@ class EventsStore(EventsWorkerStore):
                             logger.info(
                                 "Calculating state delta for room %s", room_id,
                             )
-                            current_state = yield self._get_new_state_after_events(
-                                room_id,
-                                ev_ctx_rm,
-                                latest_event_ids,
-                                new_latest_event_ids,
-                            )
+                            with Measure(
+                                self._clock,
+                                "persist_events.get_new_state_after_events",
+                            ):
+                                res = yield self._get_new_state_after_events(
+                                    room_id,
+                                    ev_ctx_rm,
+                                    latest_event_ids,
+                                    new_latest_event_ids,
+                                )
+                                current_state, delta_ids = res
+
+                            # If either is not None then there has been a change,
+                            # and we need to work out the delta (or use the one
+                            # given)
+                            if delta_ids is not None:
+                                # If there is a delta we know that we've
+                                # only added or replaced state, never
+                                # removed keys entirely.
+                                state_delta_for_room[room_id] = ([], delta_ids)
+                            elif current_state is not None:
+                                with Measure(
+                                    self._clock,
+                                    "persist_events.calculate_state_delta",
+                                ):
+                                    delta = yield self._calculate_state_delta(
+                                        room_id, current_state,
+                                    )
+                                state_delta_for_room[room_id] = delta
+
+                            # If we have the current_state then let's prefill
+                            # the cache with it.
                             if current_state is not None:
                                 current_state_for_room[room_id] = current_state
-                                delta = yield self._calculate_state_delta(
-                                    room_id, current_state,
-                                )
-                                if delta is not None:
-                                    state_delta_for_room[room_id] = delta
 
                 yield self.runInteraction(
                     "persist_events",
@@ -446,9 +485,14 @@ class EventsStore(EventsWorkerStore):
                     new_forward_extremeties=new_forward_extremeties,
                 )
                 persist_event_counter.inc(len(chunk))
-                synapse.metrics.event_persisted_position.set(
-                    chunk[-1][0].internal_metadata.stream_ordering,
-                )
+
+                if not backfilled:
+                    # backfilled events have negative stream orderings, so we don't
+                    # want to set the event_persisted_position to that.
+                    synapse.metrics.event_persisted_position.set(
+                        chunk[-1][0].internal_metadata.stream_ordering,
+                    )
+
                 for event, context in chunk:
                     if context.app_service:
                         origin_type = "local"
@@ -501,7 +545,6 @@ class EventsStore(EventsWorkerStore):
             iterable=list(new_latest_event_ids),
             retcols=["prev_event_id"],
             keyvalues={
-                "room_id": room_id,
                 "is_state": False,
             },
             desc="_calculate_new_extremeties",
@@ -533,9 +576,15 @@ class EventsStore(EventsWorkerStore):
                 the new forward extremities for the room.
 
         Returns:
-            Deferred[dict[(str,str), str]|None]:
-                None if there are no changes to the room state, or
-                a dict of (type, state_key) -> event_id].
+            Deferred[tuple[dict[(str,str), str]|None, dict[(str,str), str]|None]]:
+            Returns a tuple of two state maps, the first being the full new current
+            state and the second being the delta to the existing current state.
+            If both are None then there has been no change.
+
+            If there has been a change then we only return the delta if it's
+            already been calculated. Conversely, if we do know the delta then
+            the new current state is only returned if we've already calculated
+            it.
         """
 
         if not new_latest_event_ids:
@@ -543,18 +592,32 @@ class EventsStore(EventsWorkerStore):
 
         # map from state_group to ((type, key) -> event_id) state map
         state_groups_map = {}
+
+        # Map from (prev state group, new state group) -> delta state dict
+        state_group_deltas = {}
+
         for ev, ctx in events_context:
             if ctx.state_group is None:
-                # I don't think this can happen, but let's double-check
-                raise Exception(
-                    "Context for new extremity event %s has no state "
-                    "group" % (ev.event_id, ),
-                )
+                # This should only happen for outlier events.
+                if not ev.internal_metadata.is_outlier():
+                    raise Exception(
+                        "Context for new event %s has no state "
+                        "group" % (ev.event_id, ),
+                    )
+                continue
 
             if ctx.state_group in state_groups_map:
                 continue
 
-            state_groups_map[ctx.state_group] = ctx.current_state_ids
+            # We're only interested in pulling out state that has already
+            # been cached in the context. We'll pull stuff out of the DB later
+            # if necessary.
+            current_state_ids = ctx.get_cached_current_state_ids()
+            if current_state_ids is not None:
+                state_groups_map[ctx.state_group] = current_state_ids
+
+            if ctx.prev_group:
+                state_group_deltas[(ctx.prev_group, ctx.state_group)] = ctx.delta_ids
 
         # We need to map the event_ids to their state groups. First, let's
         # check if the event is one we're persisting, in which case we can
@@ -569,7 +632,7 @@ class EventsStore(EventsWorkerStore):
         for event_id in new_latest_event_ids:
             # First search in the list of new events we're adding.
             for ev, ctx in events_context:
-                if event_id == ev.event_id:
+                if event_id == ev.event_id and ctx.state_group is not None:
                     event_id_to_state_group[event_id] = ctx.state_group
                     break
             else:
@@ -597,7 +660,26 @@ class EventsStore(EventsWorkerStore):
         # If the old and new groups are the same then we don't need to do
         # anything.
         if old_state_groups == new_state_groups:
-            return
+            defer.returnValue((None, None))
+
+        if len(new_state_groups) == 1 and len(old_state_groups) == 1:
+            # If we're going from one state group to another, let's check if
+            # we have a delta for that transition. If we do then we can just
+            # return that.
+
+            new_state_group = next(iter(new_state_groups))
+            old_state_group = next(iter(old_state_groups))
+
+            delta_ids = state_group_deltas.get(
+                (old_state_group, new_state_group,), None
+            )
+            if delta_ids is not None:
+                # We have a delta from the existing to the new current state,
+                # so let's just return that. If we happen to already have
+                # the current state in memory then let's also return that,
+                # but it doesn't matter if we don't.
+                new_state = state_groups_map.get(new_state_group)
+                defer.returnValue((new_state, delta_ids))
 
         # Now that we have calculated new_state_groups we need to get
         # their state IDs so we can resolve to a single state set.
@@ -609,7 +691,7 @@ class EventsStore(EventsWorkerStore):
         if len(new_state_groups) == 1:
             # If there is only one state group, then we know what the current
             # state is.
-            defer.returnValue(state_groups_map[new_state_groups.pop()])
+            defer.returnValue((state_groups_map[new_state_groups.pop()], None))
 
         # Ok, we need to defer to the state handler to resolve our state sets.
 
@@ -623,12 +705,14 @@ class EventsStore(EventsWorkerStore):
         }
 
         events_map = {ev.event_id: ev for ev, _ in events_context}
+        room_version = yield self.get_room_version(room_id)
+
         logger.debug("calling resolve_state_groups from preserve_events")
         res = yield self._state_resolution_handler.resolve_state_groups(
-            room_id, state_groups, events_map, get_events
+            room_id, room_version, state_groups, events_map, get_events
         )
 
-        defer.returnValue(res.state)
+        defer.returnValue((res.state, None))
 
     @defer.inlineCallbacks
     def _calculate_state_delta(self, room_id, current_state):
@@ -637,28 +721,20 @@ class EventsStore(EventsWorkerStore):
         Assumes that we are only persisting events for one room at a time.
 
         Returns:
-            2-tuple (to_delete, to_insert) where both are state dicts,
-            i.e. (type, state_key) -> event_id. `to_delete` are the entries to
-            first be deleted from current_state_events, `to_insert` are entries
-            to insert.
+            tuple[list, dict] (to_delete, to_insert): where `to_delete` is the
+            list of type/state keys to remove from current_state_events and
+            `to_insert` is the map of updates to current_state_events.
         """
         existing_state = yield self.get_current_state_ids(room_id)
 
-        existing_events = set(itervalues(existing_state))
-        new_events = set(ev_id for ev_id in itervalues(current_state))
-        changed_events = existing_events ^ new_events
-
-        if not changed_events:
-            return
+        to_delete = [
+            key for key in existing_state
+            if key not in current_state
+        ]
 
-        to_delete = {
-            key: ev_id for key, ev_id in iteritems(existing_state)
-            if ev_id in changed_events
-        }
-        events_to_insert = (new_events - existing_events)
         to_insert = {
             key: ev_id for key, ev_id in iteritems(current_state)
-            if ev_id in events_to_insert
+            if ev_id != existing_state.get(key)
         }
 
         defer.returnValue((to_delete, to_insert))
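The delta computation above is easy to exercise in isolation: given two (type, state_key) -> event_id maps, to_delete lists the keys that vanish from the state and to_insert maps the keys whose event changed. A plain-Python sketch of the same logic, with a worked example:

    def calculate_state_delta(existing_state, current_state):
        # Keys present before but absent now must be deleted outright ...
        to_delete = [key for key in existing_state if key not in current_state]
        # ... while any key whose event_id differs (or is new) is an upsert.
        to_insert = {
            key: ev_id for key, ev_id in current_state.items()
            if ev_id != existing_state.get(key)
        }
        return to_delete, to_insert

    # e.g. a member key disappearing and a topic being replaced:
    old = {("m.room.topic", ""): "$a", ("m.room.member", "@u:hs"): "$b"}
    new = {("m.room.topic", ""): "$c"}
    assert calculate_state_delta(old, new) == (
        [("m.room.member", "@u:hs")],
        {("m.room.topic", ""): "$c"},
    )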
@@ -681,10 +757,10 @@ class EventsStore(EventsWorkerStore):
             delete_existing (bool): True to purge existing table rows for the
                 events from the database. This is useful when retrying due to
                 IntegrityError.
-            state_delta_for_room (dict[str, (list[str], list[str])]):
+            state_delta_for_room (dict[str, (list, dict)]):
                 The current-state delta for each room. For each room, a tuple
-                (to_delete, to_insert), being a list of event ids to be removed
-                from the current state, and a list of event ids to be added to
+                (to_delete, to_insert), being a list of type/state keys to be
+                removed from the current state, and a state set to be added to
                 the current state.
             new_forward_extremeties (dict[str, list[str]]):
                 The new forward extremities for each room. For each room, a
@@ -762,9 +838,46 @@ class EventsStore(EventsWorkerStore):
     def _update_current_state_txn(self, txn, state_delta_by_room, max_stream_order):
         for room_id, current_state_tuple in iteritems(state_delta_by_room):
                 to_delete, to_insert = current_state_tuple
+
+                # First we add entries to the current_state_delta_stream. We
+                # do this before updating the current_state_events table so
+                # that we can use it to calculate the `prev_event_id`. (This
+                # allows us to not have to pull out the existing state
+                # unnecessarily).
+                sql = """
+                    INSERT INTO current_state_delta_stream
+                    (stream_id, room_id, type, state_key, event_id, prev_event_id)
+                    SELECT ?, ?, ?, ?, ?, (
+                        SELECT event_id FROM current_state_events
+                        WHERE room_id = ? AND type = ? AND state_key = ?
+                    )
+                """
+                txn.executemany(sql, (
+                    (
+                        max_stream_order, room_id, etype, state_key, None,
+                        room_id, etype, state_key,
+                    )
+                    for etype, state_key in to_delete
+                    # We sanity check that we're deleting rather than updating
+                    if (etype, state_key) not in to_insert
+                ))
+                txn.executemany(sql, (
+                    (
+                        max_stream_order, room_id, etype, state_key, ev_id,
+                        room_id, etype, state_key,
+                    )
+                    for (etype, state_key), ev_id in iteritems(to_insert)
+                ))
+
+                # Now we actually update the current_state_events table
+
                 txn.executemany(
-                    "DELETE FROM current_state_events WHERE event_id = ?",
-                    [(ev_id,) for ev_id in itervalues(to_delete)],
+                    "DELETE FROM current_state_events"
+                    " WHERE room_id = ? AND type = ? AND state_key = ?",
+                    (
+                        (room_id, etype, state_key)
+                        for etype, state_key in itertools.chain(to_delete, to_insert)
+                    ),
                 )
 
                 self._simple_insert_many_txn(
@@ -781,26 +894,8 @@ class EventsStore(EventsWorkerStore):
                     ],
                 )
 
-                state_deltas = {key: None for key in to_delete}
-                state_deltas.update(to_insert)
-
-                self._simple_insert_many_txn(
-                    txn,
-                    table="current_state_delta_stream",
-                    values=[
-                        {
-                            "stream_id": max_stream_order,
-                            "room_id": room_id,
-                            "type": key[0],
-                            "state_key": key[1],
-                            "event_id": ev_id,
-                            "prev_event_id": to_delete.get(key, None),
-                        }
-                        for key, ev_id in iteritems(state_deltas)
-                    ]
-                )
-
-                self._curr_state_delta_stream_cache.entity_has_changed(
+                txn.call_after(
+                    self._curr_state_delta_stream_cache.entity_has_changed,
                     room_id, max_stream_order,
                 )
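Moving the stream-cache bump into txn.call_after means it only runs once the transaction has actually committed, so a rollback cannot leave the cache claiming a change that never landed. Conceptually (a sketch of the contract, not Synapse's actual transaction wrapper):

    class TxnSketch(object):
        """Stand-in showing the call_after contract."""

        def __init__(self):
            self._after_callbacks = []

        def call_after(self, fn, *args, **kwargs):
            # Queued, not run: nothing happens if the txn is rolled back.
            self._after_callbacks.append((fn, args, kwargs))

        def on_commit(self):
            # The real wrapper commits the DB transaction first, then flushes.
            for fn, args, kwargs in self._after_callbacks:
                fn(*args, **kwargs)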
 
@@ -812,7 +907,8 @@ class EventsStore(EventsWorkerStore):
                 # and which we have added, then we invalidate the caches for all
                 # those users.
                 members_changed = set(
-                    state_key for ev_type, state_key in state_deltas
+                    state_key
+                    for ev_type, state_key in itertools.chain(to_delete, to_insert)
                     if ev_type == EventTypes.Member
                 )
 
@@ -985,7 +1081,7 @@ class EventsStore(EventsWorkerStore):
 
                 metadata_json = encode_json(
                     event.internal_metadata.get_dict()
-                ).decode("UTF-8")
+                )
 
                 sql = (
                     "UPDATE event_json SET internal_metadata = ?"
@@ -1044,7 +1140,6 @@ class EventsStore(EventsWorkerStore):
                 "event_edge_hashes",
                 "event_edges",
                 "event_forward_extremities",
-                "event_push_actions",
                 "event_reference_hashes",
                 "event_search",
                 "event_signatures",
@@ -1064,6 +1159,14 @@ class EventsStore(EventsWorkerStore):
                 [(ev.event_id,) for ev, _ in events_and_contexts]
             )
 
+        for table in (
+            "event_push_actions",
+        ):
+            txn.executemany(
+                "DELETE FROM %s WHERE room_id = ? AND event_id = ?" % (table,),
+                [(ev.room_id, ev.event_id) for ev, _ in events_and_contexts]
+            )
+
     def _store_event_txn(self, txn, events_and_contexts):
         """Insert new events into the event and event_json tables
 
@@ -1092,8 +1195,8 @@ class EventsStore(EventsWorkerStore):
                     "room_id": event.room_id,
                     "internal_metadata": encode_json(
                         event.internal_metadata.get_dict()
-                    ).decode("UTF-8"),
-                    "json": encode_json(event_dict(event)).decode("UTF-8"),
+                    ),
+                    "json": encode_json(event_dict(event)),
                 }
                 for event, _ in events_and_contexts
             ],
@@ -1112,13 +1215,12 @@ class EventsStore(EventsWorkerStore):
                     "type": event.type,
                     "processed": True,
                     "outlier": event.internal_metadata.is_outlier(),
-                    "content": encode_json(event.content).decode("UTF-8"),
                     "origin_server_ts": int(event.origin_server_ts),
                     "received_ts": self._clock.time_msec(),
                     "sender": event.sender,
                     "contains_url": (
                         "url" in event.content
-                        and isinstance(event.content["url"], basestring)
+                        and isinstance(event.content["url"], text_type)
                     ),
                 }
                 for event, _ in events_and_contexts
@@ -1336,88 +1438,6 @@ class EventsStore(EventsWorkerStore):
         )
 
     @defer.inlineCallbacks
-    def have_events_in_timeline(self, event_ids):
-        """Given a list of event ids, check if we have already processed and
-        stored them as non outliers.
-        """
-        rows = yield self._simple_select_many_batch(
-            table="events",
-            retcols=("event_id",),
-            column="event_id",
-            iterable=list(event_ids),
-            keyvalues={"outlier": False},
-            desc="have_events_in_timeline",
-        )
-
-        defer.returnValue(set(r["event_id"] for r in rows))
-
-    @defer.inlineCallbacks
-    def have_seen_events(self, event_ids):
-        """Given a list of event ids, check if we have already processed them.
-
-        Args:
-            event_ids (iterable[str]):
-
-        Returns:
-            Deferred[set[str]]: The events we have already seen.
-        """
-        results = set()
-
-        def have_seen_events_txn(txn, chunk):
-            sql = (
-                "SELECT event_id FROM events as e WHERE e.event_id IN (%s)"
-                % (",".join("?" * len(chunk)), )
-            )
-            txn.execute(sql, chunk)
-            for (event_id, ) in txn:
-                results.add(event_id)
-
-        # break the input up into chunks of 100
-        input_iterator = iter(event_ids)
-        for chunk in iter(lambda: list(itertools.islice(input_iterator, 100)),
-                          []):
-            yield self.runInteraction(
-                "have_seen_events",
-                have_seen_events_txn,
-                chunk,
-            )
-        defer.returnValue(results)
-
-    def get_seen_events_with_rejections(self, event_ids):
-        """Given a list of event ids, check if we rejected them.
-
-        Args:
-            event_ids (list[str])
-
-        Returns:
-            Deferred[dict[str, str|None):
-                Has an entry for each event id we already have seen. Maps to
-                the rejected reason string if we rejected the event, else maps
-                to None.
-        """
-        if not event_ids:
-            return defer.succeed({})
-
-        def f(txn):
-            sql = (
-                "SELECT e.event_id, reason FROM events as e "
-                "LEFT JOIN rejections as r ON e.event_id = r.event_id "
-                "WHERE e.event_id = ?"
-            )
-
-            res = {}
-            for event_id in event_ids:
-                txn.execute(sql, (event_id,))
-                row = txn.fetchone()
-                if row:
-                    _, rejected = row
-                    res[event_id] = rejected
-
-            return res
-
-        return self.runInteraction("get_rejection_reasons", f)
-
-    @defer.inlineCallbacks
     def count_daily_messages(self):
         """
         Returns an estimate of the number of messages sent in the last day.
@@ -1509,7 +1529,7 @@ class EventsStore(EventsWorkerStore):
 
                     contains_url = "url" in content
                     if contains_url:
-                        contains_url &= isinstance(content["url"], basestring)
+                        contains_url &= isinstance(content["url"], text_type)
                 except (KeyError, AttributeError):
                     # If the event is missing a necessary field then
                     # skip over it.
@@ -1890,10 +1910,10 @@ class EventsStore(EventsWorkerStore):
             (room_id,)
         )
         rows = txn.fetchall()
-        max_depth = max(row[0] for row in rows)
+        max_depth = max(row[1] for row in rows)
 
-        if max_depth <= token.topological:
-            # We need to ensure we don't delete all the events from the datanase
+        if max_depth < token.topological:
+            # We need to ensure we don't delete all the events from the database
             # otherwise we wouldn't be able to send any events (due to not
             # having any backwards extremeties)
             raise SynapseError(
diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py
index 32d9d00ffb..a8326f5296 100644
--- a/synapse/storage/events_worker.py
+++ b/synapse/storage/events_worker.py
@@ -12,27 +12,31 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from ._base import SQLBaseStore
 
-from twisted.internet import defer, reactor
+import itertools
+import logging
+from collections import namedtuple
+
+from canonicaljson import json
 
+from twisted.internet import defer
+
+from synapse.api.errors import NotFoundError
+# these are only included to make the type annotations work
+from synapse.events import EventBase  # noqa: F401
 from synapse.events import FrozenEvent
+from synapse.events.snapshot import EventContext  # noqa: F401
 from synapse.events.utils import prune_event
-
+from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.util.logcontext import (
-    PreserveLoggingContext, make_deferred_yieldable, run_in_background,
+    LoggingContext,
+    PreserveLoggingContext,
+    make_deferred_yieldable,
+    run_in_background,
 )
 from synapse.util.metrics import Measure
-from synapse.api.errors import SynapseError
 
-from collections import namedtuple
-
-import logging
-import simplejson as json
-
-# these are only included to make the type annotations work
-from synapse.events import EventBase    # noqa: F401
-from synapse.events.snapshot import EventContext   # noqa: F401
+from ._base import SQLBaseStore
 
 logger = logging.getLogger(__name__)
 
@@ -75,7 +79,7 @@ class EventsWorkerStore(SQLBaseStore):
     @defer.inlineCallbacks
     def get_event(self, event_id, check_redacted=True,
                   get_prev_content=False, allow_rejected=False,
-                  allow_none=False):
+                  allow_none=False, check_room_id=None):
         """Get an event from the database by event_id.
 
         Args:
@@ -86,7 +90,9 @@ class EventsWorkerStore(SQLBaseStore):
                 include the previous states content in the unsigned field.
             allow_rejected (bool): If True return rejected events.
             allow_none (bool): If True, return None if no event found, if
-                False throw an exception.
+                False throw a NotFoundError
+            check_room_id (str|None): if not None, check the room of the found event.
+                If there is a mismatch, behave as per allow_none.
 
         Returns:
             Deferred : A FrozenEvent.
@@ -98,10 +104,16 @@ class EventsWorkerStore(SQLBaseStore):
             allow_rejected=allow_rejected,
         )
 
-        if not events and not allow_none:
-            raise SynapseError(404, "Could not find event %s" % (event_id,))
+        event = events[0] if events else None
+
+        if event is not None and check_room_id is not None:
+            if event.room_id != check_room_id:
+                event = None
 
-        defer.returnValue(events[0] if events else None)
+        if event is None and not allow_none:
+            raise NotFoundError("Could not find event %s" % (event_id,))
+
+        defer.returnValue(event)
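A hedged usage sketch of the extended get_event signature, checking that a claimed event really belongs to the expected room (store, event_id and room_id are assumed to be in scope):

    from twisted.internet import defer

    @defer.inlineCallbacks
    def get_event_in_room(store, event_id, room_id):
        # With check_room_id set, an event from the wrong room is treated
        # exactly like a missing event, so allow_none governs both cases.
        event = yield store.get_event(
            event_id, allow_none=True, check_room_id=room_id,
        )
        defer.returnValue(event)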
 
     @defer.inlineCallbacks
     def get_events(self, event_ids, check_redacted=True,
@@ -145,6 +157,9 @@ class EventsWorkerStore(SQLBaseStore):
         missing_events_ids = [e for e in event_ids if e not in event_entry_map]
 
         if missing_events_ids:
+            log_ctx = LoggingContext.current_context()
+            log_ctx.record_event_fetch(len(missing_events_ids))
+
             missing_events = yield self._enqueue_events(
                 missing_events_ids,
                 check_redacted=check_redacted,
@@ -218,32 +233,47 @@ class EventsWorkerStore(SQLBaseStore):
         """Takes a database connection and waits for requests for events from
         the _event_fetch_list queue.
         """
-        event_list = []
         i = 0
         while True:
+            with self._event_fetch_lock:
+                event_list = self._event_fetch_list
+                self._event_fetch_list = []
+
+                if not event_list:
+                    single_threaded = self.database_engine.single_threaded
+                    if single_threaded or i > EVENT_QUEUE_ITERATIONS:
+                        self._event_fetch_ongoing -= 1
+                        return
+                    else:
+                        self._event_fetch_lock.wait(EVENT_QUEUE_TIMEOUT_S)
+                        i += 1
+                        continue
+                i = 0
+
+            self._fetch_event_list(conn, event_list)
+
+    def _fetch_event_list(self, conn, event_list):
+        """Handle a load of requests from the _event_fetch_list queue
+
+        Args:
+            conn (twisted.enterprise.adbapi.Connection): database connection
+
+            event_list (list[Tuple[list[str], Deferred]]):
+                The fetch requests. Each entry consists of a list of event
+                ids to be fetched, and a deferred to be completed once the
+                events have been fetched.
+
+        """
+        with Measure(self._clock, "_fetch_event_list"):
             try:
-                with self._event_fetch_lock:
-                    event_list = self._event_fetch_list
-                    self._event_fetch_list = []
-
-                    if not event_list:
-                        single_threaded = self.database_engine.single_threaded
-                        if single_threaded or i > EVENT_QUEUE_ITERATIONS:
-                            self._event_fetch_ongoing -= 1
-                            return
-                        else:
-                            self._event_fetch_lock.wait(EVENT_QUEUE_TIMEOUT_S)
-                            i += 1
-                            continue
-                    i = 0
-
-                event_id_lists = zip(*event_list)[0]
+                event_id_lists = list(zip(*event_list))[0]
                 event_ids = [
                     item for sublist in event_id_lists for item in sublist
                 ]
 
                 rows = self._new_transaction(
-                    conn, "do_fetch", [], [], None, self._fetch_event_rows, event_ids
+                    conn, "do_fetch", [], [],
+                    self._fetch_event_rows, event_ids,
                 )
 
                 row_dict = {
@@ -265,20 +295,19 @@ class EventsWorkerStore(SQLBaseStore):
                             except Exception:
                                 logger.exception("Failed to callback")
                 with PreserveLoggingContext():
-                    reactor.callFromThread(fire, event_list, row_dict)
+                    self.hs.get_reactor().callFromThread(fire, event_list, row_dict)
             except Exception as e:
                 logger.exception("do_fetch")
 
                 # We only want to resolve deferreds from the main thread
-                def fire(evs):
+                def fire(evs, exc):
                     for _, d in evs:
                         if not d.called:
                             with PreserveLoggingContext():
-                                d.errback(e)
+                                d.errback(exc)
 
-                if event_list:
-                    with PreserveLoggingContext():
-                        reactor.callFromThread(fire, event_list)
+                with PreserveLoggingContext():
+                    self.hs.get_reactor().callFromThread(fire, event_list, e)
 
     @defer.inlineCallbacks
     def _enqueue_events(self, events, check_redacted=True, allow_rejected=False):
@@ -304,10 +333,11 @@ class EventsWorkerStore(SQLBaseStore):
                 should_start = False
 
         if should_start:
-            with PreserveLoggingContext():
-                self.runWithConnection(
-                    self._do_fetch
-                )
+            run_as_background_process(
+                "fetch_events",
+                self.runWithConnection,
+                self._do_fetch,
+            )
 
         logger.debug("Loading %d events", len(events))
         with PreserveLoggingContext():
@@ -414,3 +444,85 @@ class EventsWorkerStore(SQLBaseStore):
             self._get_event_cache.prefill((original_ev.event_id,), cache_entry)
 
         defer.returnValue(cache_entry)
+
+    @defer.inlineCallbacks
+    def have_events_in_timeline(self, event_ids):
+        """Given a list of event ids, check if we have already processed and
+        stored them as non outliers.
+        """
+        rows = yield self._simple_select_many_batch(
+            table="events",
+            retcols=("event_id",),
+            column="event_id",
+            iterable=list(event_ids),
+            keyvalues={"outlier": False},
+            desc="have_events_in_timeline",
+        )
+
+        defer.returnValue(set(r["event_id"] for r in rows))
+
+    @defer.inlineCallbacks
+    def have_seen_events(self, event_ids):
+        """Given a list of event ids, check if we have already processed them.
+
+        Args:
+            event_ids (iterable[str]):
+
+        Returns:
+            Deferred[set[str]]: The events we have already seen.
+        """
+        results = set()
+
+        def have_seen_events_txn(txn, chunk):
+            sql = (
+                "SELECT event_id FROM events as e WHERE e.event_id IN (%s)"
+                % (",".join("?" * len(chunk)), )
+            )
+            txn.execute(sql, chunk)
+            for (event_id, ) in txn:
+                results.add(event_id)
+
+        # break the input up into chunks of 100
+        input_iterator = iter(event_ids)
+        for chunk in iter(lambda: list(itertools.islice(input_iterator, 100)),
+                          []):
+            yield self.runInteraction(
+                "have_seen_events",
+                have_seen_events_txn,
+                chunk,
+            )
+        defer.returnValue(results)
+
+    def get_seen_events_with_rejections(self, event_ids):
+        """Given a list of event ids, check if we rejected them.
+
+        Args:
+            event_ids (list[str])
+
+        Returns:
+            Deferred[dict[str, str|None]]:
+                Has an entry for each event id we already have seen. Maps to
+                the rejected reason string if we rejected the event, else maps
+                to None.
+        """
+        if not event_ids:
+            return defer.succeed({})
+
+        def f(txn):
+            sql = (
+                "SELECT e.event_id, reason FROM events as e "
+                "LEFT JOIN rejections as r ON e.event_id = r.event_id "
+                "WHERE e.event_id = ?"
+            )
+
+            res = {}
+            for event_id in event_ids:
+                txn.execute(sql, (event_id,))
+                row = txn.fetchone()
+                if row:
+                    _, rejected = row
+                    res[event_id] = rejected
+
+            return res
+
+        return self.runInteraction("get_rejection_reasons", f)
diff --git a/synapse/storage/filtering.py b/synapse/storage/filtering.py
index 2e2763126d..6ddcc909bf 100644
--- a/synapse/storage/filtering.py
+++ b/synapse/storage/filtering.py
@@ -13,14 +13,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from canonicaljson import encode_canonical_json
+
 from twisted.internet import defer
 
-from ._base import SQLBaseStore
-from synapse.api.errors import SynapseError, Codes
+from synapse.api.errors import Codes, SynapseError
 from synapse.util.caches.descriptors import cachedInlineCallbacks
 
-from canonicaljson import encode_canonical_json
-import simplejson as json
+from ._base import SQLBaseStore, db_to_json
 
 
 class FilteringStore(SQLBaseStore):
@@ -44,7 +44,7 @@ class FilteringStore(SQLBaseStore):
             desc="get_user_filter",
         )
 
-        defer.returnValue(json.loads(bytes(def_json).decode("utf-8")))
+        defer.returnValue(db_to_json(def_json))
 
     def add_user_filter(self, user_localpart, user_filter):
         def_json = encode_canonical_json(user_filter)
diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py
index da05ccb027..592d1b4c2a 100644
--- a/synapse/storage/group_server.py
+++ b/synapse/storage/group_server.py
@@ -14,15 +14,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from canonicaljson import json
+
 from twisted.internet import defer
 
 from synapse.api.errors import SynapseError
 
 from ._base import SQLBaseStore
 
-import simplejson as json
-
-
 # The category ID for the "default" category. We don't store as null in the
 # database to avoid the fun of null != null
 _DEFAULT_CATEGORY_ID = ""
diff --git a/synapse/storage/keys.py b/synapse/storage/keys.py
index 0f13b61da8..f547977600 100644
--- a/synapse/storage/keys.py
+++ b/synapse/storage/keys.py
@@ -13,17 +13,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import SQLBaseStore
-from synapse.util.caches.descriptors import cachedInlineCallbacks
+import hashlib
+import logging
 
-from twisted.internet import defer
 import six
 
-import OpenSSL
 from signedjson.key import decode_verify_key_bytes
-import hashlib
 
-import logging
+import OpenSSL
+from twisted.internet import defer
+
+from synapse.util.caches.descriptors import cachedInlineCallbacks
+
+from ._base import SQLBaseStore
 
 logger = logging.getLogger(__name__)
 
diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py
new file mode 100644
index 0000000000..c7899d7fd2
--- /dev/null
+++ b/synapse/storage/monthly_active_users.py
@@ -0,0 +1,221 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+
+from twisted.internet import defer
+
+from synapse.util.caches.descriptors import cached
+
+from ._base import SQLBaseStore
+
+logger = logging.getLogger(__name__)
+
+# Number of msec of granularity to store the monthly_active_user timestamp
+# This means it is not necessary to update the table on every request
+LAST_SEEN_GRANULARITY = 60 * 60 * 1000
+
+
+class MonthlyActiveUsersStore(SQLBaseStore):
+    def __init__(self, dbconn, hs):
+        super(MonthlyActiveUsersStore, self).__init__(None, hs)
+        self._clock = hs.get_clock()
+        self.hs = hs
+        self.reserved_users = ()
+
+    @defer.inlineCallbacks
+    def initialise_reserved_users(self, threepids):
+        store = self.hs.get_datastore()
+        reserved_user_list = []
+
+        # Do not add more reserved users than the total allowable number
+        for tp in threepids[:self.hs.config.max_mau_value]:
+            user_id = yield store.get_user_id_by_threepid(
+                tp["medium"], tp["address"]
+            )
+            if user_id:
+                yield self.upsert_monthly_active_user(user_id)
+                reserved_user_list.append(user_id)
+            else:
+                logger.warning(
+                    "mau limit reserved threepid %s not found in db", tp,
+                )
+        self.reserved_users = tuple(reserved_user_list)
+
+    @defer.inlineCallbacks
+    def reap_monthly_active_users(self):
+        """
+        Cleans out monthly active user table to ensure that no stale
+        entries exist.
+
+        Returns:
+            Deferred[]
+        """
+        def _reap_users(txn):
+            # Purge stale users
+
+            thirty_days_ago = (
+                int(self._clock.time_msec()) - (1000 * 60 * 60 * 24 * 30)
+            )
+            query_args = [thirty_days_ago]
+            base_sql = "DELETE FROM monthly_active_users WHERE timestamp < ?"
+
+            # Need if/else since 'AND user_id NOT IN ({})' fails on Postgres
+            # when len(reserved_users) == 0. Works fine on sqlite.
+            if len(self.reserved_users) > 0:
+                # questionmarks is a hack to overcome sqlite not supporting
+                # tuples in 'WHERE IN %s'
+                questionmarks = '?' * len(self.reserved_users)
+
+                query_args.extend(self.reserved_users)
+                sql = base_sql + """ AND user_id NOT IN ({})""".format(
+                    ','.join(questionmarks)
+                )
+            else:
+                sql = base_sql
+
+            txn.execute(sql, query_args)
+
+            # If MAU user count still exceeds the MAU threshold, then delete on
+            # a least recently active basis.
+            # Note it is not possible to write this query using OFFSET due to
+            # incompatibilities in how sqlite and postgres support the feature.
+            # sqlite requires 'LIMIT -1 OFFSET ?' (the LIMIT must be present),
+            # while Postgres does not require 'LIMIT' but does not support
+            # negative LIMIT values either. So there is no way to write it
+            # that both can support.
+            safe_guard = self.hs.config.max_mau_value - len(self.reserved_users)
+            # Must be greater than zero for postgres
+            safe_guard = safe_guard if safe_guard > 0 else 0
+            query_args = [safe_guard]
+
+            base_sql = """
+                DELETE FROM monthly_active_users
+                WHERE user_id NOT IN (
+                    SELECT user_id FROM monthly_active_users
+                    ORDER BY timestamp DESC
+                    LIMIT ?
+                    )
+                """
+            # Need if/else since 'AND user_id NOT IN ({})' fails on Postgres
+            # when len(reserved_users) == 0. Works fine on sqlite.
+            if len(self.reserved_users) > 0:
+                query_args.extend(self.reserved_users)
+                sql = base_sql + """ AND user_id NOT IN ({})""".format(
+                    ','.join(questionmarks)
+                )
+            else:
+                sql = base_sql
+            txn.execute(sql, query_args)
+
+        yield self.runInteraction("reap_monthly_active_users", _reap_users)
+        # It seems poor to invalidate the whole cache. Postgres supports
+        # 'RETURNING', which would allow me to invalidate only the
+        # specific users, but sqlite has no way to do this, and instead
+        # I would need to SELECT and then DELETE, which without locking
+        # is racy.
+        # Have resolved to invalidate the whole cache for now and do
+        # something about it if and when the perf becomes significant
+        self.user_last_seen_monthly_active.invalidate_all()
+        self.get_monthly_active_count.invalidate_all()
+
+    @cached(num_args=0)
+    def get_monthly_active_count(self):
+        """Generates current count of monthly active users
+
+        Returns:
+            Deferred[int]: Number of current monthly active users
+        """
+
+        def _count_users(txn):
+            sql = "SELECT COALESCE(count(*), 0) FROM monthly_active_users"
+
+            txn.execute(sql)
+            count, = txn.fetchone()
+            return count
+        return self.runInteraction("count_users", _count_users)
+
+    @defer.inlineCallbacks
+    def upsert_monthly_active_user(self, user_id):
+        """
+            Updates or inserts a monthly active user entry
+            Arguments:
+                user_id (str): user to add/update
+            Returns:
+                Deferred[bool]: True if a new entry was created, False otherwise.
+        """
+        is_insert = yield self._simple_upsert(
+            desc="upsert_monthly_active_user",
+            table="monthly_active_users",
+            keyvalues={
+                "user_id": user_id,
+            },
+            values={
+                "timestamp": int(self._clock.time_msec()),
+            },
+            lock=False,
+        )
+        if is_insert:
+            self.user_last_seen_monthly_active.invalidate((user_id,))
+            self.get_monthly_active_count.invalidate(())
+
+    @cached(num_args=1)
+    def user_last_seen_monthly_active(self, user_id):
+        """
+            Checks if a given user is part of the monthly active user group
+            Arguments:
+                user_id (str): user to check
+            Returns:
+                Deferred[int]: timestamp the user was last seen, None if never seen
+
+        """
+
+        return self._simple_select_one_onecol(
+            table="monthly_active_users",
+            keyvalues={
+                "user_id": user_id,
+            },
+            retcol="timestamp",
+            allow_none=True,
+            desc="user_last_seen_monthly_active",
+        )
+
+    @defer.inlineCallbacks
+    def populate_monthly_active_users(self, user_id):
+        """Checks on the state of monthly active user limits and optionally
+        add the user to the monthly active tables
+
+        Args:
+            user_id(str): the user_id to query
+        """
+        if self.hs.config.limit_usage_by_mau:
+            is_trial = yield self.is_trial_user(user_id)
+            if is_trial:
+                # we don't track trial users in the MAU table.
+                return
+
+            last_seen_timestamp = yield self.user_last_seen_monthly_active(user_id)
+            now = self.hs.get_clock().time_msec()
+
+            # We want to reduce the total number of db writes, and are happy
+            # to trade timestamp accuracy in order to lighten load. This means
+            # we always insert new users (where the MAU threshold has not been
+            # reached), but only update if we have not previously seen the user
+            # for LAST_SEEN_GRANULARITY ms.
+            if last_seen_timestamp is None:
+                count = yield self.get_monthly_active_count()
+                if count < self.hs.config.max_mau_value:
+                    yield self.upsert_monthly_active_user(user_id)
+            elif now - last_seen_timestamp > LAST_SEEN_GRANULARITY:
+                yield self.upsert_monthly_active_user(user_id)
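A typical call site for the new store is once per authenticated request, which the write-coalescing described above keeps cheap. A hedged usage sketch (the exact hook point in the request path is an assumption here):

    from twisted.internet import defer

    @defer.inlineCallbacks
    def on_authenticated_request(store, user_id):
        # No-op unless limit_usage_by_mau is configured; otherwise at most
        # one table write per user per LAST_SEEN_GRANULARITY window.
        yield store.populate_monthly_active_users(user_id)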
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index cf2aae0468..b364719312 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -20,13 +20,12 @@ import logging
 import os
 import re
 
-
 logger = logging.getLogger(__name__)
 
 
 # Remember to update this number every time a change is made to database
 # schema files, so the users will be informed on server restarts.
-SCHEMA_VERSION = 50
+SCHEMA_VERSION = 51
 
 dir_path = os.path.abspath(os.path.dirname(__file__))
 
diff --git a/synapse/storage/presence.py b/synapse/storage/presence.py
index f05d91cc58..a0c7a0dc87 100644
--- a/synapse/storage/presence.py
+++ b/synapse/storage/presence.py
@@ -13,13 +13,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import SQLBaseStore
+from collections import namedtuple
+
+from twisted.internet import defer
+
 from synapse.api.constants import PresenceState
-from synapse.util.caches.descriptors import cached, cachedInlineCallbacks, cachedList
 from synapse.util import batch_iter
+from synapse.util.caches.descriptors import cached, cachedInlineCallbacks, cachedList
 
-from collections import namedtuple
-from twisted.internet import defer
+from ._base import SQLBaseStore
 
 
 class UserPresenceState(namedtuple("UserPresenceState",
diff --git a/synapse/storage/profile.py b/synapse/storage/profile.py
index 8612bd5ecc..88b50f33b5 100644
--- a/synapse/storage/profile.py
+++ b/synapse/storage/profile.py
@@ -15,8 +15,8 @@
 
 from twisted.internet import defer
 
-from synapse.storage.roommember import ProfileInfo
 from synapse.api.errors import StoreError
+from synapse.storage.roommember import ProfileInfo
 
 from ._base import SQLBaseStore
 
@@ -71,8 +71,6 @@ class ProfileWorkerStore(SQLBaseStore):
             desc="get_from_remote_profile_cache",
         )
 
-
-class ProfileStore(ProfileWorkerStore):
     def create_profile(self, user_localpart):
         return self._simple_insert(
             table="profiles",
@@ -96,6 +94,8 @@ class ProfileStore(ProfileWorkerStore):
             desc="set_profile_avatar_url",
         )
 
+
+class ProfileStore(ProfileWorkerStore):
     def add_remote_profile_cache(self, user_id, displayname, avatar_url):
         """Ensure we are caching the remote user's profiles.
 
diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py
index 04a0b59a39..6a5028961d 100644
--- a/synapse/storage/push_rule.py
+++ b/synapse/storage/push_rule.py
@@ -14,20 +14,22 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import SQLBaseStore
+import abc
+import logging
+
+from canonicaljson import json
+
+from twisted.internet import defer
+
+from synapse.push.baserules import list_with_base_rules
 from synapse.storage.appservice import ApplicationServiceWorkerStore
 from synapse.storage.pusher import PusherWorkerStore
 from synapse.storage.receipts import ReceiptsWorkerStore
 from synapse.storage.roommember import RoomMemberWorkerStore
 from synapse.util.caches.descriptors import cachedInlineCallbacks, cachedList
 from synapse.util.caches.stream_change_cache import StreamChangeCache
-from synapse.push.baserules import list_with_base_rules
-from synapse.api.constants import EventTypes
-from twisted.internet import defer
 
-import abc
-import logging
-import simplejson as json
+from ._base import SQLBaseStore
 
 logger = logging.getLogger(__name__)
 
@@ -183,6 +185,7 @@ class PushRulesWorkerStore(ApplicationServiceWorkerStore,
 
         defer.returnValue(results)
 
+    @defer.inlineCallbacks
     def bulk_get_push_rules_for_room(self, event, context):
         state_group = context.state_group
         if not state_group:
@@ -192,9 +195,11 @@ class PushRulesWorkerStore(ApplicationServiceWorkerStore,
             # To do this we set the state_group to a new object as object() != object()
             state_group = object()
 
-        return self._bulk_get_push_rules_for_room(
-            event.room_id, state_group, context.current_state_ids, event=event
+        current_state_ids = yield context.get_current_state_ids(self)
+        result = yield self._bulk_get_push_rules_for_room(
+            event.room_id, state_group, current_state_ids, event=event
         )
+        defer.returnValue(result)
 
     @cachedInlineCallbacks(num_args=2, cache_context=True)
     def _bulk_get_push_rules_for_room(self, room_id, state_group, current_state_ids,
@@ -244,18 +249,6 @@ class PushRulesWorkerStore(ApplicationServiceWorkerStore,
             if uid in local_users_in_room:
                 user_ids.add(uid)
 
-        forgotten = yield self.who_forgot_in_room(
-            event.room_id, on_invalidate=cache_context.invalidate,
-        )
-
-        for row in forgotten:
-            user_id = row["user_id"]
-            event_id = row["event_id"]
-
-            mem_id = current_state_ids.get((EventTypes.Member, user_id), None)
-            if event_id == mem_id:
-                user_ids.discard(user_id)
-
         rules_by_user = yield self.bulk_get_push_rules(
             user_ids, on_invalidate=cache_context.invalidate,
         )
diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py
index 307660b99a..c7987bfcdd 100644
--- a/synapse/storage/pusher.py
+++ b/synapse/storage/pusher.py
@@ -14,19 +14,25 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import SQLBaseStore
-from twisted.internet import defer
+import logging
 
-from canonicaljson import encode_canonical_json
+import six
+
+from canonicaljson import encode_canonical_json, json
+
+from twisted.internet import defer
 
 from synapse.util.caches.descriptors import cachedInlineCallbacks, cachedList
 
-import logging
-import simplejson as json
-import types
+from ._base import SQLBaseStore
 
 logger = logging.getLogger(__name__)
 
+if six.PY2:
+    db_binary_type = buffer
+else:
+    db_binary_type = memoryview
+
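The buffer/memoryview alias smooths over the Python 2/3 split for binary database columns; a minimal sketch of the normalisation it enables (illustrative, not the exact pusher decode path):

    import six

    if six.PY2:
        db_binary_type = buffer  # noqa: F821 -- py2 builtin
    else:
        db_binary_type = memoryview

    def normalise_db_text(value):
        # Engines may hand back text columns as the binary wrapper type;
        # convert those to a real text string, pass everything else through.
        if isinstance(value, db_binary_type):
            value = bytes(value).decode("UTF-8")
        return value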
 
 class PusherWorkerStore(SQLBaseStore):
     def _decode_pushers_rows(self, rows):
@@ -34,18 +40,18 @@ class PusherWorkerStore(SQLBaseStore):
             dataJson = r['data']
             r['data'] = None
             try:
-                if isinstance(dataJson, types.BufferType):
+                if isinstance(dataJson, db_binary_type):
                     dataJson = str(dataJson).decode("UTF8")
 
                 r['data'] = json.loads(dataJson)
             except Exception as e:
                 logger.warn(
                     "Invalid JSON in data for pusher %d: %s, %s",
-                    r['id'], dataJson, e.message,
+                    r['id'], dataJson, e.args[0],
                 )
                 pass
 
-            if isinstance(r['pushkey'], types.BufferType):
+            if isinstance(r['pushkey'], db_binary_type):
                 r['pushkey'] = str(r['pushkey']).decode("UTF8")
 
         return rows
@@ -233,7 +239,7 @@ class PusherStore(PusherWorkerStore):
             )
 
             if newly_inserted:
-                self.runInteraction(
+                yield self.runInteraction(
                     "add_pusher",
                     self._invalidate_cache_and_stream,
                     self.get_if_user_has_pusher, (user_id,)
diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py
index c93c228f6e..0ac665e967 100644
--- a/synapse/storage/receipts.py
+++ b/synapse/storage/receipts.py
@@ -14,17 +14,18 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import SQLBaseStore
-from .util.id_generators import StreamIdGenerator
-from synapse.util.caches.descriptors import cachedInlineCallbacks, cachedList, cached
-from synapse.util.caches.stream_change_cache import StreamChangeCache
+import abc
+import logging
+
+from canonicaljson import json
 
 from twisted.internet import defer
 
-import abc
-import logging
-import simplejson as json
+from synapse.util.caches.descriptors import cached, cachedInlineCallbacks, cachedList
+from synapse.util.caches.stream_change_cache import StreamChangeCache
 
+from ._base import SQLBaseStore
+from .util.id_generators import StreamIdGenerator
 
 logger = logging.getLogger(__name__)
 
@@ -139,7 +140,9 @@ class ReceiptsWorkerStore(SQLBaseStore):
         """
         room_ids = set(room_ids)
 
-        if from_key:
+        if from_key is not None:
+            # Only ask the database about rooms where there have been new
+            # receipts added since `from_key`
             room_ids = yield self._receipts_stream_cache.get_entities_changed(
                 room_ids, from_key
             )
@@ -150,7 +153,6 @@ class ReceiptsWorkerStore(SQLBaseStore):
 
         defer.returnValue([ev for res in results.values() for ev in res])
 
-    @cachedInlineCallbacks(num_args=3, tree=True)
     def get_linearized_receipts_for_room(self, room_id, to_key, from_key=None):
         """Get receipts for a single room for sending to clients.
 
@@ -161,7 +163,19 @@ class ReceiptsWorkerStore(SQLBaseStore):
                 from the start.
 
         Returns:
-            list: A list of receipts.
+            Deferred[list]: A list of receipts.
+        """
+        if from_key is not None:
+            # Check the cache first to see if any new receipts have been added
+            # since `from_key`. If not, we can no-op.
+            if not self._receipts_stream_cache.has_entity_changed(room_id, from_key):
+                return defer.succeed([])
+
+        return self._get_linearized_receipts_for_room(room_id, to_key, from_key)
+
+    @cachedInlineCallbacks(num_args=3, tree=True)
+    def _get_linearized_receipts_for_room(self, room_id, to_key, from_key=None):
+        """See get_linearized_receipts_for_room
         """
         def f(txn):
             if from_key:
@@ -210,7 +224,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
             "content": content,
         }])
 
-    @cachedList(cached_method_name="get_linearized_receipts_for_room",
+    @cachedList(cached_method_name="_get_linearized_receipts_for_room",
                 list_name="room_ids", num_args=3, inlineCallbacks=True)
     def _get_linearized_receipts_for_rooms(self, room_ids, to_key, from_key=None):
         if not room_ids:
@@ -372,7 +386,7 @@ class ReceiptsStore(ReceiptsWorkerStore):
             self.get_receipts_for_user.invalidate, (user_id, receipt_type)
         )
         # FIXME: This shouldn't invalidate the whole cache
-        txn.call_after(self.get_linearized_receipts_for_room.invalidate_many, (room_id,))
+        txn.call_after(self._get_linearized_receipts_for_room.invalidate_many, (room_id,))
 
         txn.call_after(
             self._receipts_stream_cache.entity_has_changed,
@@ -492,7 +506,7 @@ class ReceiptsStore(ReceiptsWorkerStore):
             self.get_receipts_for_user.invalidate, (user_id, receipt_type)
         )
         # FIXME: This shouldn't invalidate the whole cache
-        txn.call_after(self.get_linearized_receipts_for_room.invalidate_many, (room_id,))
+        txn.call_after(self._get_linearized_receipts_for_room.invalidate_many, (room_id,))
 
         self._simple_delete_txn(
             txn,
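
The receipts changes above lean on `StreamChangeCache` to short-circuit reads: if no receipt in the room has changed since the client's `from_key`, the database is never touched. A minimal sketch of that read-path pattern, assuming a hypothetical store with a `_query_database` helper:

    from twisted.internet import defer

    from synapse.util.caches.stream_change_cache import StreamChangeCache

    class ExampleStore(object):
        def __init__(self):
            # maps an entity (here, a room_id) to the stream position at
            # which it last changed; 0 is the initial stream position
            self._change_cache = StreamChangeCache("ExampleChangeCache", 0)

        def get_receipts_since(self, room_id, from_key):
            if not self._change_cache.has_entity_changed(room_id, from_key):
                # nothing new since from_key: skip the database entirely
                return defer.succeed([])
            return self._query_database(room_id, from_key)  # hypothetical
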
diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py
index c241167fbe..26b429e307 100644
--- a/synapse/storage/registration.py
+++ b/synapse/storage/registration.py
@@ -15,17 +15,22 @@
 
 import re
 
+from six.moves import range
+
 from twisted.internet import defer
 
-from synapse.api.errors import StoreError, Codes
+from synapse.api.errors import Codes, StoreError
 from synapse.storage import background_updates
 from synapse.storage._base import SQLBaseStore
 from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
 
-from six.moves import range
-
 
 class RegistrationWorkerStore(SQLBaseStore):
+    def __init__(self, db_conn, hs):
+        super(RegistrationWorkerStore, self).__init__(db_conn, hs)
+
+        self.config = hs.config
+
     @cached()
     def get_user_by_id(self, user_id):
         return self._simple_select_one(
@@ -36,12 +41,33 @@ class RegistrationWorkerStore(SQLBaseStore):
             retcols=[
                 "name", "password_hash", "is_guest",
                 "consent_version", "consent_server_notice_sent",
-                "appservice_id",
+                "appservice_id", "creation_ts",
             ],
             allow_none=True,
             desc="get_user_by_id",
         )
 
+    @defer.inlineCallbacks
+    def is_trial_user(self, user_id):
+        """Checks if user is in the "trial" period, i.e. within the first
+        N days of registration defined by `mau_trial_days` config
+
+        Args:
+            user_id (str)
+
+        Returns:
+            Deferred[bool]
+        """
+
+        info = yield self.get_user_by_id(user_id)
+        if not info:
+            defer.returnValue(False)
+
+        now = self.clock.time_msec()
+        trial_duration_ms = self.config.mau_trial_days * 24 * 60 * 60 * 1000
+        is_trial = (now - info["creation_ts"] * 1000) < trial_duration_ms
+        defer.returnValue(is_trial)
+
     @cached()
     def get_user_by_access_token(self, token):
         """Get a user from the given access token.
@@ -460,15 +486,6 @@ class RegistrationStore(RegistrationWorkerStore,
             defer.returnValue(ret['user_id'])
         defer.returnValue(None)
 
-    def user_delete_threepids(self, user_id):
-        return self._simple_delete(
-            "user_threepids",
-            keyvalues={
-                "user_id": user_id,
-            },
-            desc="user_delete_threepids",
-        )
-
     def user_delete_threepid(self, user_id, medium, address):
         return self._simple_delete(
             "user_threepids",
@@ -632,7 +649,9 @@ class RegistrationStore(RegistrationWorkerStore,
         Removes the given user from the table of users who need to be parted from all the
         rooms they're in, effectively marking that user as fully deactivated.
         """
-        return self._simple_delete_one(
+        # XXX: This should be simple_delete_one but we failed to put a unique index on
+        # the table, so somehow duplicate entries have ended up in it.
+        return self._simple_delete(
             "users_pending_deactivation",
             keyvalues={
                 "user_id": user_id,
diff --git a/synapse/storage/rejections.py b/synapse/storage/rejections.py
index 40acb5c4ed..880f047adb 100644
--- a/synapse/storage/rejections.py
+++ b/synapse/storage/rejections.py
@@ -13,10 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import SQLBaseStore
-
 import logging
 
+from ._base import SQLBaseStore
+
 logger = logging.getLogger(__name__)
 
 
diff --git a/synapse/storage/room.py b/synapse/storage/room.py
index ea6a189185..61013b8919 100644
--- a/synapse/storage/room.py
+++ b/synapse/storage/room.py
@@ -13,6 +13,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import collections
+import logging
+import re
+
+from canonicaljson import json
+
 from twisted.internet import defer
 
 from synapse.api.errors import StoreError
@@ -20,11 +26,6 @@ from synapse.storage._base import SQLBaseStore
 from synapse.storage.search import SearchStore
 from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
 
-import collections
-import logging
-import simplejson as json
-import re
-
 logger = logging.getLogger(__name__)
 
 
@@ -40,6 +41,22 @@ RatelimitOverride = collections.namedtuple(
 
 
 class RoomWorkerStore(SQLBaseStore):
+    def get_room(self, room_id):
+        """Retrieve a room.
+
+        Args:
+            room_id (str): The ID of the room to retrieve.
+        Returns:
+            A dict containing the room information, or None if the room is unknown.
+        """
+        return self._simple_select_one(
+            table="rooms",
+            keyvalues={"room_id": room_id},
+            retcols=("room_id", "is_public", "creator"),
+            desc="get_room",
+            allow_none=True,
+        )
+
     def get_public_room_ids(self):
         return self._simple_select_onecol(
             table="rooms",
@@ -169,6 +186,35 @@ class RoomWorkerStore(SQLBaseStore):
             desc="is_room_blocked",
         )
 
+    @cachedInlineCallbacks(max_entries=10000)
+    def get_ratelimit_for_user(self, user_id):
+        """Check if there are any overrides for ratelimiting for the given
+        user
+
+        Args:
+            user_id (str)
+
+        Returns:
+            RatelimitOverride if there is an override, else None. If the contents
+            of RatelimitOverride are None or 0 then ratelimiting has been
+            disabled for that user entirely.
+        """
+        row = yield self._simple_select_one(
+            table="ratelimit_override",
+            keyvalues={"user_id": user_id},
+            retcols=("messages_per_second", "burst_count"),
+            allow_none=True,
+            desc="get_ratelimit_for_user",
+        )
+
+        if row:
+            defer.returnValue(RatelimitOverride(
+                messages_per_second=row["messages_per_second"],
+                burst_count=row["burst_count"],
+            ))
+        else:
+            defer.returnValue(None)
+
 
 class RoomStore(RoomWorkerStore, SearchStore):
 
@@ -214,22 +260,6 @@ class RoomStore(RoomWorkerStore, SearchStore):
             logger.error("store_room with room_id=%s failed: %s", room_id, e)
             raise StoreError(500, "Problem creating room.")
 
-    def get_room(self, room_id):
-        """Retrieve a room.
-
-        Args:
-            room_id (str): The ID of the room to retrieve.
-        Returns:
-            A namedtuple containing the room information, or an empty list.
-        """
-        return self._simple_select_one(
-            table="rooms",
-            keyvalues={"room_id": room_id},
-            retcols=("room_id", "is_public", "creator"),
-            desc="get_room",
-            allow_none=True,
-        )
-
     @defer.inlineCallbacks
     def set_room_is_public(self, room_id, is_public):
         def set_room_is_public_txn(txn, next_id):
@@ -468,35 +498,6 @@ class RoomStore(RoomWorkerStore, SearchStore):
             "get_all_new_public_rooms", get_all_new_public_rooms
         )
 
-    @cachedInlineCallbacks(max_entries=10000)
-    def get_ratelimit_for_user(self, user_id):
-        """Check if there are any overrides for ratelimiting for the given
-        user
-
-        Args:
-            user_id (str)
-
-        Returns:
-            RatelimitOverride if there is an override, else None. If the contents
-            of RatelimitOverride are None or 0 then ratelimitng has been
-            disabled for that user entirely.
-        """
-        row = yield self._simple_select_one(
-            table="ratelimit_override",
-            keyvalues={"user_id": user_id},
-            retcols=("messages_per_second", "burst_count"),
-            allow_none=True,
-            desc="get_ratelimit_for_user",
-        )
-
-        if row:
-            defer.returnValue(RatelimitOverride(
-                messages_per_second=row["messages_per_second"],
-                burst_count=row["burst_count"],
-            ))
-        else:
-            defer.returnValue(None)
-
     @defer.inlineCallbacks
     def block_room(self, room_id, user_id):
         yield self._simple_insert(
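
How a caller might interpret the result of `get_ratelimit_for_user` above: `None` means no override row exists, while an override whose fields are all `None`/0 disables ratelimiting entirely. This is a hypothetical consumer for illustration, not the actual ratelimiting code path:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def effective_rate_limits(store, user_id, default_mps, default_burst):
        override = yield store.get_ratelimit_for_user(user_id)
        if override is None:
            # no per-user override: fall back to the configured defaults
            defer.returnValue((default_mps, default_burst))
        elif not override.messages_per_second and not override.burst_count:
            # an all-None/0 override disables ratelimiting for this user
            defer.returnValue(None)
        else:
            defer.returnValue(
                (override.messages_per_second, override.burst_count)
            )
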
diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py
index 48a88f755e..9b4e6d6aa8 100644
--- a/synapse/storage/roommember.py
+++ b/synapse/storage/roommember.py
@@ -14,23 +14,22 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from twisted.internet import defer
-
+import logging
 from collections import namedtuple
 
-from synapse.storage.events import EventsWorkerStore
-from synapse.util.async import Linearizer
-from synapse.util.caches import intern_string
-from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
-from synapse.util.stringutils import to_ascii
+from six import iteritems, itervalues
 
-from synapse.api.constants import Membership, EventTypes
-from synapse.types import get_domain_from_id
+from canonicaljson import json
 
-import logging
-import simplejson as json
+from twisted.internet import defer
 
-from six import itervalues, iteritems
+from synapse.api.constants import EventTypes, Membership
+from synapse.storage.events_worker import EventsWorkerStore
+from synapse.types import get_domain_from_id
+from synapse.util.async_helpers import Linearizer
+from synapse.util.caches import intern_string
+from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
+from synapse.util.stringutils import to_ascii
 
 logger = logging.getLogger(__name__)
 
@@ -233,6 +232,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
 
         defer.returnValue(user_who_share_room)
 
+    @defer.inlineCallbacks
     def get_joined_users_from_context(self, event, context):
         state_group = context.state_group
         if not state_group:
@@ -242,11 +242,13 @@ class RoomMemberWorkerStore(EventsWorkerStore):
             # To do this we set the state_group to a new object as object() != object()
             state_group = object()
 
-        return self._get_joined_users_from_context(
-            event.room_id, state_group, context.current_state_ids,
+        current_state_ids = yield context.get_current_state_ids(self)
+        result = yield self._get_joined_users_from_context(
+            event.room_id, state_group, current_state_ids,
             event=event,
             context=context,
         )
+        defer.returnValue(result)
 
     def get_joined_users_from_state(self, room_id, state_entry):
         state_group = state_entry.state_group
@@ -455,21 +457,33 @@ class RoomMemberWorkerStore(EventsWorkerStore):
 
         defer.returnValue(joined_hosts)
 
-    @cached(max_entries=10000, iterable=True)
+    @cached(max_entries=10000)
     def _get_joined_hosts_cache(self, room_id):
         return _JoinedHostsCache(self, room_id)
 
-    @cached()
-    def who_forgot_in_room(self, room_id):
-        return self._simple_select_list(
-            table="room_memberships",
-            retcols=("user_id", "event_id"),
-            keyvalues={
-                "room_id": room_id,
-                "forgotten": 1,
-            },
-            desc="who_forgot"
-        )
+    @cachedInlineCallbacks(num_args=2)
+    def did_forget(self, user_id, room_id):
+        """Returns whether user_id has elected to discard history for room_id.
+
+        Returns False if they have since re-joined."""
+        def f(txn):
+            sql = (
+                "SELECT"
+                "  COUNT(*)"
+                " FROM"
+                "  room_memberships"
+                " WHERE"
+                "  user_id = ?"
+                " AND"
+                "  room_id = ?"
+                " AND"
+                "  forgotten = 0"
+            )
+            txn.execute(sql, (user_id, room_id))
+            rows = txn.fetchall()
+            return rows[0][0]
+        count = yield self.runInteraction("did_forget_membership", f)
+        defer.returnValue(count == 0)
 
 
 class RoomMemberStore(RoomMemberWorkerStore):
@@ -578,36 +592,11 @@ class RoomMemberStore(RoomMemberWorkerStore):
             )
             txn.execute(sql, (user_id, room_id))
 
-            txn.call_after(self.did_forget.invalidate, (user_id, room_id))
             self._invalidate_cache_and_stream(
-                txn, self.who_forgot_in_room, (room_id,)
+                txn, self.did_forget, (user_id, room_id,),
             )
         return self.runInteraction("forget_membership", f)
 
-    @cachedInlineCallbacks(num_args=2)
-    def did_forget(self, user_id, room_id):
-        """Returns whether user_id has elected to discard history for room_id.
-
-        Returns False if they have since re-joined."""
-        def f(txn):
-            sql = (
-                "SELECT"
-                "  COUNT(*)"
-                " FROM"
-                "  room_memberships"
-                " WHERE"
-                "  user_id = ?"
-                " AND"
-                "  room_id = ?"
-                " AND"
-                "  forgotten = 0"
-            )
-            txn.execute(sql, (user_id, room_id))
-            rows = txn.fetchall()
-            return rows[0][0]
-        count = yield self.runInteraction("did_forget_membership", f)
-        defer.returnValue(count == 0)
-
     @defer.inlineCallbacks
     def _background_add_membership_profile(self, progress, batch_size):
         target_min_stream_id = progress.get(
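
The semantics of `did_forget` above: a user has forgotten a room only if no membership row for that (user, room) pair remains with `forgotten = 0`, so re-joining (which inserts a fresh, unforgotten row) flips the answer back to False. A self-contained sqlite illustration of the same query against a toy table:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute(
        "CREATE TABLE room_memberships"
        " (user_id TEXT, room_id TEXT, forgotten INTEGER)"
    )
    # the user joined, forgot the room, then re-joined:
    conn.executemany(
        "INSERT INTO room_memberships VALUES (?, ?, ?)",
        [
            ("@u:example.org", "!r:example.org", 1),  # forgotten membership
            ("@u:example.org", "!r:example.org", 0),  # current membership
        ],
    )

    (count,) = conn.execute(
        "SELECT COUNT(*) FROM room_memberships"
        " WHERE user_id = ? AND room_id = ? AND forgotten = 0",
        ("@u:example.org", "!r:example.org"),
    ).fetchone()
    did_forget = (count == 0)  # False: an unforgotten membership exists
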
diff --git a/synapse/storage/schema/delta/25/fts.py b/synapse/storage/schema/delta/25/fts.py
index e7351c3ae6..4b2ffd35fd 100644
--- a/synapse/storage/schema/delta/25/fts.py
+++ b/synapse/storage/schema/delta/25/fts.py
@@ -14,11 +14,11 @@
 
 import logging
 
-from synapse.storage.prepare_database import get_statements
-from synapse.storage.engines import PostgresEngine, Sqlite3Engine
-
 import simplejson
 
+from synapse.storage.engines import PostgresEngine, Sqlite3Engine
+from synapse.storage.prepare_database import get_statements
+
 logger = logging.getLogger(__name__)
 
 
diff --git a/synapse/storage/schema/delta/27/ts.py b/synapse/storage/schema/delta/27/ts.py
index 6df57b5206..414f9f5aa0 100644
--- a/synapse/storage/schema/delta/27/ts.py
+++ b/synapse/storage/schema/delta/27/ts.py
@@ -14,10 +14,10 @@
 
 import logging
 
-from synapse.storage.prepare_database import get_statements
-
 import simplejson
 
+from synapse.storage.prepare_database import get_statements
+
 logger = logging.getLogger(__name__)
 
 
diff --git a/synapse/storage/schema/delta/30/as_users.py b/synapse/storage/schema/delta/30/as_users.py
index 85bd1a2006..ef7ec34346 100644
--- a/synapse/storage/schema/delta/30/as_users.py
+++ b/synapse/storage/schema/delta/30/as_users.py
@@ -12,10 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
-from synapse.config.appservice import load_appservices
 
 from six.moves import range
 
+from synapse.config.appservice import load_appservices
 
 logger = logging.getLogger(__name__)
 
diff --git a/synapse/storage/schema/delta/31/search_update.py b/synapse/storage/schema/delta/31/search_update.py
index fe6b7d196d..7d8ca5f93f 100644
--- a/synapse/storage/schema/delta/31/search_update.py
+++ b/synapse/storage/schema/delta/31/search_update.py
@@ -12,12 +12,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.storage.engines import PostgresEngine
-from synapse.storage.prepare_database import get_statements
-
 import logging
+
 import simplejson
 
+from synapse.storage.engines import PostgresEngine
+from synapse.storage.prepare_database import get_statements
+
 logger = logging.getLogger(__name__)
 
 
diff --git a/synapse/storage/schema/delta/33/event_fields.py b/synapse/storage/schema/delta/33/event_fields.py
index 1e002f9db2..bff1256a7b 100644
--- a/synapse/storage/schema/delta/33/event_fields.py
+++ b/synapse/storage/schema/delta/33/event_fields.py
@@ -12,11 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.storage.prepare_database import get_statements
-
 import logging
+
 import simplejson
 
+from synapse.storage.prepare_database import get_statements
+
 logger = logging.getLogger(__name__)
 
 
diff --git a/synapse/storage/schema/delta/33/remote_media_ts.py b/synapse/storage/schema/delta/33/remote_media_ts.py
index 55ae43f395..9754d3ccfb 100644
--- a/synapse/storage/schema/delta/33/remote_media_ts.py
+++ b/synapse/storage/schema/delta/33/remote_media_ts.py
@@ -14,7 +14,6 @@
 
 import time
 
-
 ALTER_TABLE = "ALTER TABLE remote_media_cache ADD COLUMN last_access_ts BIGINT"
 
 
diff --git a/synapse/storage/schema/delta/34/cache_stream.py b/synapse/storage/schema/delta/34/cache_stream.py
index 3b63a1562d..cf09e43e2b 100644
--- a/synapse/storage/schema/delta/34/cache_stream.py
+++ b/synapse/storage/schema/delta/34/cache_stream.py
@@ -12,11 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.storage.prepare_database import get_statements
-from synapse.storage.engines import PostgresEngine
-
 import logging
 
+from synapse.storage.engines import PostgresEngine
+from synapse.storage.prepare_database import get_statements
+
 logger = logging.getLogger(__name__)
 
 
diff --git a/synapse/storage/schema/delta/34/received_txn_purge.py b/synapse/storage/schema/delta/34/received_txn_purge.py
index 033144341c..67d505e68b 100644
--- a/synapse/storage/schema/delta/34/received_txn_purge.py
+++ b/synapse/storage/schema/delta/34/received_txn_purge.py
@@ -12,10 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.storage.engines import PostgresEngine
-
 import logging
 
+from synapse.storage.engines import PostgresEngine
+
 logger = logging.getLogger(__name__)
 
 
diff --git a/synapse/storage/schema/delta/34/sent_txn_purge.py b/synapse/storage/schema/delta/34/sent_txn_purge.py
index 81948e3431..0ffab10b6f 100644
--- a/synapse/storage/schema/delta/34/sent_txn_purge.py
+++ b/synapse/storage/schema/delta/34/sent_txn_purge.py
@@ -12,10 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.storage.engines import PostgresEngine
-
 import logging
 
+from synapse.storage.engines import PostgresEngine
+
 logger = logging.getLogger(__name__)
 
 
diff --git a/synapse/storage/schema/delta/37/remove_auth_idx.py b/synapse/storage/schema/delta/37/remove_auth_idx.py
index 20ad8bd5a6..a377884169 100644
--- a/synapse/storage/schema/delta/37/remove_auth_idx.py
+++ b/synapse/storage/schema/delta/37/remove_auth_idx.py
@@ -12,11 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.storage.prepare_database import get_statements
-from synapse.storage.engines import PostgresEngine
-
 import logging
 
+from synapse.storage.engines import PostgresEngine
+from synapse.storage.prepare_database import get_statements
+
 logger = logging.getLogger(__name__)
 
 DROP_INDICES = """
diff --git a/synapse/storage/schema/delta/42/user_dir.py b/synapse/storage/schema/delta/42/user_dir.py
index ea6a18196d..506f326f4d 100644
--- a/synapse/storage/schema/delta/42/user_dir.py
+++ b/synapse/storage/schema/delta/42/user_dir.py
@@ -14,8 +14,8 @@
 
 import logging
 
-from synapse.storage.prepare_database import get_statements
 from synapse.storage.engines import PostgresEngine, Sqlite3Engine
+from synapse.storage.prepare_database import get_statements
 
 logger = logging.getLogger(__name__)
 
diff --git a/synapse/storage/schema/delta/50/erasure_store.sql b/synapse/storage/schema/delta/50/erasure_store.sql
new file mode 100644
index 0000000000..5d8641a9ab
--- /dev/null
+++ b/synapse/storage/schema/delta/50/erasure_store.sql
@@ -0,0 +1,21 @@
+/* Copyright 2018 New Vector Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- a table of users who have requested that their details be erased
+CREATE TABLE erased_users (
+    user_id TEXT NOT NULL
+);
+
+CREATE UNIQUE INDEX erased_users_user ON erased_users(user_id);
diff --git a/synapse/storage/schema/delta/50/make_event_content_nullable.py b/synapse/storage/schema/delta/50/make_event_content_nullable.py
new file mode 100644
index 0000000000..6dd467b6c5
--- /dev/null
+++ b/synapse/storage/schema/delta/50/make_event_content_nullable.py
@@ -0,0 +1,92 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+We want to stop populating 'event.content', so we need to make it nullable.
+
+If this has to be rolled back, then the following should populate the missing data:
+
+Postgres:
+
+    UPDATE events SET content=(ej.json::json)->'content' FROM event_json ej
+    WHERE ej.event_id = events.event_id AND
+        stream_ordering < (
+            SELECT stream_ordering FROM events WHERE content IS NOT NULL
+            ORDER BY stream_ordering LIMIT 1
+        );
+
+    UPDATE events SET content=(ej.json::json)->'content' FROM event_json ej
+    WHERE ej.event_id = events.event_id AND
+        stream_ordering > (
+            SELECT stream_ordering FROM events WHERE content IS NOT NULL
+            ORDER BY stream_ordering DESC LIMIT 1
+        );
+
+SQLite:
+
+    UPDATE events SET content=(
+        SELECT json_extract(json,'$.content') FROM event_json ej
+        WHERE ej.event_id = events.event_id
+    )
+    WHERE
+        stream_ordering < (
+            SELECT stream_ordering FROM events WHERE content IS NOT NULL
+            ORDER BY stream_ordering LIMIT 1
+        )
+        OR stream_ordering > (
+            SELECT stream_ordering FROM events WHERE content IS NOT NULL
+            ORDER BY stream_ordering DESC LIMIT 1
+        );
+
+"""
+
+import logging
+
+from synapse.storage.engines import PostgresEngine
+
+logger = logging.getLogger(__name__)
+
+
+def run_create(cur, database_engine, *args, **kwargs):
+    pass
+
+
+def run_upgrade(cur, database_engine, *args, **kwargs):
+    if isinstance(database_engine, PostgresEngine):
+        cur.execute("""
+            ALTER TABLE events ALTER COLUMN content DROP NOT NULL;
+        """)
+        return
+
+    # sqlite is an arse about this. ref: https://www.sqlite.org/lang_altertable.html
+
+    cur.execute("SELECT sql FROM sqlite_master WHERE tbl_name='events' AND type='table'")
+    (oldsql,) = cur.fetchone()
+
+    sql = oldsql.replace("content TEXT NOT NULL", "content TEXT")
+    if sql == oldsql:
+        raise Exception("Couldn't find null constraint to drop in %s" % oldsql)
+
+    logger.info("Replacing definition of 'events' with: %s", sql)
+
+    cur.execute("PRAGMA schema_version")
+    (oldver,) = cur.fetchone()
+    cur.execute("PRAGMA writable_schema=ON")
+    cur.execute(
+        "UPDATE sqlite_master SET sql=? WHERE tbl_name='events' AND type='table'",
+        (sql, ),
+    )
+    cur.execute("PRAGMA schema_version=%i" % (oldver + 1,))
+    cur.execute("PRAGMA writable_schema=OFF")
diff --git a/synapse/storage/schema/delta/51/monthly_active_users.sql b/synapse/storage/schema/delta/51/monthly_active_users.sql
new file mode 100644
index 0000000000..c9d537d5a3
--- /dev/null
+++ b/synapse/storage/schema/delta/51/monthly_active_users.sql
@@ -0,0 +1,27 @@
+/* Copyright 2018 New Vector Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- a table of monthly active users, for use where blocking based on mau limits
+CREATE TABLE monthly_active_users (
+    user_id TEXT NOT NULL,
+    -- Last time we saw the user. Not guaranteed to be accurate due to rate limiting
+    -- on updates; granularity of updates is governed by
+    -- synapse.storage.monthly_active_users.LAST_SEEN_GRANULARITY
+    -- Measured in ms since epoch.
+    timestamp BIGINT NOT NULL
+);
+
+CREATE UNIQUE INDEX monthly_active_users_users ON monthly_active_users(user_id);
+CREATE INDEX monthly_active_users_time_stamp ON monthly_active_users(timestamp);
diff --git a/synapse/storage/schema/full_schemas/16/event_edges.sql b/synapse/storage/schema/full_schemas/16/event_edges.sql
index 52eec88357..6b5a5a88fa 100644
--- a/synapse/storage/schema/full_schemas/16/event_edges.sql
+++ b/synapse/storage/schema/full_schemas/16/event_edges.sql
@@ -37,7 +37,8 @@ CREATE TABLE IF NOT EXISTS event_edges(
     event_id TEXT NOT NULL,
     prev_event_id TEXT NOT NULL,
     room_id TEXT NOT NULL,
-    is_state BOOL NOT NULL,
+    is_state BOOL NOT NULL,  -- true if this is a prev_state edge rather than a regular
+                             -- event dag edge.
     UNIQUE (event_id, prev_event_id, room_id, is_state)
 );
 
diff --git a/synapse/storage/schema/full_schemas/16/im.sql b/synapse/storage/schema/full_schemas/16/im.sql
index ba5346806e..5f5cb8d01d 100644
--- a/synapse/storage/schema/full_schemas/16/im.sql
+++ b/synapse/storage/schema/full_schemas/16/im.sql
@@ -19,7 +19,12 @@ CREATE TABLE IF NOT EXISTS events(
     event_id TEXT NOT NULL,
     type TEXT NOT NULL,
     room_id TEXT NOT NULL,
-    content TEXT NOT NULL,
+
+    -- 'content' used to be created NOT NULL, but as of delta 50 we drop that constraint.
+    -- the hack we use to drop the constraint doesn't work for an in-memory sqlite
+    -- database, which breaks the sytests. Hence, we now create it as nullable here.
+    content TEXT,
+
     unrecognized_keys TEXT,
     processed BOOL NOT NULL,
     outlier BOOL NOT NULL,
diff --git a/synapse/storage/search.py b/synapse/storage/search.py
index f0fa5d7631..d5b5df93e6 100644
--- a/synapse/storage/search.py
+++ b/synapse/storage/search.py
@@ -13,19 +13,21 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from collections import namedtuple
 import logging
 import re
-import simplejson as json
+from collections import namedtuple
 
 from six import string_types
 
+from canonicaljson import json
+
 from twisted.internet import defer
 
-from .background_updates import BackgroundUpdateStore
 from synapse.api.errors import SynapseError
 from synapse.storage.engines import PostgresEngine, Sqlite3Engine
 
+from .background_updates import BackgroundUpdateStore
+
 logger = logging.getLogger(__name__)
 
 SearchEntry = namedtuple('SearchEntry', [
diff --git a/synapse/storage/signatures.py b/synapse/storage/signatures.py
index 25922e5a9c..5623391f6e 100644
--- a/synapse/storage/signatures.py
+++ b/synapse/storage/signatures.py
@@ -13,15 +13,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from twisted.internet import defer
 import six
 
-from ._base import SQLBaseStore
-
 from unpaddedbase64 import encode_base64
+
+from twisted.internet import defer
+
 from synapse.crypto.event_signing import compute_event_reference_hash
 from synapse.util.caches.descriptors import cached, cachedList
 
+from ._base import SQLBaseStore
+
 # py2 sqlite has buffer hardcoded as only binary type, so we must use it,
 # despite being deprecated and removed in favor of memoryview
 if six.PY2:
@@ -72,7 +74,7 @@ class SignatureWorkerStore(SQLBaseStore):
             txn (cursor):
             event_id (str): Id for the Event.
         Returns:
-            A dict of algorithm -> hash.
+            A dict[unicode, bytes] of algorithm -> hash.
         """
         query = (
             "SELECT algorithm, hash"
diff --git a/synapse/storage/state.py b/synapse/storage/state.py
index 85b8ec2b8f..4b971efdba 100644
--- a/synapse/storage/state.py
+++ b/synapse/storage/state.py
@@ -13,21 +13,24 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from collections import namedtuple
 import logging
+from collections import namedtuple
 
 from six import iteritems, itervalues
 from six.moves import range
 
 from twisted.internet import defer
 
+from synapse.api.constants import EventTypes
+from synapse.api.errors import NotFoundError
+from synapse.storage._base import SQLBaseStore
 from synapse.storage.background_updates import BackgroundUpdateStore
 from synapse.storage.engines import PostgresEngine
-from synapse.util.caches import intern_string, get_cache_factor_for
+from synapse.storage.events_worker import EventsWorkerStore
+from synapse.util.caches import get_cache_factor_for, intern_string
 from synapse.util.caches.descriptors import cached, cachedList
 from synapse.util.caches.dictionary_cache import DictionaryCache
 from synapse.util.stringutils import to_ascii
-from ._base import SQLBaseStore
 
 logger = logging.getLogger(__name__)
 
@@ -45,7 +48,8 @@ class _GetStateGroupDelta(namedtuple("_GetStateGroupDelta", ("prev_group", "delt
         return len(self.delta_ids) if self.delta_ids else 0
 
 
-class StateGroupWorkerStore(SQLBaseStore):
+# this inherits from EventsWorkerStore because it calls self.get_events
+class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
     """The parts of StateGroupStore that can be called from workers.
     """
 
@@ -56,9 +60,68 @@ class StateGroupWorkerStore(SQLBaseStore):
     def __init__(self, db_conn, hs):
         super(StateGroupWorkerStore, self).__init__(db_conn, hs)
 
+        # Originally the state store used a single DictionaryCache to cache the
+        # event IDs for the state types in a given state group to avoid hammering
+        # on the state_group* tables.
+        #
+        # The point of using a DictionaryCache is that it can cache a subset
+        # of the state events for a given state group (i.e. a subset of the keys for a
+        # given dict which is an entry in the cache for a given state group ID).
+        #
+        # However, this poses problems when performing complicated queries
+        # on the store - for instance: "give me all the state for this group, but
+        # limit members to this subset of users", as DictionaryCache's API isn't
+        # rich enough to say "please cache any of these fields, apart from this subset".
+        # This is problematic when lazy loading members, which requires this behaviour,
+        # as without it the cache has no choice but to speculatively load all
+        # state events for the group, which negates the efficiency being sought.
+        #
+        # Rather than overcomplicating DictionaryCache's API, we instead split the
+        # state_group_cache into two halves - one for tracking non-member events,
+        # and the other for tracking member events.  This means that lazy loading
+        # queries can be made in a cache-friendly manner by querying both caches
+        # separately and then merging the result.  So for the example above, you
+        # would query the members cache for a specific subset of state keys
+        # (which DictionaryCache will handle efficiently and fine) and the non-members
+        # cache for all state (which DictionaryCache will similarly handle fine)
+        # and then just merge the results together.
+        #
+        # We size the non-members cache to be smaller than the members cache as the
+        # vast majority of state in Matrix (today) is member events.
+
         self._state_group_cache = DictionaryCache(
-            "*stateGroupCache*", 500000 * get_cache_factor_for("stateGroupCache")
+            "*stateGroupCache*",
+            # TODO: this hasn't been tuned yet
+            50000 * get_cache_factor_for("stateGroupCache")
         )
+        self._state_group_members_cache = DictionaryCache(
+            "*stateGroupMembersCache*",
+            500000 * get_cache_factor_for("stateGroupMembersCache")
+        )
+
+    @defer.inlineCallbacks
+    def get_room_version(self, room_id):
+        """Get the room_version of a given room
+
+        Args:
+            room_id (str)
+
+        Returns:
+            Deferred[str]
+
+        Raises:
+            NotFoundError if the room is unknown
+        """
+        # for now we do this by looking at the create event. We may want to cache this
+        # more intelligently in future.
+        state_ids = yield self.get_current_state_ids(room_id)
+        create_id = state_ids.get((EventTypes.Create, ""))
+
+        if not create_id:
+            raise NotFoundError("Unknown room")
+
+        create_event = yield self.get_event(create_id)
+        defer.returnValue(create_event.content.get("room_version", "1"))
 
     @cached(max_entries=100000, iterable=True)
     def get_current_state_ids(self, room_id):
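
The upshot of the cache split described above: lazy-loading queries hit the members cache for a narrow set of state_keys and the non-members cache for everything else, then merge the two partial results. A toy illustration of the merge step, with plain dicts standing in for the two `DictionaryCache` instances:

    MEMBER = "m.room.member"

    # state_group -> {(type, state_key): event_id}
    non_member_cache = {42: {("m.room.name", ""): "$name_event"}}
    member_cache = {42: {(MEMBER, "@alice:example.org"): "$join_event"}}

    def state_for_group(group):
        # query each half separately, then merge the partial results
        state = dict(non_member_cache.get(group, {}))
        state.update(member_cache.get(group, {}))
        return state

    assert state_for_group(42) == {
        ("m.room.name", ""): "$name_event",
        (MEMBER, "@alice:example.org"): "$join_event",
    }
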
@@ -88,6 +151,69 @@ class StateGroupWorkerStore(SQLBaseStore):
             _get_current_state_ids_txn,
         )
 
+    # FIXME: how should this be cached?
+    def get_filtered_current_state_ids(self, room_id, types, filtered_types=None):
+        """Get the current state event of a given type for a room based on the
+        current_state_events table.  This may not be as up-to-date as the result
+        of doing a fresh state resolution as per state_handler.get_current_state
+        Args:
+            room_id (str)
+            types (list[(str, str|None)]): List of (type, state_key) tuples
+                which are used to filter the state fetched. `state_key` may be
+                None, which matches any `state_key`
+            filtered_types (list[str]|None): List of types to apply the above filter to.
+        Returns:
+            deferred: dict of (type, state_key) -> event_id
+        """
+
+        include_other_types = filtered_types is not None
+
+        def _get_filtered_current_state_ids_txn(txn):
+            results = {}
+            sql = """SELECT type, state_key, event_id FROM current_state_events
+                     WHERE room_id = ? %s"""
+            # Turns out that postgres doesn't like doing a list of OR's and
+            # is about 1000x slower, so we just issue a query for each specific
+            # type separately.
+            if types:
+                clause_to_args = [
+                    (
+                        "AND type = ? AND state_key = ?",
+                        (etype, state_key)
+                    ) if state_key is not None else (
+                        "AND type = ?",
+                        (etype,)
+                    )
+                    for etype, state_key in types
+                ]
+
+                if include_other_types:
+                    unique_types = set(filtered_types)
+                    clause_to_args.append(
+                        (
+                            "AND type <> ? " * len(unique_types),
+                            list(unique_types)
+                        )
+                    )
+            else:
+                # If types is None we fetch all the state, and so just use an
+                # empty where clause with no extra args.
+                clause_to_args = [("", [])]
+            for where_clause, where_args in clause_to_args:
+                args = [room_id]
+                args.extend(where_args)
+                txn.execute(sql % (where_clause,), args)
+                for row in txn:
+                    typ, state_key, event_id = row
+                    key = (intern_string(typ), intern_string(state_key))
+                    results[key] = event_id
+            return results
+
+        return self.runInteraction(
+            "get_filtered_current_state_ids",
+            _get_filtered_current_state_ids_txn,
+        )
+
     @cached(max_entries=10000, iterable=True)
     def get_state_group_delta(self, state_group):
         """Given a state group try to return a previous group and a delta between
@@ -184,8 +310,21 @@ class StateGroupWorkerStore(SQLBaseStore):
         })
 
     @defer.inlineCallbacks
-    def _get_state_groups_from_groups(self, groups, types):
-        """Returns dictionary state_group -> (dict of (type, state_key) -> event id)
+    def _get_state_groups_from_groups(self, groups, types, members=None):
+        """Returns the state groups for a given set of groups, filtering on
+        types of state events.
+
+        Args:
+            groups(list[int]): list of state group IDs to query
+            types (Iterable[(str, str|None)]|None): list of 2-tuples of the form
+                (`type`, `state_key`), where a `state_key` of `None` matches all
+                state_keys for the `type`. If None, all types are returned.
+            members (bool|None): If not None, then, in addition to any filtering
+                implied by types, the results are also filtered to only include
+                member events (if True), or to exclude member events (if False)
+
+        Returns:
+            dictionary state_group -> (dict of (type, state_key) -> event id)
         """
         results = {}
 
@@ -193,14 +332,17 @@ class StateGroupWorkerStore(SQLBaseStore):
         for chunk in chunks:
             res = yield self.runInteraction(
                 "_get_state_groups_from_groups",
-                self._get_state_groups_from_groups_txn, chunk, types,
+                self._get_state_groups_from_groups_txn, chunk, types, members,
             )
             results.update(res)
 
         defer.returnValue(results)
 
-    def _get_state_groups_from_groups_txn(self, txn, groups, types=None):
+    def _get_state_groups_from_groups_txn(
+        self, txn, groups, types=None, members=None,
+    ):
         results = {group: {} for group in groups}
+
         if types is not None:
             types = list(set(types))  # deduplicate types list
 
@@ -235,10 +377,15 @@ class StateGroupWorkerStore(SQLBaseStore):
                 %s
             """)
 
+            if members is True:
+                sql += " AND type = '%s'" % (EventTypes.Member,)
+            elif members is False:
+                sql += " AND type <> '%s'" % (EventTypes.Member,)
+
             # Turns out that postgres doesn't like doing a list of OR's and
             # is about 1000x slower, so we just issue a query for each specific
             # type separately.
-            if types:
+            if types is not None:
                 clause_to_args = [
                     (
                         "AND type = ? AND state_key = ?",
@@ -277,10 +424,16 @@ class StateGroupWorkerStore(SQLBaseStore):
                     else:
                         where_clauses.append("(type = ? AND state_key = ?)")
                         where_args.extend([typ[0], typ[1]])
+
                 where_clause = "AND (%s)" % (" OR ".join(where_clauses))
             else:
                 where_clause = ""
 
+            if members is True:
+                where_clause += " AND type = '%s'" % EventTypes.Member
+            elif members is False:
+                where_clause += " AND type <> '%s'" % EventTypes.Member
+
             # We don't use WITH RECURSIVE on sqlite3 as there are distributions
             # that ship with an sqlite3 version that doesn't support it (e.g. wheezy)
             for group in groups:
@@ -331,27 +484,30 @@ class StateGroupWorkerStore(SQLBaseStore):
         return results
 
     @defer.inlineCallbacks
-    def get_state_for_events(self, event_ids, types):
+    def get_state_for_events(self, event_ids, types, filtered_types=None):
         """Given a list of event_ids and type tuples, return a list of state
         dicts for each event. The state dicts will only have the type/state_keys
         that are in the `types` list.
 
         Args:
-            event_ids (list)
-            types (list): List of (type, state_key) tuples which are used to
-                filter the state fetched. `state_key` may be None, which matches
-                any `state_key`
+            event_ids (list[str])
+            types (list[(str, str|None)]|None): List of (type, state_key) tuples
+                which are used to filter the state fetched. If `state_key` is None,
+                all events of the given type are returned.
+                `types` itself may be None, which matches any key.
+            filtered_types(list[str]|None): Only apply filtering via `types` to this
+                list of event types.  Other types of events are returned unfiltered.
+                If None, `types` filtering is applied to all events.
 
         Returns:
-            deferred: A list of dicts corresponding to the event_ids given.
-            The dicts are mappings from (type, state_key) -> state_events
+            deferred: A dict of event_id -> (type, state_key) -> state_event
         """
         event_to_groups = yield self._get_state_group_for_events(
             event_ids,
         )
 
         groups = set(itervalues(event_to_groups))
-        group_to_state = yield self._get_state_for_groups(groups, types)
+        group_to_state = yield self._get_state_for_groups(groups, types, filtered_types)
 
         state_event_map = yield self.get_events(
             [ev_id for sd in itervalues(group_to_state) for ev_id in itervalues(sd)],
@@ -370,25 +526,30 @@ class StateGroupWorkerStore(SQLBaseStore):
         defer.returnValue({event: event_to_state[event] for event in event_ids})
 
     @defer.inlineCallbacks
-    def get_state_ids_for_events(self, event_ids, types=None):
+    def get_state_ids_for_events(self, event_ids, types=None, filtered_types=None):
         """
-        Get the state dicts corresponding to a list of events
+        Get the state dicts corresponding to a list of events, containing the event_ids
+        of the state events (as opposed to the events themselves)
 
         Args:
             event_ids(list(str)): events whose state should be returned
-            types(list[(str, str)]|None): List of (type, state_key) tuples
-                which are used to filter the state fetched. May be None, which
-                matches any key
+            types(list[(str, str|None)]|None): List of (type, state_key) tuples
+                which are used to filter the state fetched. If `state_key` is None,
+                all events of the given type are returned.
+                `types` itself may be None, which matches any key.
+            filtered_types(list[str]|None): Only apply filtering via `types` to this
+                list of event types.  Other types of events are returned unfiltered.
+                If None, `types` filtering is applied to all events.
 
         Returns:
-            A deferred dict from event_id -> (type, state_key) -> state_event
+            A deferred dict from event_id -> (type, state_key) -> event_id
         """
         event_to_groups = yield self._get_state_group_for_events(
             event_ids,
         )
 
         groups = set(itervalues(event_to_groups))
-        group_to_state = yield self._get_state_for_groups(groups, types)
+        group_to_state = yield self._get_state_for_groups(groups, types, filtered_types)
 
         event_to_state = {
             event_id: group_to_state[group]
@@ -398,37 +559,45 @@ class StateGroupWorkerStore(SQLBaseStore):
         defer.returnValue({event: event_to_state[event] for event in event_ids})
 
     @defer.inlineCallbacks
-    def get_state_for_event(self, event_id, types=None):
+    def get_state_for_event(self, event_id, types=None, filtered_types=None):
         """
         Get the state dict corresponding to a particular event
 
         Args:
             event_id(str): event whose state should be returned
-            types(list[(str, str)]|None): List of (type, state_key) tuples
-                which are used to filter the state fetched. May be None, which
-                matches any key
+            types(list[(str, str|None)]|None): List of (type, state_key) tuples
+                which are used to filter the state fetched. If `state_key` is None,
+                all events of the given type are returned.
+                `types` itself may be None, which matches any key.
+            filtered_types(list[str]|None): Only apply filtering via `types` to this
+                list of event types.  Other types of events are returned unfiltered.
+                If None, `types` filtering is applied to all events.
 
         Returns:
             A deferred dict from (type, state_key) -> state_event
         """
-        state_map = yield self.get_state_for_events([event_id], types)
+        state_map = yield self.get_state_for_events([event_id], types, filtered_types)
         defer.returnValue(state_map[event_id])
 
     @defer.inlineCallbacks
-    def get_state_ids_for_event(self, event_id, types=None):
+    def get_state_ids_for_event(self, event_id, types=None, filtered_types=None):
         """
         Get the state dict corresponding to a particular event
 
         Args:
             event_id(str): event whose state should be returned
-            types(list[(str, str)]|None): List of (type, state_key) tuples
-                which are used to filter the state fetched. May be None, which
-                matches any key
+            types(list[(str, str|None)]|None): List of (type, state_key) tuples
+                which are used to filter the state fetched. If `state_key` is None,
+                all events of the given type are returned.
+                `types` itself may be None, which matches any key.
+            filtered_types(list[str]|None): Only apply filtering via `types` to this
+                list of event types.  Other types of events are returned unfiltered.
+                If None, `types` filtering is applied to all events.
 
         Returns:
             A deferred dict from (type, state_key) -> event_id
         """
-        state_map = yield self.get_state_ids_for_events([event_id], types)
+        state_map = yield self.get_state_ids_for_events([event_id], types, filtered_types)
         defer.returnValue(state_map[event_id])
 
     @cached(max_entries=50000)
@@ -459,58 +628,76 @@ class StateGroupWorkerStore(SQLBaseStore):
 
         defer.returnValue({row["event_id"]: row["state_group"] for row in rows})
 
-    def _get_some_state_from_cache(self, group, types):
+    def _get_some_state_from_cache(self, cache, group, types, filtered_types=None):
         """Checks if group is in cache. See `_get_state_for_groups`
 
-        Returns 3-tuple (`state_dict`, `missing_types`, `got_all`).
-        `missing_types` is the list of types that aren't in the cache for that
-        group. `got_all` is a bool indicating if we successfully retrieved all
+        Args:
+            cache(DictionaryCache): the state group cache to use
+            group(int): The state group to lookup
+            types(list[str, str|None]): List of 2-tuples of the form
+                (`type`, `state_key`), where a `state_key` of `None` matches all
+                state_keys for the `type`.
+            filtered_types(list[str]|None): Only apply filtering via `types` to this
+                list of event types.  Other types of events are returned unfiltered.
+                If None, `types` filtering is applied to all events.
+
+        Returns 2-tuple (`state_dict`, `got_all`).
+        `got_all` is a bool indicating if we successfully retrieved all
         requested state from the cache; if False we need to query the DB for the
         missing state.
-
-        Args:
-            group: The state group to lookup
-            types (list): List of 2-tuples of the form (`type`, `state_key`),
-                where a `state_key` of `None` matches all state_keys for the
-                `type`.
         """
-        is_all, known_absent, state_dict_ids = self._state_group_cache.get(group)
+        is_all, known_absent, state_dict_ids = cache.get(group)
 
         type_to_key = {}
-        missing_types = set()
+
+        # tracks whether any of our requested types are missing from the cache
+        missing_types = False
 
         for typ, state_key in types:
             key = (typ, state_key)
-            if state_key is None:
+
+            if (
+                state_key is None or
+                (filtered_types is not None and typ not in filtered_types)
+            ):
                 type_to_key[typ] = None
-                missing_types.add(key)
+                # we mark the type as missing from the cache because
+                # when the cache was populated it might have been done with a
+                # restricted set of state_keys, so the wildcard will not work
+                # and the cache may be incomplete.
+                missing_types = True
             else:
                 if type_to_key.get(typ, object()) is not None:
                     type_to_key.setdefault(typ, set()).add(state_key)
 
                 if key not in state_dict_ids and key not in known_absent:
-                    missing_types.add(key)
+                    missing_types = True
 
         sentinel = object()
 
         def include(typ, state_key):
             valid_state_keys = type_to_key.get(typ, sentinel)
             if valid_state_keys is sentinel:
-                return False
+                return filtered_types is not None and typ not in filtered_types
             if valid_state_keys is None:
                 return True
             if state_key in valid_state_keys:
                 return True
             return False
 
-        got_all = is_all or not missing_types
+        got_all = is_all
+        if not got_all:
+            # the cache is incomplete. We may still have got all the results we need, if
+            # we don't have any wildcards in the match list.
+            if not missing_types and filtered_types is None:
+                got_all = True
 
         return {
             k: v for k, v in iteritems(state_dict_ids)
             if include(k[0], k[1])
-        }, missing_types, got_all
+        }, got_all
 
-    def _get_all_state_from_cache(self, group):
+    def _get_all_state_from_cache(self, cache, group):
         """Checks if group is in cache. See `_get_state_for_groups`
 
         Returns 2-tuple (`state_dict`, `got_all`). `got_all` is a bool
@@ -518,18 +705,91 @@ class StateGroupWorkerStore(SQLBaseStore):
         cache, if False we need to query the DB for the missing state.
 
         Args:
+            cache(DictionaryCache): the state group cache to use
             group: The state group to lookup
         """
-        is_all, _, state_dict_ids = self._state_group_cache.get(group)
+        is_all, _, state_dict_ids = cache.get(group)
 
         return state_dict_ids, is_all
 
     @defer.inlineCallbacks
-    def _get_state_for_groups(self, groups, types=None):
-        """Given list of groups returns dict of group -> list of state events
-        with matching types. `types` is a list of `(type, state_key)`, where
-        a `state_key` of None matches all state_keys. If `types` is None then
-        all events are returned.
+    def _get_state_for_groups(self, groups, types=None, filtered_types=None):
+        """Gets the state at each of a list of state groups, optionally
+        filtering by type/state_key
+
+        Args:
+            groups (iterable[int]): list of state groups for which we want
+                to get the state.
+            types (None|iterable[(str, None|str)]):
+                indicates the state type/keys required. If None, the whole
+                state is fetched and returned.
+
+                Otherwise, each entry should be a `(type, state_key)` tuple to
+                include in the response. A `state_key` of None is a wildcard
+                meaning that we require all state with that type.
+            filtered_types(list[str]|None): Only apply filtering via `types` to this
+                list of event types.  Other types of events are returned unfiltered.
+                If None, `types` filtering is applied to all events.
+
+        Returns:
+            Deferred[dict[int, dict[(type, state_key), EventBase]]]
+                a dictionary mapping from state group to state dictionary.
+        """
+        if types is not None:
+            non_member_types = [t for t in types if t[0] != EventTypes.Member]
+
+            if filtered_types is not None and EventTypes.Member not in filtered_types:
+                # we want all of the membership events
+                member_types = None
+            else:
+                member_types = [t for t in types if t[0] == EventTypes.Member]
+
+        else:
+            non_member_types = None
+            member_types = None
+
+        non_member_state = yield self._get_state_for_groups_using_cache(
+            groups, self._state_group_cache, non_member_types, filtered_types,
+        )
+        # XXX: we could skip this entirely if member_types is []
+        member_state = yield self._get_state_for_groups_using_cache(
+            # we set filtered_types=None as member_state only ever contain members.
+            groups, self._state_group_members_cache, member_types, None,
+        )
+
+        state = non_member_state
+        for group in groups:
+            state[group].update(member_state[group])
+
+        defer.returnValue(state)
+
+    @defer.inlineCallbacks
+    def _get_state_for_groups_using_cache(
+        self, groups, cache, types=None, filtered_types=None
+    ):
+        """Gets the state at each of a list of state groups, optionally
+        filtering by type/state_key, querying from a specific cache.
+
+        Args:
+            groups (iterable[int]): list of state groups for which we want
+                to get the state.
+            cache (DictionaryCache): the cache of group ids to state dicts which
+                we will pass through - either the normal state cache or the specific
+                members state cache.
+            types (None|iterable[(str, None|str)]):
+                indicates the state type/keys required. If None, the whole
+                state is fetched and returned.
+
+                Otherwise, each entry should be a `(type, state_key)` tuple to
+                include in the response. A `state_key` of None is a wildcard
+                meaning that we require all state with that type.
+            filtered_types(list[str]|None): Only apply filtering via `types` to this
+                list of event types.  Other types of events are returned unfiltered.
+                If None, `types` filtering is applied to all events.
+
+        Returns:
+            Deferred[dict[int, dict[(type, state_key), EventBase]]]
+                a dictionary mapping from state group to state dictionary.
         """
         if types:
             types = frozenset(types)
@@ -537,8 +797,8 @@ class StateGroupWorkerStore(SQLBaseStore):
         missing_groups = []
         if types is not None:
             for group in set(groups):
-                state_dict_ids, _, got_all = self._get_some_state_from_cache(
-                    group, types
+                state_dict_ids, got_all = self._get_some_state_from_cache(
+                    cache, group, types, filtered_types
                 )
                 results[group] = state_dict_ids
 
@@ -547,7 +807,7 @@ class StateGroupWorkerStore(SQLBaseStore):
         else:
             for group in set(groups):
                 state_dict_ids, got_all = self._get_all_state_from_cache(
-                    group
+                    cache, group
                 )
 
                 results[group] = state_dict_ids
@@ -556,29 +816,46 @@ class StateGroupWorkerStore(SQLBaseStore):
                     missing_groups.append(group)
 
         if missing_groups:
-            # Okay, so we have some missing_types, lets fetch them.
-            cache_seq_num = self._state_group_cache.sequence
+            # Okay, so we have some missing_types, let's fetch them.
+            cache_seq_num = cache.sequence
+
+            # the DictionaryCache knows if it has *all* the state, but
+            # does not know if it has all of the keys of a particular type,
+            # which makes wildcard lookups expensive unless we have a complete
+            # cache. Hence, if we are doing a wildcard lookup, populate the
+            # cache fully so that we can do an efficient lookup next time.
+
+            if filtered_types or (types and any(k is None for (t, k) in types)):
+                types_to_fetch = None
+            else:
+                types_to_fetch = types
 
             group_to_state_dict = yield self._get_state_groups_from_groups(
-                missing_groups, types
+                missing_groups, types_to_fetch, cache == self._state_group_members_cache,
             )
 
-            # Now we want to update the cache with all the things we fetched
-            # from the database.
             for group, group_state_dict in iteritems(group_to_state_dict):
                 state_dict = results[group]
 
-                state_dict.update(
-                    ((intern_string(k[0]), intern_string(k[1])), to_ascii(v))
-                    for k, v in iteritems(group_state_dict)
-                )
-
-                self._state_group_cache.update(
+                # update the result, filtering by `types`.
+                if types:
+                    for k, v in iteritems(group_state_dict):
+                        (typ, _) = k
+                        if (
+                            (k in types or (typ, None) in types) or
+                            (filtered_types and typ not in filtered_types)
+                        ):
+                            state_dict[k] = v
+                else:
+                    state_dict.update(group_state_dict)
+
+                # update the cache with all the things we fetched from the
+                # database.
+                cache.update(
                     cache_seq_num,
                     key=group,
-                    value=state_dict,
-                    full=(types is None),
-                    known_absent=types,
+                    value=group_state_dict,
+                    fetched_keys=types_to_fetch,
                 )
 
         defer.returnValue(results)
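
The wildcard check above is the crux of this hunk: a DictionaryCache can answer exact `(type, state_key)` lookups from a partial entry, but it cannot know whether it holds *all* keys of a given type, so any wildcard (or `filtered_types`) request falls back to fetching the full state. A sketch of just that decision:

    def compute_types_to_fetch(types, filtered_types):
        # None means "fetch the whole state dict", so the cache ends up complete
        if filtered_types or (types and any(k is None for (_t, k) in types)):
            return None
        return types

    assert compute_types_to_fetch([("m.room.member", None)], None) is None
    assert compute_types_to_fetch([("m.room.name", "")], None) == [("m.room.name", "")]
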
@@ -676,16 +953,33 @@ class StateGroupWorkerStore(SQLBaseStore):
                     ],
                 )
 
-            # Prefill the state group cache with this group.
+            # Prefill the state group caches with this group.
             # It's fine to use the sequence like this as the state group map
             # is immutable. (If the map wasn't immutable then this prefill could
             # race with another update)
+
+            current_member_state_ids = {
+                s: ev
+                for (s, ev) in iteritems(current_state_ids)
+                if s[0] == EventTypes.Member
+            }
+            txn.call_after(
+                self._state_group_members_cache.update,
+                self._state_group_members_cache.sequence,
+                key=state_group,
+                value=dict(current_member_state_ids),
+            )
+
+            current_non_member_state_ids = {
+                s: ev
+                for (s, ev) in iteritems(current_state_ids)
+                if s[0] != EventTypes.Member
+            }
             txn.call_after(
                 self._state_group_cache.update,
                 self._state_group_cache.sequence,
                 key=state_group,
-                value=dict(current_state_ids),
-                full=True,
+                value=dict(current_non_member_state_ids),
             )
 
             return state_group
diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py
index fb463c525a..4c296d72c0 100644
--- a/synapse/storage/stream.py
+++ b/synapse/storage/stream.py
@@ -33,22 +33,20 @@ what sort order was used:
       and stream ordering columns respectively.
 """
 
+import abc
+import logging
+from collections import namedtuple
+
+from six.moves import range
+
 from twisted.internet import defer
 
 from synapse.storage._base import SQLBaseStore
-from synapse.storage.events import EventsWorkerStore
-
+from synapse.storage.engines import PostgresEngine
+from synapse.storage.events_worker import EventsWorkerStore
 from synapse.types import RoomStreamToken
 from synapse.util.caches.stream_change_cache import StreamChangeCache
 from synapse.util.logcontext import make_deferred_yieldable, run_in_background
-from synapse.storage.engines import PostgresEngine
-
-import abc
-import logging
-
-from six.moves import range
-from collections import namedtuple
-
 
 logger = logging.getLogger(__name__)
 
@@ -350,7 +348,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             end_token (str): The stream token representing now.
 
         Returns:
-            Deferred[tuple[list[FrozenEvent],  str]]: Returns a list of
+            Deferred[tuple[list[FrozenEvent], str]]: Returns a list of
             events and a token pointing to the start of the returned
             events.
             The events returned are in ascending order.
@@ -381,7 +379,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             end_token (str): The stream token representing now.
 
         Returns:
-            Deferred[tuple[list[_EventDictReturn],  str]]: Returns a list of
+            Deferred[tuple[list[_EventDictReturn], str]]: Returns a list of
             _EventDictReturn and a token pointing to the start of the returned
             events.
             The events returned are in ascending order.
@@ -529,7 +527,9 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             )
 
     @defer.inlineCallbacks
-    def get_events_around(self, room_id, event_id, before_limit, after_limit):
+    def get_events_around(
+        self, room_id, event_id, before_limit, after_limit, event_filter=None,
+    ):
         """Retrieve events and pagination tokens around a given event in a
         room.
 
@@ -538,6 +538,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             event_id (str)
             before_limit (int)
             after_limit (int)
+            event_filter (Filter|None)
 
         Returns:
             dict
@@ -545,7 +546,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
 
         results = yield self.runInteraction(
             "get_events_around", self._get_events_around_txn,
-            room_id, event_id, before_limit, after_limit
+            room_id, event_id, before_limit, after_limit, event_filter,
         )
 
         events_before = yield self._get_events(
@@ -565,7 +566,9 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             "end": results["after"]["token"],
         })
 
-    def _get_events_around_txn(self, txn, room_id, event_id, before_limit, after_limit):
+    def _get_events_around_txn(
+        self, txn, room_id, event_id, before_limit, after_limit, event_filter,
+    ):
         """Retrieves event_ids and pagination tokens around a given event in a
         room.
 
@@ -574,6 +577,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             event_id (str)
             before_limit (int)
             after_limit (int)
+            event_filter (Filter|None)
 
         Returns:
             dict
@@ -603,11 +607,13 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
 
         rows, start_token = self._paginate_room_events_txn(
             txn, room_id, before_token, direction='b', limit=before_limit,
+            event_filter=event_filter,
         )
         events_before = [r.event_id for r in rows]
 
         rows, end_token = self._paginate_room_events_txn(
             txn, room_id, after_token, direction='f', limit=after_limit,
+            event_filter=event_filter,
         )
         events_after = [r.event_id for r in rows]
 
diff --git a/synapse/storage/tags.py b/synapse/storage/tags.py
index 6671d3cfca..0f657b2bd3 100644
--- a/synapse/storage/tags.py
+++ b/synapse/storage/tags.py
@@ -14,16 +14,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.storage.account_data import AccountDataWorkerStore
-
-from synapse.util.caches.descriptors import cached
-from twisted.internet import defer
-
-import simplejson as json
 import logging
 
 from six.moves import range
 
+from canonicaljson import json
+
+from twisted.internet import defer
+
+from synapse.storage.account_data import AccountDataWorkerStore
+from synapse.util.caches.descriptors import cached
+
 logger = logging.getLogger(__name__)
 
 
diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py
index e485d19b84..0c42bd3322 100644
--- a/synapse/storage/transactions.py
+++ b/synapse/storage/transactions.py
@@ -13,18 +13,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import SQLBaseStore
-from synapse.util.caches.descriptors import cached
+import logging
+from collections import namedtuple
 
-from twisted.internet import defer
 import six
 
 from canonicaljson import encode_canonical_json
 
-from collections import namedtuple
+from twisted.internet import defer
 
-import logging
-import simplejson as json
+from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.util.caches.descriptors import cached
+
+from ._base import SQLBaseStore, db_to_json
 
 # py2 sqlite has buffer hardcoded as its only binary type, so we must use it,
 # despite being deprecated and removed in favor of memoryview
@@ -57,7 +58,7 @@ class TransactionStore(SQLBaseStore):
     def __init__(self, db_conn, hs):
         super(TransactionStore, self).__init__(db_conn, hs)
 
-        self._clock.looping_call(self._cleanup_transactions, 30 * 60 * 1000)
+        self._clock.looping_call(self._start_cleanup_transactions, 30 * 60 * 1000)
 
     def get_received_txn_response(self, transaction_id, origin):
         """For an incoming transaction from a given origin, check if we have
@@ -94,7 +95,8 @@ class TransactionStore(SQLBaseStore):
         )
 
         if result and result["response_code"]:
-            return result["response_code"], json.loads(str(result["response_json"]))
+            return result["response_code"], db_to_json(result["response_json"])
+
         else:
             return None
 
@@ -271,6 +273,11 @@ class TransactionStore(SQLBaseStore):
         txn.execute(query, (self._clock.time_msec(),))
         return self.cursor_to_dict(txn)
 
+    def _start_cleanup_transactions(self):
+        return run_as_background_process(
+            "cleanup_transactions", self._cleanup_transactions,
+        )
+
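
The pattern here, reduced to a general sketch: `looping_call` invokes a plain synchronous wrapper, and the wrapper hands the real work to `run_as_background_process` so it runs in its own logcontext and is tracked in the background-process metrics. Roughly, assuming only the two-argument `run_as_background_process(desc, func)` form used above:

    from synapse.metrics.background_process_metrics import run_as_background_process

    def schedule_background_loop(clock, desc, func, interval_ms):
        """Run `func` every `interval_ms` ms as a tracked background process."""
        def start():
            return run_as_background_process(desc, func)
        clock.looping_call(start, interval_ms)
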
     def _cleanup_transactions(self):
         now = self._clock.time_msec()
         month_ago = now - 30 * 24 * 60 * 60 * 1000
diff --git a/synapse/storage/user_directory.py b/synapse/storage/user_directory.py
index 275c299998..a8781b0e5d 100644
--- a/synapse/storage/user_directory.py
+++ b/synapse/storage/user_directory.py
@@ -13,19 +13,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from twisted.internet import defer
+import logging
+import re
 
-from ._base import SQLBaseStore
+from six import iteritems
+
+from twisted.internet import defer
 
-from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
 from synapse.api.constants import EventTypes, JoinRules
 from synapse.storage.engines import PostgresEngine, Sqlite3Engine
 from synapse.types import get_domain_from_id, get_localpart_from_id
+from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
 
-from six import iteritems
-
-import re
-import logging
+from ._base import SQLBaseStore
 
 logger = logging.getLogger(__name__)
 
@@ -265,7 +265,7 @@ class UserDirectoryStore(SQLBaseStore):
         self.get_user_in_public_room.invalidate((user_id,))
 
     def get_users_in_public_due_to_room(self, room_id):
-        """Get all user_ids that are in the room directory becuase they're
+        """Get all user_ids that are in the room directory because they're
         in the given room_id
         """
         return self._simple_select_onecol(
@@ -277,7 +277,7 @@ class UserDirectoryStore(SQLBaseStore):
 
     @defer.inlineCallbacks
     def get_users_in_dir_due_to_room(self, room_id):
-        """Get all user_ids that are in the room directory becuase they're
+        """Get all user_ids that are in the room directory because they're
         in the given room_id
         """
         user_ids_dir = yield self._simple_select_onecol(
diff --git a/synapse/storage/user_erasure_store.py b/synapse/storage/user_erasure_store.py
new file mode 100644
index 0000000000..be013f4427
--- /dev/null
+++ b/synapse/storage/user_erasure_store.py
@@ -0,0 +1,103 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import operator
+
+from twisted.internet import defer
+
+from synapse.storage._base import SQLBaseStore
+from synapse.util.caches.descriptors import cached, cachedList
+
+
+class UserErasureWorkerStore(SQLBaseStore):
+    @cached()
+    def is_user_erased(self, user_id):
+        """
+        Check if the given user id has requested erasure
+
+        Args:
+            user_id (str): full user id to check
+
+        Returns:
+            Deferred[bool]: True if the user has requested erasure
+        """
+        return self._simple_select_onecol(
+            table="erased_users",
+            keyvalues={"user_id": user_id},
+            retcol="1",
+            desc="is_user_erased",
+        ).addCallback(operator.truth)
+
+    @cachedList(
+        cached_method_name="is_user_erased",
+        list_name="user_ids",
+        inlineCallbacks=True,
+    )
+    def are_users_erased(self, user_ids):
+        """
+        Checks which users in a list have requested erasure
+
+        Args:
+            user_ids (iterable[str]): full user ids to check
+
+        Returns:
+            Deferred[dict[str, bool]]:
+                for each user, whether the user has requested erasure.
+        """
+        # this serves the dual purpose of (a) making sure we can call len() on
+        # it and iterate it multiple times, and (b) avoiding duplicates.
+        user_ids = tuple(set(user_ids))
+
+        def _get_erased_users(txn):
+            txn.execute(
+                "SELECT user_id FROM erased_users WHERE user_id IN (%s)" % (
+                    ",".join("?" * len(user_ids))
+                ),
+                user_ids,
+            )
+            return set(r[0] for r in txn)
+
+        erased_users = yield self.runInteraction(
+            "are_users_erased", _get_erased_users,
+        )
+        res = dict((u, u in erased_users) for u in user_ids)
+        defer.returnValue(res)
+
+
+class UserErasureStore(UserErasureWorkerStore):
+    def mark_user_erased(self, user_id):
+        """Indicate that user_id wishes their message history to be erased.
+
+        Args:
+            user_id (str): full user_id to be erased
+        """
+        def f(txn):
+            # first check if they are already in the list
+            txn.execute(
+                "SELECT 1 FROM erased_users WHERE user_id = ?",
+                (user_id, )
+            )
+            if txn.fetchone():
+                return
+
+            # they are not already there: do the insert.
+            txn.execute(
+                "INSERT INTO erased_users (user_id) VALUES (?)",
+                (user_id, )
+            )
+
+            self._invalidate_cache_and_stream(
+                txn, self.is_user_erased, (user_id,)
+            )
+        return self.runInteraction("mark_user_erased", f)
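
The `IN (...)` clause in `_get_erased_users` is built by repeating a `?` placeholder per user id, which keeps the query parameterised however long the list is. A self-contained sketch of the same trick against an in-memory SQLite database:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE erased_users (user_id TEXT)")
    conn.execute("INSERT INTO erased_users VALUES ('@a:hs')")

    user_ids = ("@a:hs", "@b:hs")
    rows = conn.execute(
        "SELECT user_id FROM erased_users WHERE user_id IN (%s)" % (
            ",".join("?" * len(user_ids))
        ),
        user_ids,
    )
    erased_users = set(r[0] for r in rows)
    print(dict((u, u in erased_users) for u in user_ids))
    # -> {'@a:hs': True, '@b:hs': False}
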
diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py
index 95031dc9ec..d6160d5e4d 100644
--- a/synapse/storage/util/id_generators.py
+++ b/synapse/storage/util/id_generators.py
@@ -13,9 +13,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from collections import deque
 import contextlib
 import threading
+from collections import deque
 
 
 class IdGenerator(object):
diff --git a/synapse/streams/config.py b/synapse/streams/config.py
index ca78e551cb..451e4fa441 100644
--- a/synapse/streams/config.py
+++ b/synapse/streams/config.py
@@ -13,11 +13,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.api.errors import SynapseError
-from synapse.types import StreamToken
-
 import logging
 
+from synapse.api.errors import SynapseError
+from synapse.http.servlet import parse_integer, parse_string
+from synapse.types import StreamToken
 
 logger = logging.getLogger(__name__)
 
@@ -57,23 +57,10 @@ class PaginationConfig(object):
     @classmethod
     def from_request(cls, request, raise_invalid_params=True,
                      default_limit=None):
-        def get_param(name, default=None):
-            lst = request.args.get(name, [])
-            if len(lst) > 1:
-                raise SynapseError(
-                    400, "%s must be specified only once" % (name,)
-                )
-            elif len(lst) == 1:
-                return lst[0]
-            else:
-                return default
-
-        direction = get_param("dir", 'f')
-        if direction not in ['f', 'b']:
-            raise SynapseError(400, "'dir' parameter is invalid.")
-
-        from_tok = get_param("from")
-        to_tok = get_param("to")
+        direction = parse_string(request, "dir", default='f', allowed_values=['f', 'b'])
+
+        from_tok = parse_string(request, "from")
+        to_tok = parse_string(request, "to")
 
         try:
             if from_tok == "END":
@@ -89,12 +76,10 @@ class PaginationConfig(object):
         except Exception:
             raise SynapseError(400, "'to' paramater is invalid")
 
-        limit = get_param("limit", None)
-        if limit is not None and not limit.isdigit():
-            raise SynapseError(400, "'limit' parameter must be an integer.")
+        limit = parse_integer(request, "limit", default=default_limit)
 
-        if limit is None:
-            limit = default_limit
+        if limit and limit < 0:
+            raise SynapseError(400, "Limit must be 0 or above")
 
         try:
             return PaginationConfig(from_tok, to_tok, direction, limit)
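
For illustration, a rough sketch of what the servlet helpers do with a dict of query arguments. This is not Synapse's actual implementation of `parse_string`/`parse_integer` (which raise SynapseError with a 400, among other differences), just the behaviour the rewrite relies on: take a single value, apply a default, and enforce allowed values:

    def parse_string(args, name, default=None, allowed_values=None):
        vals = args.get(name, [])
        val = vals[0] if vals else default
        if allowed_values is not None and val not in allowed_values:
            raise ValueError("%r must be one of %r" % (name, allowed_values))
        return val

    def parse_integer(args, name, default=None):
        vals = args.get(name, [])
        if not vals:
            return default
        try:
            return int(vals[0])
        except ValueError:
            raise ValueError("%r must be an integer" % (name,))

    print(parse_string({"dir": ["b"]}, "dir", default='f', allowed_values=['f', 'b']))
    # -> b
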
diff --git a/synapse/streams/events.py b/synapse/streams/events.py
index f03ad99118..e5220132a3 100644
--- a/synapse/streams/events.py
+++ b/synapse/streams/events.py
@@ -15,13 +15,12 @@
 
 from twisted.internet import defer
 
-from synapse.types import StreamToken
-
+from synapse.handlers.account_data import AccountDataEventSource
 from synapse.handlers.presence import PresenceEventSource
+from synapse.handlers.receipts import ReceiptEventSource
 from synapse.handlers.room import RoomEventSource
 from synapse.handlers.typing import TypingNotificationEventSource
-from synapse.handlers.receipts import ReceiptEventSource
-from synapse.handlers.account_data import AccountDataEventSource
+from synapse.types import StreamToken
 
 
 class EventSources(object):
diff --git a/synapse/types.py b/synapse/types.py
index cc7c182a78..41afb27a74 100644
--- a/synapse/types.py
+++ b/synapse/types.py
@@ -13,11 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import string
+from collections import namedtuple
 
 from synapse.api.errors import SynapseError
 
-from collections import namedtuple
-
 
 class Requester(namedtuple("Requester", [
     "user", "access_token_id", "is_guest", "device_id", "app_service",
@@ -138,7 +137,7 @@ class DomainSpecificString(
     @classmethod
     def from_string(cls, s):
         """Parse the string given by 's' into a structure object."""
-        if len(s) < 1 or s[0] != cls.SIGIL:
+        if len(s) < 1 or s[0:1] != cls.SIGIL:
             raise SynapseError(400, "Expected %s string to start with '%s'" % (
                 cls.__name__, cls.SIGIL,
             ))
diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py
index fc11e26623..680ea928c7 100644
--- a/synapse/util/__init__.py
+++ b/synapse/util/__init__.py
@@ -13,14 +13,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.util.logcontext import PreserveLoggingContext
+import logging
+from itertools import islice
 
-from twisted.internet import defer, reactor, task
+import attr
 
-import time
-import logging
+from twisted.internet import defer, task
 
-from itertools import islice
+from synapse.util.logcontext import PreserveLoggingContext
 
 logger = logging.getLogger(__name__)
 
@@ -31,16 +31,27 @@ def unwrapFirstError(failure):
     return failure.value.subFailure
 
 
+@attr.s
 class Clock(object):
-    """A small utility that obtains current time-of-day so that time may be
-    mocked during unit-tests.
+    """
+    A Clock wraps a Twisted reactor and provides utilities on top of it.
 
-    TODO(paul): Also move the sleep() functionality into it
+    Args:
+        reactor: The Twisted reactor to use.
     """
+    _reactor = attr.ib()
+
+    @defer.inlineCallbacks
+    def sleep(self, seconds):
+        d = defer.Deferred()
+        with PreserveLoggingContext():
+            self._reactor.callLater(seconds, d.callback, seconds)
+            res = yield d
+        defer.returnValue(res)
 
     def time(self):
         """Returns the current system time in seconds since epoch."""
-        return time.time()
+        return self._reactor.seconds()
 
     def time_msec(self):
         """Returns the current system time in miliseconds since epoch."""
@@ -56,6 +67,7 @@ class Clock(object):
             msec(float): How long to wait between calls in milliseconds.
         """
         call = task.LoopingCall(f)
+        call.clock = self._reactor
         call.start(msec / 1000.0, now=False)
         return call
 
@@ -73,7 +85,7 @@ class Clock(object):
                 callback(*args, **kwargs)
 
         with PreserveLoggingContext():
-            return reactor.callLater(delay, wrapped_callback, *args, **kwargs)
+            return self._reactor.callLater(delay, wrapped_callback, *args, **kwargs)
 
     def cancel_call_later(self, timer, ignore_errs=False):
         try:
diff --git a/synapse/util/async.py b/synapse/util/async_helpers.py
index 9dd4e6b5bc..9b3f2f4b96 100644
--- a/synapse/util/async.py
+++ b/synapse/util/async_helpers.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,42 +13,27 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import collections
+import logging
+from contextlib import contextmanager
 
+from six.moves import range
 
-from twisted.internet import defer, reactor
+from twisted.internet import defer
 from twisted.internet.defer import CancelledError
 from twisted.python import failure
 
+from synapse.util import Clock, logcontext, unwrapFirstError
+
 from .logcontext import (
-    PreserveLoggingContext, make_deferred_yieldable, run_in_background
+    PreserveLoggingContext,
+    make_deferred_yieldable,
+    run_in_background,
 )
-from synapse.util import logcontext, unwrapFirstError
-
-from contextlib import contextmanager
-
-import logging
-
-from six.moves import range
 
 logger = logging.getLogger(__name__)
 
 
-@defer.inlineCallbacks
-def sleep(seconds):
-    d = defer.Deferred()
-    with PreserveLoggingContext():
-        reactor.callLater(seconds, d.callback, seconds)
-        res = yield d
-    defer.returnValue(res)
-
-
-def run_on_reactor():
-    """ This will cause the rest of the function to be invoked upon the next
-    iteration of the main loop
-    """
-    return sleep(0)
-
-
 class ObservableDeferred(object):
     """Wraps a deferred object so that we can add observer deferreds. These
     observer deferreds do not affect the callback chain of the original
@@ -171,86 +157,8 @@ def concurrently_execute(func, args, limit):
 
 
 class Linearizer(object):
-    """Linearizes access to resources based on a key. Useful to ensure only one
-    thing is happening at a time on a given resource.
-
-    Example:
-
-        with (yield linearizer.queue("test_key")):
-            # do some work.
-
-    """
-    def __init__(self, name=None):
-        if name is None:
-            self.name = id(self)
-        else:
-            self.name = name
-        self.key_to_defer = {}
-
-    @defer.inlineCallbacks
-    def queue(self, key):
-        # If there is already a deferred in the queue, we pull it out so that
-        # we can wait on it later.
-        # Then we replace it with a deferred that we resolve *after* the
-        # context manager has exited.
-        # We only return the context manager after the previous deferred has
-        # resolved.
-        # This all has the net effect of creating a chain of deferreds that
-        # wait for the previous deferred before starting their work.
-        current_defer = self.key_to_defer.get(key)
-
-        new_defer = defer.Deferred()
-        self.key_to_defer[key] = new_defer
-
-        if current_defer:
-            logger.info(
-                "Waiting to acquire linearizer lock %r for key %r", self.name, key
-            )
-            try:
-                with PreserveLoggingContext():
-                    yield current_defer
-            except Exception:
-                logger.exception("Unexpected exception in Linearizer")
-
-            logger.info("Acquired linearizer lock %r for key %r", self.name,
-                        key)
-
-            # if the code holding the lock completes synchronously, then it
-            # will recursively run the next claimant on the list. That can
-            # relatively rapidly lead to stack exhaustion. This is essentially
-            # the same problem as http://twistedmatrix.com/trac/ticket/9304.
-            #
-            # In order to break the cycle, we add a cheeky sleep(0) here to
-            # ensure that we fall back to the reactor between each iteration.
-            #
-            # (There's no particular need for it to happen before we return
-            # the context manager, but it needs to happen while we hold the
-            # lock, and the context manager's exit code must be synchronous,
-            # so actually this is the only sensible place.
-            yield run_on_reactor()
-
-        else:
-            logger.info("Acquired uncontended linearizer lock %r for key %r",
-                        self.name, key)
-
-        @contextmanager
-        def _ctx_manager():
-            try:
-                yield
-            finally:
-                logger.info("Releasing linearizer lock %r for key %r", self.name, key)
-                with PreserveLoggingContext():
-                    new_defer.callback(None)
-                current_d = self.key_to_defer.get(key)
-                if current_d is new_defer:
-                    self.key_to_defer.pop(key, None)
-
-        defer.returnValue(_ctx_manager())
-
-
-class Limiter(object):
     """Limits concurrent access to resources based on a key. Useful to ensure
-    only a few thing happen at a time on a given resource.
+    only a few things happen at a time on a given resource.
 
     Example:
 
@@ -258,54 +166,65 @@ class Limiter(object):
             # do some work.
 
     """
-    def __init__(self, max_count):
+    def __init__(self, name=None, max_count=1, clock=None):
         """
         Args:
-            max_count(int): The maximum number of concurrent access
+            name (str|None): A name for the linearizer, used in log lines.
+            max_count (int): The maximum number of concurrent accesses
+            clock (synapse.util.Clock|None): The clock to use; if not given,
+                one is created that wraps the global reactor.
         """
+        if name is None:
+            self.name = id(self)
+        else:
+            self.name = name
+
+        if not clock:
+            from twisted.internet import reactor
+            clock = Clock(reactor)
+        self._clock = clock
         self.max_count = max_count
 
         # key_to_defer is a map from the key to a 2 element list where
-        # the first element is the number of things executing
-        # the second element is a list of deferreds for the things blocked from
-        # executing.
+        # the first element is the number of things executing, and
+        # the second element is an OrderedDict, where the keys are deferreds for the
+        # things blocked from executing.
         self.key_to_defer = {}
 
-    @defer.inlineCallbacks
     def queue(self, key):
-        entry = self.key_to_defer.setdefault(key, [0, []])
+        # we avoid doing defer.inlineCallbacks here, so that cancellation works correctly.
+        # (https://twistedmatrix.com/trac/ticket/4632 meant that cancellations were not
+        # propagated inside inlineCallbacks until Twisted 18.7)
+        entry = self.key_to_defer.setdefault(key, [0, collections.OrderedDict()])
 
         # If the number of things executing is greater than the maximum
         # then add a deferred to the list of blocked items
-        # When on of the things currently executing finishes it will callback
+        # When one of the things currently executing finishes it will callback
         # this item so that it can continue executing.
         if entry[0] >= self.max_count:
-            new_defer = defer.Deferred()
-            entry[1].append(new_defer)
-
-            logger.info("Waiting to acquire limiter lock for key %r", key)
-            with PreserveLoggingContext():
-                yield new_defer
-            logger.info("Acquired limiter lock for key %r", key)
+            res = self._await_lock(key)
         else:
-            logger.info("Acquired uncontended limiter lock for key %r", key)
+            logger.info(
+                "Acquired uncontended linearizer lock %r for key %r", self.name, key,
+            )
+            entry[0] += 1
+            res = defer.succeed(None)
 
-        entry[0] += 1
+        # once we successfully get the lock, we need to return a context manager which
+        # will release the lock.
 
         @contextmanager
-        def _ctx_manager():
+        def _ctx_manager(_):
             try:
                 yield
             finally:
-                logger.info("Releasing limiter lock for key %r", key)
+                logger.info("Releasing linearizer lock %r for key %r", self.name, key)
 
                 # We've finished executing so check if there are any things
                 # blocked waiting to execute and start one of them
                 entry[0] -= 1
 
                 if entry[1]:
-                    next_def = entry[1].pop(0)
+                    (next_def, _) = entry[1].popitem(last=False)
 
+                    # we need to run the next thing in the sentinel context.
                     with PreserveLoggingContext():
                         next_def.callback(None)
                 elif entry[0] == 0:
@@ -313,7 +232,64 @@ class Limiter(object):
                     # map.
                     del self.key_to_defer[key]
 
-        defer.returnValue(_ctx_manager())
+        res.addCallback(_ctx_manager)
+        return res
+
+    def _await_lock(self, key):
+        """Helper for queue: adds a deferred to the queue
+
+        Assumes that we've already checked that we've reached the limit of the number
+        of lock-holders we allow. Creates a new deferred which is added to the list, and
+        adds some management around cancellations.
+
+        Returns the deferred, which will callback once we have secured the lock.
+
+        """
+        entry = self.key_to_defer[key]
+
+        logger.info(
+            "Waiting to acquire linearizer lock %r for key %r", self.name, key,
+        )
+
+        new_defer = make_deferred_yieldable(defer.Deferred())
+        entry[1][new_defer] = 1
+
+        def cb(_r):
+            logger.info("Acquired linearizer lock %r for key %r", self.name, key)
+            entry[0] += 1
+
+            # if the code holding the lock completes synchronously, then it
+            # will recursively run the next claimant on the list. That can
+            # relatively rapidly lead to stack exhaustion. This is essentially
+            # the same problem as http://twistedmatrix.com/trac/ticket/9304.
+            #
+            # In order to break the cycle, we add a cheeky sleep(0) here to
+            # ensure that we fall back to the reactor between each iteration.
+            #
+            # (This needs to happen while we hold the lock, and the context manager's exit
+            # code must be synchronous, so this is the only sensible place.)
+            return self._clock.sleep(0)
+
+        def eb(e):
+            logger.info("defer %r got err %r", new_defer, e)
+            if isinstance(e, CancelledError):
+                logger.info(
+                    "Cancelling wait for linearizer lock %r for key %r",
+                    self.name, key,
+                )
+
+            else:
+                logger.warn(
+                    "Unexpected exception waiting for linearizer lock %r for key %r",
+                    self.name, key,
+                )
+
+            # we just have to take ourselves back out of the queue.
+            del entry[1][new_defer]
+            return e
+
+        new_defer.addCallbacks(cb, eb)
+        return new_defer
 
 
 class ReadWriteLock(object):
@@ -404,7 +380,7 @@ class DeferredTimeoutError(Exception):
     """
 
 
-def add_timeout_to_deferred(deferred, timeout, on_timeout_cancel=None):
+def add_timeout_to_deferred(deferred, timeout, reactor, on_timeout_cancel=None):
     """
     Add a timeout to a deferred by scheduling it to be cancelled after
     timeout seconds.
@@ -419,6 +395,7 @@ def add_timeout_to_deferred(deferred, timeout, on_timeout_cancel=None):
     Args:
         deferred (defer.Deferred): deferred to be timed out
         timeout (Number): seconds to time out after
+        reactor (twisted.internet.reactor): the Twisted reactor to use
 
         on_timeout_cancel (callable): A callable which is called immediately
             after the deferred times out, and not if this deferred is
diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py
index 900575eb3c..7b065b195e 100644
--- a/synapse/util/caches/__init__.py
+++ b/synapse/util/caches/__init__.py
@@ -13,12 +13,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from prometheus_client.core import Gauge, REGISTRY, GaugeMetricFamily
-
 import os
 
-from six.moves import intern
 import six
+from six.moves import intern
+
+from prometheus_client.core import REGISTRY, Gauge, GaugeMetricFamily
 
 CACHE_SIZE_FACTOR = float(os.environ.get("SYNAPSE_CACHE_FACTOR", 0.5))
 
diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py
index 65a1042de1..187510576a 100644
--- a/synapse/util/caches/descriptors.py
+++ b/synapse/util/caches/descriptors.py
@@ -13,10 +13,19 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import functools
+import inspect
 import logging
+import threading
+from collections import namedtuple
+
+import six
+from six import itervalues, string_types
+
+from twisted.internet import defer
 
-from synapse.util.async import ObservableDeferred
-from synapse.util import unwrapFirstError, logcontext
+from synapse.util import logcontext, unwrapFirstError
+from synapse.util.async_helpers import ObservableDeferred
 from synapse.util.caches import get_cache_factor_for
 from synapse.util.caches.lrucache import LruCache
 from synapse.util.caches.treecache import TreeCache, iterate_tree_cache_entry
@@ -24,17 +33,6 @@ from synapse.util.stringutils import to_ascii
 
 from . import register_cache
 
-from twisted.internet import defer
-from collections import namedtuple
-
-import functools
-import inspect
-import threading
-
-from six import string_types, itervalues
-import six
-
-
 logger = logging.getLogger(__name__)
 
 
@@ -475,105 +473,101 @@ class CacheListDescriptor(_CacheDescriptorBase):
 
         @functools.wraps(self.orig)
         def wrapped(*args, **kwargs):
-            # If we're passed a cache_context then we'll want to call its invalidate()
-            # whenever we are invalidated
+            # If we're passed a cache_context then we'll want to call its
+            # invalidate() whenever we are invalidated
             invalidate_callback = kwargs.pop("on_invalidate", None)
 
             arg_dict = inspect.getcallargs(self.orig, obj, *args, **kwargs)
             keyargs = [arg_dict[arg_nm] for arg_nm in self.arg_names]
             list_args = arg_dict[self.list_name]
 
-            # cached is a dict arg -> deferred, where deferred results in a
-            # 2-tuple (`arg`, `result`)
             results = {}
-            cached_defers = {}
-            missing = []
+
+            def update_results_dict(res, arg):
+                results[arg] = res
+
+            # list of deferreds to wait for
+            cached_defers = []
+
+            missing = set()
 
             # If the cache takes a single arg then that is used as the key,
             # otherwise a tuple is used.
             if num_args == 1:
-                def cache_get(arg):
-                    return cache.get(arg, callback=invalidate_callback)
+                def arg_to_cache_key(arg):
+                    return arg
             else:
-                key = list(keyargs)
+                keylist = list(keyargs)
 
-                def cache_get(arg):
-                    key[self.list_pos] = arg
-                    return cache.get(tuple(key), callback=invalidate_callback)
+                def arg_to_cache_key(arg):
+                    keylist[self.list_pos] = arg
+                    return tuple(keylist)
 
             for arg in list_args:
                 try:
-                    res = cache_get(arg)
-
+                    res = cache.get(arg_to_cache_key(arg),
+                                    callback=invalidate_callback)
                     if not isinstance(res, ObservableDeferred):
                         results[arg] = res
                     elif not res.has_succeeded():
                         res = res.observe()
-                        res.addCallback(lambda r, arg: (arg, r), arg)
-                        cached_defers[arg] = res
+                        res.addCallback(update_results_dict, arg)
+                        cached_defers.append(res)
                     else:
                         results[arg] = res.get_result()
                 except KeyError:
-                    missing.append(arg)
+                    missing.add(arg)
 
             if missing:
+                # we need an observable deferred for each entry in the list,
+                # which we put in the cache. Each deferred resolves with the
+                # relevant result for that key.
+                deferreds_map = {}
+                for arg in missing:
+                    deferred = defer.Deferred()
+                    deferreds_map[arg] = deferred
+                    key = arg_to_cache_key(arg)
+                    observable = ObservableDeferred(deferred)
+                    cache.set(key, observable, callback=invalidate_callback)
+
+                def complete_all(res):
+                    # the wrapped function has completed. It returns a
+                    # dict. We can now resolve the observable deferreds in
+                    # the cache and update our own result map.
+                    for e in missing:
+                        val = res.get(e, None)
+                        deferreds_map[e].callback(val)
+                        results[e] = val
+
+                def errback(f):
+                    # the wrapped function has failed. Invalidate any cache
+                    # entries we're supposed to be populating, and fail
+                    # their deferreds.
+                    for e in missing:
+                        key = arg_to_cache_key(e)
+                        cache.invalidate(key)
+                        deferreds_map[e].errback(f)
+
+                    # return the failure, to propagate to our caller.
+                    return f
+
                 args_to_call = dict(arg_dict)
-                args_to_call[self.list_name] = missing
+                args_to_call[self.list_name] = list(missing)
 
-                ret_d = defer.maybeDeferred(
+                cached_defers.append(defer.maybeDeferred(
                     logcontext.preserve_fn(self.function_to_call),
                     **args_to_call
-                )
-
-                ret_d = ObservableDeferred(ret_d)
-
-                # We need to create deferreds for each arg in the list so that
-                # we can insert the new deferred into the cache.
-                for arg in missing:
-                    observer = ret_d.observe()
-                    observer.addCallback(lambda r, arg: r.get(arg, None), arg)
-
-                    observer = ObservableDeferred(observer)
-
-                    if num_args == 1:
-                        cache.set(
-                            arg, observer,
-                            callback=invalidate_callback
-                        )
-
-                        def invalidate(f, key):
-                            cache.invalidate(key)
-                            return f
-                        observer.addErrback(invalidate, arg)
-                    else:
-                        key = list(keyargs)
-                        key[self.list_pos] = arg
-                        cache.set(
-                            tuple(key), observer,
-                            callback=invalidate_callback
-                        )
-
-                        def invalidate(f, key):
-                            cache.invalidate(key)
-                            return f
-                        observer.addErrback(invalidate, tuple(key))
-
-                    res = observer.observe()
-                    res.addCallback(lambda r, arg: (arg, r), arg)
-
-                    cached_defers[arg] = res
+                ).addCallbacks(complete_all, errback))
 
             if cached_defers:
-                def update_results_dict(res):
-                    results.update(res)
-                    return results
-
-                return logcontext.make_deferred_yieldable(defer.gatherResults(
-                    list(cached_defers.values()),
+                d = defer.gatherResults(
+                    cached_defers,
                     consumeErrors=True,
-                ).addCallback(update_results_dict).addErrback(
+                ).addCallbacks(
+                    lambda _: results,
                     unwrapFirstError
-                ))
+                )
+                return logcontext.make_deferred_yieldable(d)
             else:
                 return results
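
The batching shape above, reduced to its essentials: one deferred per missing key goes into the cache up front (so concurrent callers share it), then a single batch fetch resolves them all, or errbacks and invalidates them all on failure. A standalone sketch with a plain dict standing in for the cache:

    from twisted.internet import defer

    def batch_lookup(cache, missing, fetch_many):
        deferreds = {arg: defer.Deferred() for arg in missing}
        cache.update(deferreds)  # later callers find these and wait on them

        def complete_all(res):
            for arg in missing:
                deferreds[arg].callback(res.get(arg))

        def errback(f):
            for arg in missing:
                cache.pop(arg, None)      # don't cache the failure
                deferreds[arg].errback(f)
            return f                      # propagate to our caller too

        return defer.maybeDeferred(fetch_many, list(missing)).addCallbacks(
            complete_all, errback,
        )
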
 
@@ -627,7 +621,8 @@ def cachedList(cached_method_name, list_name, num_args=None, inlineCallbacks=Fal
     cache.
 
     Args:
-        cache (Cache): The underlying cache to use.
+        cached_method_name (str): The name of the single-item lookup method.
+            This is only used to find the cache to use.
         list_name (str): The name of the argument that is the list to use to
             do batch lookups in the cache.
         num_args (int): Number of arguments to use as the key in the cache
diff --git a/synapse/util/caches/dictionary_cache.py b/synapse/util/caches/dictionary_cache.py
index bdc21e348f..6c0b5a4094 100644
--- a/synapse/util/caches/dictionary_cache.py
+++ b/synapse/util/caches/dictionary_cache.py
@@ -13,12 +13,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.util.caches.lrucache import LruCache
-from collections import namedtuple
-from . import register_cache
-import threading
 import logging
+import threading
+from collections import namedtuple
 
+from synapse.util.caches.lrucache import LruCache
+
+from . import register_cache
 
 logger = logging.getLogger(__name__)
 
@@ -107,29 +108,28 @@ class DictionaryCache(object):
         self.sequence += 1
         self.cache.clear()
 
-    def update(self, sequence, key, value, full=False, known_absent=None):
+    def update(self, sequence, key, value, fetched_keys=None):
         """Updates the entry in the cache
 
         Args:
             sequence
-            key
-            value (dict): The value to update the cache with.
-            full (bool): Whether the given value is the full dict, or just a
-                partial subset there of. If not full then any existing entries
-                for the key will be updated.
-            known_absent (set): Set of keys that we know don't exist in the full
-                dict.
+            key (K)
+            value (dict[X,Y]): The value to update the cache with.
+            fetched_keys (None|set[X]): All of the dictionary keys which were
+                fetched from the database.
+
+                If None, this is the complete value for key K. Otherwise, it
+                is used to infer a list of keys which we know don't exist in
+                the full dict.
         """
         self.check_thread()
         if self.sequence == sequence:
             # Only update the cache if the cache's sequence number matches the
             # number that the cache had before the SELECT was started (SYN-369)
-            if known_absent is None:
-                known_absent = set()
-            if full:
-                self._insert(key, value, known_absent)
+            if fetched_keys is None:
+                self._insert(key, value, set())
             else:
-                self._update_or_insert(key, value, known_absent)
+                self._update_or_insert(key, value, fetched_keys)
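
The two call shapes after this change, sketched with plain dicts: `fetched_keys=None` means the value is the complete dict; a set records which keys were actually fetched, so requested keys absent from the value are known-absent. (`apply_update` and the `entry` dict below are illustrative stand-ins, not the real DictionaryCache internals.)

    def apply_update(entry, value, fetched_keys=None):
        if fetched_keys is None:
            # complete value: replace everything, nothing is known-absent
            entry.update(dict=dict(value), full=True, known_absent=set())
        else:
            entry["dict"].update(value)
            entry["known_absent"] |= set(fetched_keys) - set(value)

    entry = {"dict": {}, "full": False, "known_absent": set()}
    apply_update(
        entry,
        {("m.room.name", ""): "$ev1"},
        fetched_keys={("m.room.name", ""), ("m.room.topic", "")},
    )
    print(entry["known_absent"])  # {('m.room.topic', '')}
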
 
     def _update_or_insert(self, key, value, known_absent):
         # We pop and reinsert as we need to tell the cache the size may have
diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py
index ff04c91955..ce85b2ae11 100644
--- a/synapse/util/caches/expiringcache.py
+++ b/synapse/util/caches/expiringcache.py
@@ -13,11 +13,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.util.caches import register_cache
-
-from collections import OrderedDict
 import logging
+from collections import OrderedDict
 
+from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.util.caches import register_cache
 
 logger = logging.getLogger(__name__)
 
@@ -64,7 +64,10 @@ class ExpiringCache(object):
             return
 
         def f():
-            self._prune_cache()
+            return run_as_background_process(
+                "prune_cache_%s" % self._cache_name,
+                self._prune_cache,
+            )
 
         self._clock.looping_call(f, self._expiry_ms / 2)
 
diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py
index 1c5a982094..b684f24e7b 100644
--- a/synapse/util/caches/lrucache.py
+++ b/synapse/util/caches/lrucache.py
@@ -14,8 +14,8 @@
 # limitations under the License.
 
 
-from functools import wraps
 import threading
+from functools import wraps
 
 from synapse.util.caches.treecache import TreeCache
 
diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py
index a8491b42d5..afb03b2e1b 100644
--- a/synapse/util/caches/response_cache.py
+++ b/synapse/util/caches/response_cache.py
@@ -16,7 +16,7 @@ import logging
 
 from twisted.internet import defer
 
-from synapse.util.async import ObservableDeferred
+from synapse.util.async_helpers import ObservableDeferred
 from synapse.util.caches import register_cache
 from synapse.util.logcontext import make_deferred_yieldable, run_in_background
 
diff --git a/synapse/util/caches/snapshot_cache.py b/synapse/util/caches/snapshot_cache.py
index d03678b8c8..8318db8d2c 100644
--- a/synapse/util/caches/snapshot_cache.py
+++ b/synapse/util/caches/snapshot_cache.py
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.util.async import ObservableDeferred
+from synapse.util.async_helpers import ObservableDeferred
 
 
 class SnapshotCache(object):
diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py
index 817118e30f..f2bde74dc5 100644
--- a/synapse/util/caches/stream_change_cache.py
+++ b/synapse/util/caches/stream_change_cache.py
@@ -13,12 +13,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.util import caches
-
+import logging
 
 from sortedcontainers import SortedDict
-import logging
 
+from synapse.util import caches
 
 logger = logging.getLogger(__name__)
 
@@ -75,13 +74,13 @@ class StreamChangeCache(object):
         assert type(stream_pos) is int
 
         if stream_pos >= self._earliest_known_stream_pos:
-            not_known_entities = set(entities) - set(self._entity_to_key)
+            changed_entities = {
+                self._cache[k] for k in self._cache.islice(
+                    start=self._cache.bisect_right(stream_pos),
+                )
+            }
 
-            result = (
-                set(self._cache.values()[self._cache.bisect_right(stream_pos) :])
-                .intersection(entities)
-                .union(not_known_entities)
-            )
+            result = changed_entities.intersection(entities)
 
             self.metrics.inc_hits()
         else:
@@ -113,7 +112,8 @@ class StreamChangeCache(object):
         assert type(stream_pos) is int
 
         if stream_pos >= self._earliest_known_stream_pos:
-            return self._cache.values()[self._cache.bisect_right(stream_pos) :]
+            return [self._cache[k] for k in self._cache.islice(
+                start=self._cache.bisect_right(stream_pos))]
         else:
             return None
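
The replacement lookup leans on `sortedcontainers.SortedDict`: `bisect_right(stream_pos)` finds the index just past the position, and `islice(start=...)` walks every key strictly after it. Standalone:

    from sortedcontainers import SortedDict

    cache = SortedDict({1: "@a:hs", 3: "@b:hs", 7: "@c:hs"})  # stream pos -> entity
    stream_pos = 3
    changed = {
        cache[k] for k in cache.islice(start=cache.bisect_right(stream_pos))
    }
    print(changed)  # {'@c:hs'}
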
 
diff --git a/synapse/util/distributor.py b/synapse/util/distributor.py
index 734331caaa..194da87639 100644
--- a/synapse/util/distributor.py
+++ b/synapse/util/distributor.py
@@ -17,20 +17,18 @@ import logging
 
 from twisted.internet import defer
 
-from synapse.util import unwrapFirstError
-from synapse.util.logcontext import PreserveLoggingContext
+from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.util.logcontext import make_deferred_yieldable, run_in_background
 
 logger = logging.getLogger(__name__)
 
 
 def user_left_room(distributor, user, room_id):
-    with PreserveLoggingContext():
-        distributor.fire("user_left_room", user=user, room_id=room_id)
+    distributor.fire("user_left_room", user=user, room_id=room_id)
 
 
 def user_joined_room(distributor, user, room_id):
-    with PreserveLoggingContext():
-        distributor.fire("user_joined_room", user=user, room_id=room_id)
+    distributor.fire("user_joined_room", user=user, room_id=room_id)
 
 
 class Distributor(object):
@@ -44,9 +42,7 @@ class Distributor(object):
       model will do for today.
     """
 
-    def __init__(self, suppress_failures=True):
-        self.suppress_failures = suppress_failures
-
+    def __init__(self):
         self.signals = {}
         self.pre_registration = {}
 
@@ -56,7 +52,6 @@ class Distributor(object):
 
         self.signals[name] = Signal(
             name,
-            suppress_failures=self.suppress_failures,
         )
 
         if name in self.pre_registration:
@@ -75,10 +70,18 @@ class Distributor(object):
             self.pre_registration[name].append(observer)
 
     def fire(self, name, *args, **kwargs):
+        """Dispatches the given signal to the registered observers.
+
+        Runs the observers as a background process. Does not return a deferred.
+        """
         if name not in self.signals:
             raise KeyError("%r does not have a signal named %s" % (self, name))
 
-        return self.signals[name].fire(*args, **kwargs)
+        run_as_background_process(
+            name,
+            self.signals[name].fire,
+            *args, **kwargs
+        )
 
 
 class Signal(object):
@@ -91,9 +94,8 @@ class Signal(object):
     method into all of the observers.
     """
 
-    def __init__(self, name, suppress_failures):
+    def __init__(self, name):
         self.name = name
-        self.suppress_failures = suppress_failures
         self.observers = []
 
     def observe(self, observer):
@@ -103,7 +105,6 @@ class Signal(object):
         Each observer callable may return a Deferred."""
         self.observers.append(observer)
 
-    @defer.inlineCallbacks
     def fire(self, *args, **kwargs):
         """Invokes every callable in the observer list, passing in the args and
         kwargs. Exceptions thrown by observers are logged but ignored. It is
@@ -121,22 +122,17 @@ class Signal(object):
                         failure.type,
                         failure.value,
                         failure.getTracebackObject()))
-                if not self.suppress_failures:
-                    return failure
 
             return defer.maybeDeferred(observer, *args, **kwargs).addErrback(eb)
 
-        with PreserveLoggingContext():
-            deferreds = [
-                do(observer)
-                for observer in self.observers
-            ]
-
-            res = yield defer.gatherResults(
-                deferreds, consumeErrors=True
-            ).addErrback(unwrapFirstError)
+        deferreds = [
+            run_in_background(do, o)
+            for o in self.observers
+        ]
 
-        defer.returnValue(res)
+        return make_deferred_yieldable(defer.gatherResults(
+            deferreds, consumeErrors=True,
+        ))
 
     def __repr__(self):
         return "<Signal name=%r>" % (self.name,)
diff --git a/synapse/util/file_consumer.py b/synapse/util/file_consumer.py
index 3380970e4e..629ed44149 100644
--- a/synapse/util/file_consumer.py
+++ b/synapse/util/file_consumer.py
@@ -13,11 +13,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from twisted.internet import threads, reactor
+from six.moves import queue
 
-from synapse.util.logcontext import make_deferred_yieldable, run_in_background
+from twisted.internet import threads
 
-from six.moves import queue
+from synapse.util.logcontext import make_deferred_yieldable, run_in_background
 
 
 class BackgroundFileConsumer(object):
@@ -27,6 +27,7 @@ class BackgroundFileConsumer(object):
     Args:
         file_obj (file): The file like object to write to. Closed when
             finished.
+        reactor (twisted.internet.reactor): the Twisted reactor to use
     """
 
     # For PushProducers pause if we have this many unwritten slices
@@ -34,9 +35,11 @@ class BackgroundFileConsumer(object):
     # And resume once the size of the queue is less than this
     _RESUME_ON_QUEUE_SIZE = 2
 
-    def __init__(self, file_obj):
+    def __init__(self, file_obj, reactor):
         self._file_obj = file_obj
 
+        self._reactor = reactor
+
         # Producer we're registered with
         self._producer = None
 
@@ -71,7 +74,10 @@ class BackgroundFileConsumer(object):
         self._producer = producer
         self.streaming = streaming
         self._finished_deferred = run_in_background(
-            threads.deferToThread, self._writer
+            threads.deferToThreadPool,
+            self._reactor,
+            self._reactor.getThreadPool(),
+            self._writer,
         )
         if not streaming:
             self._producer.resumeProducing()
@@ -109,7 +115,7 @@ class BackgroundFileConsumer(object):
                 # producer.
                 if self._producer and self._paused_producer:
                     if self._bytes_queue.qsize() <= self._RESUME_ON_QUEUE_SIZE:
-                        reactor.callFromThread(self._resume_paused_producer)
+                        self._reactor.callFromThread(self._resume_paused_producer)
 
                 bytes = self._bytes_queue.get()
 
@@ -121,7 +127,7 @@ class BackgroundFileConsumer(object):
                 # If it's a pull producer then we need to explicitly ask for
                 # more stuff.
                 if not self.streaming and self._producer:
-                    reactor.callFromThread(self._producer.resumeProducing)
+                    self._reactor.callFromThread(self._producer.resumeProducing)
         except Exception as e:
             self._write_exception = e
             raise
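
[Editor's note: the point of threading the reactor through explicitly is testability: callers can now inject a deterministic fake reactor instead of relying on the global singleton. A hedged usage sketch; the file path is illustrative, and this assumes synapse is importable::

    from twisted.internet import reactor

    from synapse.util.file_consumer import BackgroundFileConsumer

    # hypothetical destination file; closed by the consumer when finished
    file_obj = open("/tmp/example-download", "wb")
    consumer = BackgroundFileConsumer(file_obj, reactor)
    # the consumer can now be registered with a producer; writes happen on
    # reactor.getThreadPool() via threads.deferToThreadPool, so a test can
    # substitute its own reactor and thread pool.
]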
diff --git a/synapse/util/frozenutils.py b/synapse/util/frozenutils.py
index 15f0a7ba9e..014edea971 100644
--- a/synapse/util/frozenutils.py
+++ b/synapse/util/frozenutils.py
@@ -13,10 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from frozendict import frozendict
-import simplejson as json
+from six import binary_type, text_type
 
-from six import string_types
+from canonicaljson import json
+from frozendict import frozendict
 
 
 def freeze(o):
@@ -26,7 +26,7 @@ def freeze(o):
     if isinstance(o, frozendict):
         return o
 
-    if isinstance(o, string_types):
+    if isinstance(o, (binary_type, text_type)):
         return o
 
     try:
@@ -41,7 +41,7 @@ def unfreeze(o):
     if isinstance(o, (dict, frozendict)):
         return dict({k: unfreeze(v) for k, v in o.items()})
 
-    if isinstance(o, string_types):
+    if isinstance(o, (binary_type, text_type)):
         return o
 
     try:
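
[Editor's note: the substance of this change is the type check: on Python 3, six's string_types covers only str, so bytes values fell through to the iteration branch below. A self-contained sketch of the difference, assuming six is installed::

    from __future__ import print_function

    from six import binary_type, string_types, text_type

    for value in [u"text", b"bytes"]:
        print(
            repr(value),
            isinstance(value, string_types),
            isinstance(value, (binary_type, text_type)),
        )
    # On Python 3 this prints:
    #   'text' True True
    #   b'bytes' False True
    # so the old check let bytes fall through to be handled as an iterable.
]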
diff --git a/synapse/util/httpresourcetree.py b/synapse/util/httpresourcetree.py
index e9f0f292ee..2d7ddc1cbe 100644
--- a/synapse/util/httpresourcetree.py
+++ b/synapse/util/httpresourcetree.py
@@ -12,10 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from twisted.web.resource import NoResource
-
 import logging
 
+from twisted.web.resource import NoResource
+
 logger = logging.getLogger(__name__)
 
 
diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py
index a58c723403..a0c2d37610 100644
--- a/synapse/util/logcontext.py
+++ b/synapse/util/logcontext.py
@@ -22,10 +22,10 @@ them.
 See doc/log_contexts.rst for details on how this works.
 """
 
-from twisted.internet import defer
-
-import threading
 import logging
+import threading
+
+from twisted.internet import defer
 
 logger = logging.getLogger(__name__)
 
@@ -49,17 +49,118 @@ except Exception:
         return None
 
 
+class ContextResourceUsage(object):
+    """Object for tracking the resources used by a log context
+
+    Attributes:
+        ru_utime (float): user CPU time (in seconds)
+        ru_stime (float): system CPU time (in seconds)
+        db_txn_count (int): number of database transactions done
+        db_sched_duration_sec (float): amount of time spent waiting for a
+            database connection
+        db_txn_duration_sec (float): amount of time spent doing database
+            transactions (excluding scheduling time)
+        evt_db_fetch_count (int): number of events requested from the database
+    """
+
+    __slots__ = [
+        "ru_stime", "ru_utime",
+        "db_txn_count", "db_txn_duration_sec", "db_sched_duration_sec",
+        "evt_db_fetch_count",
+    ]
+
+    def __init__(self, copy_from=None):
+        """Create a new ContextResourceUsage
+
+        Args:
+            copy_from (ContextResourceUsage|None): if not None, an object to
+                copy stats from
+        """
+        if copy_from is None:
+            self.reset()
+        else:
+            self.ru_utime = copy_from.ru_utime
+            self.ru_stime = copy_from.ru_stime
+            self.db_txn_count = copy_from.db_txn_count
+
+            self.db_txn_duration_sec = copy_from.db_txn_duration_sec
+            self.db_sched_duration_sec = copy_from.db_sched_duration_sec
+            self.evt_db_fetch_count = copy_from.evt_db_fetch_count
+
+    def copy(self):
+        return ContextResourceUsage(copy_from=self)
+
+    def reset(self):
+        self.ru_stime = 0.
+        self.ru_utime = 0.
+        self.db_txn_count = 0
+
+        self.db_txn_duration_sec = 0
+        self.db_sched_duration_sec = 0
+        self.evt_db_fetch_count = 0
+
+    def __repr__(self):
+        return ("<ContextResourceUsage ru_stime='%r', ru_utime='%r', "
+                "db_txn_count='%r', db_txn_duration_sec='%r', "
+                "db_sched_duration_sec='%r', evt_db_fetch_count='%r'>") % (
+                    self.ru_stime,
+                    self.ru_utime,
+                    self.db_txn_count,
+                    self.db_txn_duration_sec,
+                    self.db_sched_duration_sec,
+                    self.evt_db_fetch_count,)
+
+    def __iadd__(self, other):
+        """Add another ContextResourceUsage's stats to this one's.
+
+        Args:
+            other (ContextResourceUsage): the other resource usage object
+        """
+        self.ru_utime += other.ru_utime
+        self.ru_stime += other.ru_stime
+        self.db_txn_count += other.db_txn_count
+        self.db_txn_duration_sec += other.db_txn_duration_sec
+        self.db_sched_duration_sec += other.db_sched_duration_sec
+        self.evt_db_fetch_count += other.evt_db_fetch_count
+        return self
+
+    def __isub__(self, other):
+        self.ru_utime -= other.ru_utime
+        self.ru_stime -= other.ru_stime
+        self.db_txn_count -= other.db_txn_count
+        self.db_txn_duration_sec -= other.db_txn_duration_sec
+        self.db_sched_duration_sec -= other.db_sched_duration_sec
+        self.evt_db_fetch_count -= other.evt_db_fetch_count
+        return self
+
+    def __add__(self, other):
+        res = ContextResourceUsage(copy_from=self)
+        res += other
+        return res
+
+    def __sub__(self, other):
+        res = ContextResourceUsage(copy_from=self)
+        res -= other
+        return res
+
+
 class LoggingContext(object):
     """Additional context for log formatting. Contexts are scoped within a
     "with" block.
 
+    If a parent is given when creating a new context, then:
+        - logging fields are copied from the parent to the new context on entry
+        - when the new context exits, the cpu usage stats are copied from the
+          child to the parent
+
     Args:
         name (str): Name for the context for debugging.
+        parent_context (LoggingContext|None): The parent of the new context
     """
 
     __slots__ = [
-        "previous_context", "name", "ru_stime", "ru_utime",
-        "db_txn_count", "db_txn_duration_sec", "db_sched_duration_sec",
+        "previous_context", "name", "parent_context",
+        "_resource_usage",
         "usage_start",
         "main_thread", "alive",
         "request", "tag",
@@ -90,24 +191,21 @@ class LoggingContext(object):
         def add_database_scheduled(self, sched_sec):
             pass
 
+        def record_event_fetch(self, event_count):
+            pass
+
         def __nonzero__(self):
             return False
         __bool__ = __nonzero__  # python3
 
     sentinel = Sentinel()
 
-    def __init__(self, name=None):
+    def __init__(self, name=None, parent_context=None):
         self.previous_context = LoggingContext.current_context()
         self.name = name
-        self.ru_stime = 0.
-        self.ru_utime = 0.
-        self.db_txn_count = 0
-
-        # sec spent waiting for db txns, excluding scheduling time
-        self.db_txn_duration_sec = 0
 
-        # sec spent waiting for db txns to be scheduled
-        self.db_sched_duration_sec = 0
+        # track the resources used by this context so far
+        self._resource_usage = ContextResourceUsage()
 
         # If alive has the thread resource usage when the logcontext last
         # became active.
@@ -118,6 +216,8 @@ class LoggingContext(object):
         self.tag = ""
         self.alive = True
 
+        self.parent_context = parent_context
+
     def __str__(self):
         return "%s@%x" % (self.name, id(self))
 
@@ -155,6 +255,10 @@ class LoggingContext(object):
                 self.previous_context, old_context
             )
         self.alive = True
+
+        if self.parent_context is not None:
+            self.parent_context.copy_to(self)
+
         return self
 
     def __exit__(self, type, value, traceback):
@@ -176,6 +280,13 @@ class LoggingContext(object):
         self.previous_context = None
         self.alive = False
 
+        # if we have a parent, pass our CPU usage stats on
+        if self.parent_context is not None:
+            self.parent_context._resource_usage += self._resource_usage
+
+            # reset them in case we get entered again
+            self._resource_usage.reset()
+
     def copy_to(self, record):
         """Copy logging fields from this context to a log record or
         another LoggingContext
@@ -200,39 +311,43 @@ class LoggingContext(object):
             logger.warning("Stopped logcontext %s on different thread", self)
             return
 
-        # When we stop, let's record the resource used since we started
-        if self.usage_start:
-            usage_end = get_thread_resource_usage()
+        # When we stop, let's record the cpu used since we started
+        if not self.usage_start:
+            logger.warning(
+                "Called stop on logcontext %s without calling start", self,
+            )
+            return
+
+        usage_end = get_thread_resource_usage()
 
-            self.ru_utime += usage_end.ru_utime - self.usage_start.ru_utime
-            self.ru_stime += usage_end.ru_stime - self.usage_start.ru_stime
+        self._resource_usage.ru_utime += usage_end.ru_utime - self.usage_start.ru_utime
+        self._resource_usage.ru_stime += usage_end.ru_stime - self.usage_start.ru_stime
 
-            self.usage_start = None
-        else:
-            logger.warning("Called stop on logcontext %s without calling start", self)
+        self.usage_start = None
 
     def get_resource_usage(self):
-        """Get CPU time used by this logcontext so far.
+        """Get resources used by this logcontext so far.
 
         Returns:
-            tuple[float, float]: The user and system CPU usage in seconds
+            ContextResourceUsage: a *copy* of the object tracking resource
+                usage so far
         """
-        ru_utime = self.ru_utime
-        ru_stime = self.ru_stime
+        # we always return a copy, for consistency
+        res = self._resource_usage.copy()
 
         # If we are on the correct thread and we're currently running then we
         # can include resource usage so far.
         is_main_thread = threading.current_thread() is self.main_thread
         if self.alive and self.usage_start and is_main_thread:
             current = get_thread_resource_usage()
-            ru_utime += current.ru_utime - self.usage_start.ru_utime
-            ru_stime += current.ru_stime - self.usage_start.ru_stime
+            res.ru_utime += current.ru_utime - self.usage_start.ru_utime
+            res.ru_stime += current.ru_stime - self.usage_start.ru_stime
 
-        return ru_utime, ru_stime
+        return res
 
     def add_database_transaction(self, duration_sec):
-        self.db_txn_count += 1
-        self.db_txn_duration_sec += duration_sec
+        self._resource_usage.db_txn_count += 1
+        self._resource_usage.db_txn_duration_sec += duration_sec
 
     def add_database_scheduled(self, sched_sec):
         """Record a use of the database pool
@@ -241,7 +356,15 @@ class LoggingContext(object):
             sched_sec (float): number of seconds it took us to get a
                 connection
         """
-        self.db_sched_duration_sec += sched_sec
+        self._resource_usage.db_sched_duration_sec += sched_sec
+
+    def record_event_fetch(self, event_count):
+        """Record a number of events being fetched from the db
+
+        Args:
+            event_count (int): number of events being fetched
+        """
+        self._resource_usage.evt_db_fetch_count += event_count
 
 
 class LoggingContextFilter(logging.Filter):
@@ -262,7 +385,13 @@ class LoggingContextFilter(logging.Filter):
         context = LoggingContext.current_context()
         for key, value in self.defaults.items():
             setattr(record, key, value)
-        context.copy_to(record)
+
+        # context should never be None, but if it somehow is, we end up in
+        # a death spiral of infinite loops, so let's check, for robustness'
+        # sake.
+        if context is not None:
+            context.copy_to(record)
+
         return True
 
 
@@ -273,7 +402,9 @@ class PreserveLoggingContext(object):
 
     __slots__ = ["current_context", "new_context", "has_parent"]
 
-    def __init__(self, new_context=LoggingContext.sentinel):
+    def __init__(self, new_context=None):
+        if new_context is None:
+            new_context = LoggingContext.sentinel
         self.new_context = new_context
 
     def __enter__(self):
@@ -403,7 +534,7 @@ _to_ignore = [
     "synapse.util.logcontext",
     "synapse.http.server",
     "synapse.storage._base",
-    "synapse.util.async",
+    "synapse.util.async_helpers",
 ]
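
[Editor's note: the payoff of bundling the stats into ContextResourceUsage is that call sites can treat resource usage as a value with +/- arithmetic, rather than copying five fields by hand. A trimmed sketch of the pattern, using only the CPU fields rather than Synapse's full class::

    class Usage(object):
        def __init__(self, ru_utime=0.0, ru_stime=0.0):
            self.ru_utime = ru_utime
            self.ru_stime = ru_stime

        def __sub__(self, other):
            return Usage(
                self.ru_utime - other.ru_utime,
                self.ru_stime - other.ru_stime,
            )

    start = Usage(1.0, 0.5)    # snapshot taken when a block is entered
    current = Usage(1.7, 0.6)  # snapshot taken when it exits
    delta = current - start    # ~0.7s user, ~0.1s system spent in the block
]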
 
 
diff --git a/synapse/util/logformatter.py b/synapse/util/logformatter.py
index 3e42868ea9..a46bc47ce3 100644
--- a/synapse/util/logformatter.py
+++ b/synapse/util/logformatter.py
@@ -14,10 +14,11 @@
 # limitations under the License.
 
 
-from six import StringIO
 import logging
 import traceback
 
+from six import StringIO
+
 
 class LogFormatter(logging.Formatter):
     """Log formatter which gives more detail for exceptions
diff --git a/synapse/util/logutils.py b/synapse/util/logutils.py
index 03249c5dc8..ef31458226 100644
--- a/synapse/util/logutils.py
+++ b/synapse/util/logutils.py
@@ -14,13 +14,13 @@
 # limitations under the License.
 
 
-from inspect import getcallargs
-from functools import wraps
-
-import logging
 import inspect
+import logging
 import time
+from functools import wraps
+from inspect import getcallargs
 
+from six import PY3
 
 _TIME_FUNC_ID = 0
 
@@ -30,8 +30,12 @@ def _log_debug_as_f(f, msg, msg_args):
     logger = logging.getLogger(name)
 
     if logger.isEnabledFor(logging.DEBUG):
-        lineno = f.func_code.co_firstlineno
-        pathname = f.func_code.co_filename
+        if PY3:
+            lineno = f.__code__.co_firstlineno
+            pathname = f.__code__.co_filename
+        else:
+            lineno = f.func_code.co_firstlineno
+            pathname = f.func_code.co_filename
 
         record = logging.LogRecord(
             name=name,
diff --git a/synapse/util/manhole.py b/synapse/util/manhole.py
index 97e0f00b67..14be3c7396 100644
--- a/synapse/util/manhole.py
+++ b/synapse/util/manhole.py
@@ -12,11 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from twisted.conch.manhole import ColoredManhole
-from twisted.conch.insults import insults
 from twisted.conch import manhole_ssh
-from twisted.cred import checkers, portal
+from twisted.conch.insults import insults
+from twisted.conch.manhole import ColoredManhole
 from twisted.conch.ssh.keys import Key
+from twisted.cred import checkers, portal
 
 PUBLIC_KEY = (
     "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEArzJx8OYOnJmzf4tfBEvLi8DVPrJ3/c9k2I/Az"
diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py
index 1ba7d65c7c..97f1267380 100644
--- a/synapse/util/metrics.py
+++ b/synapse/util/metrics.py
@@ -13,14 +13,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from twisted.internet import defer
+import logging
+from functools import wraps
 
 from prometheus_client import Counter
-from synapse.util.logcontext import LoggingContext
 
-from functools import wraps
-import logging
+from twisted.internet import defer
 
+from synapse.util.logcontext import LoggingContext
 
 logger = logging.getLogger(__name__)
 
@@ -60,10 +60,9 @@ def measure_func(name):
 
 class Measure(object):
     __slots__ = [
-        "clock", "name", "start_context", "start", "new_context", "ru_utime",
-        "ru_stime",
-        "db_txn_count", "db_txn_duration_sec", "db_sched_duration_sec",
+        "clock", "name", "start_context", "start",
         "created_context",
+        "start_usage",
     ]
 
     def __init__(self, clock, name):
@@ -81,10 +80,7 @@ class Measure(object):
             self.start_context.__enter__()
             self.created_context = True
 
-        self.ru_utime, self.ru_stime = self.start_context.get_resource_usage()
-        self.db_txn_count = self.start_context.db_txn_count
-        self.db_txn_duration_sec = self.start_context.db_txn_duration_sec
-        self.db_sched_duration_sec = self.start_context.db_sched_duration_sec
+        self.start_usage = self.start_context.get_resource_usage()
 
     def __exit__(self, exc_type, exc_val, exc_tb):
         if isinstance(exc_type, Exception) or not self.start_context:
@@ -108,15 +104,19 @@ class Measure(object):
             logger.warn("Expected context. (%r)", self.name)
             return
 
-        ru_utime, ru_stime = context.get_resource_usage()
-
-        block_ru_utime.labels(self.name).inc(ru_utime - self.ru_utime)
-        block_ru_stime.labels(self.name).inc(ru_stime - self.ru_stime)
-        block_db_txn_count.labels(self.name).inc(context.db_txn_count - self.db_txn_count)
-        block_db_txn_duration.labels(self.name).inc(
-            context.db_txn_duration_sec - self.db_txn_duration_sec)
-        block_db_sched_duration.labels(self.name).inc(
-            context.db_sched_duration_sec - self.db_sched_duration_sec)
+        current = context.get_resource_usage()
+        usage = current - self.start_usage
+        try:
+            block_ru_utime.labels(self.name).inc(usage.ru_utime)
+            block_ru_stime.labels(self.name).inc(usage.ru_stime)
+            block_db_txn_count.labels(self.name).inc(usage.db_txn_count)
+            block_db_txn_duration.labels(self.name).inc(usage.db_txn_duration_sec)
+            block_db_sched_duration.labels(self.name).inc(usage.db_sched_duration_sec)
+        except ValueError:
+            logger.warn(
+                "Failed to save metrics! OLD: %r, NEW: %r",
+                self.start_usage, current
+            )
 
         if self.created_context:
             self.start_context.__exit__(exc_type, exc_val, exc_tb)
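
[Editor's note: the new try/except exists because prometheus_client counters refuse to go backwards: Counter.inc() raises ValueError for a negative amount, which is what a skewed usage delta would produce. A self-contained sketch of the guard; the counter name is illustrative::

    from __future__ import print_function

    from prometheus_client import Counter

    block_ru_utime = Counter(
        "example_block_ru_utime", "user CPU seconds", ["block_name"],
    )

    def record_cpu(name, start_utime, end_utime):
        delta = end_utime - start_utime
        try:
            block_ru_utime.labels(name).inc(delta)
        except ValueError:
            # mirrors the logger.warn above: drop the sample, don't crash
            print("Failed to save metrics! delta=%r for %s" % (delta, name))

    record_cpu("handle_request", 1.0, 1.25)  # recorded
    record_cpu("handle_request", 1.25, 1.0)  # negative: warns, not recorded
]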
diff --git a/synapse/util/msisdn.py b/synapse/util/msisdn.py
index 607161e7f0..a6c30e5265 100644
--- a/synapse/util/msisdn.py
+++ b/synapse/util/msisdn.py
@@ -14,6 +14,7 @@
 # limitations under the License.
 
 import phonenumbers
+
 from synapse.api.errors import SynapseError
 
 
diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py
index 0ab63c3d7d..7deb38f2a7 100644
--- a/synapse/util/ratelimitutils.py
+++ b/synapse/util/ratelimitutils.py
@@ -13,21 +13,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import collections
+import contextlib
+import logging
+
 from twisted.internet import defer
 
 from synapse.api.errors import LimitExceededError
-
-from synapse.util.async import sleep
 from synapse.util.logcontext import (
-    run_in_background, make_deferred_yieldable,
     PreserveLoggingContext,
+    make_deferred_yieldable,
+    run_in_background,
 )
 
-import collections
-import contextlib
-import logging
-
-
 logger = logging.getLogger(__name__)
 
 
@@ -94,13 +92,22 @@ class _PerHostRatelimiter(object):
 
         self.window_size = window_size
         self.sleep_limit = sleep_limit
-        self.sleep_msec = sleep_msec
+        self.sleep_sec = sleep_msec / 1000.0
         self.reject_limit = reject_limit
         self.concurrent_requests = concurrent_requests
 
+        # request_id objects for requests which have been slept
         self.sleeping_requests = set()
+
+        # map from request_id object to Deferred for requests which are ready
+        # for processing but have been queued
         self.ready_request_queue = collections.OrderedDict()
+
+        # request id objects for requests which are in progress
         self.current_processing = set()
+
+        # times at which we have recently (within the last window_size ms)
+        # received requests.
         self.request_times = []
 
     @contextlib.contextmanager
@@ -119,11 +126,15 @@ class _PerHostRatelimiter(object):
 
     def _on_enter(self, request_id):
         time_now = self.clock.time_msec()
+
+        # remove any entries from request_times which aren't within the window
         self.request_times[:] = [
             r for r in self.request_times
             if time_now - r < self.window_size
         ]
 
+        # reject the request if we already have too many queued up (either
+        # sleeping or in the ready queue).
         queue_size = len(self.ready_request_queue) + len(self.sleeping_requests)
         if queue_size > self.reject_limit:
             raise LimitExceededError(
@@ -136,9 +147,13 @@ class _PerHostRatelimiter(object):
 
         def queue_request():
             if len(self.current_processing) > self.concurrent_requests:
-                logger.debug("Ratelimit [%s]: Queue req", id(request_id))
                 queue_defer = defer.Deferred()
                 self.ready_request_queue[request_id] = queue_defer
+                logger.info(
+                    "Ratelimiter: queueing request (queue now %i items)",
+                    len(self.ready_request_queue),
+                )
+
                 return queue_defer
             else:
                 return defer.succeed(None)
@@ -150,10 +165,9 @@ class _PerHostRatelimiter(object):
 
         if len(self.request_times) > self.sleep_limit:
             logger.debug(
-                "Ratelimit [%s]: sleeping req",
-                id(request_id),
+                "Ratelimiter: sleeping request for %f sec", self.sleep_sec,
             )
-            ret_defer = run_in_background(sleep, self.sleep_msec / 1000.0)
+            ret_defer = run_in_background(self.clock.sleep, self.sleep_sec)
 
             self.sleeping_requests.add(request_id)
 
@@ -202,11 +216,8 @@ class _PerHostRatelimiter(object):
         )
         self.current_processing.discard(request_id)
         try:
-            request_id, deferred = self.ready_request_queue.popitem()
-
-            # XXX: why do we do the following? the on_start callback above will
-            # do it for us.
-            self.current_processing.add(request_id)
+            # start processing the next item on the queue.
+            _, deferred = self.ready_request_queue.popitem(last=False)
 
             with PreserveLoggingContext():
                 deferred.callback(None)
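
[Editor's note: the popitem(last=False) change is a fairness fix: OrderedDict.popitem() defaults to LIFO, so the old code woke the most recently queued request first. A quick demonstration::

    from __future__ import print_function

    import collections

    queue = collections.OrderedDict()
    queue["req1"] = "deferred1"
    queue["req2"] = "deferred2"

    print(queue.popitem(last=False))  # ('req1', 'deferred1'): FIFO, as above
    print(queue.popitem())            # ('req2', 'deferred2'): LIFO default
]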
diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py
index 4e93f69d3a..8a3a06fd74 100644
--- a/synapse/util/retryutils.py
+++ b/synapse/util/retryutils.py
@@ -12,14 +12,13 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import synapse.util.logcontext
-from twisted.internet import defer
-
-from synapse.api.errors import CodeMessageException
-
 import logging
 import random
 
+from twisted.internet import defer
+
+import synapse.util.logcontext
+from synapse.api.errors import CodeMessageException
 
 logger = logging.getLogger(__name__)
 
diff --git a/synapse/util/rlimit.py b/synapse/util/rlimit.py
index f4a9abf83f..6c0f2bb0cf 100644
--- a/synapse/util/rlimit.py
+++ b/synapse/util/rlimit.py
@@ -13,9 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import resource
 import logging
-
+import resource
 
 logger = logging.getLogger("synapse.app.homeserver")
 
diff --git a/synapse/util/stringutils.py b/synapse/util/stringutils.py
index b98b9dc6e4..6f318c6a29 100644
--- a/synapse/util/stringutils.py
+++ b/synapse/util/stringutils.py
@@ -15,6 +15,8 @@
 
 import random
 import string
+
+from six import PY3
 from six.moves import range
 
 _string_with_symbols = (
@@ -33,6 +35,17 @@ def random_string_with_symbols(length):
 
 
 def is_ascii(s):
+
+    if PY3:
+        if isinstance(s, bytes):
+            try:
+                s.decode('ascii').encode('ascii')
+            except UnicodeDecodeError:
+                return False
+            except UnicodeEncodeError:
+                return False
+            return True
+
     try:
         s.encode("ascii")
     except UnicodeEncodeError:
@@ -48,6 +61,9 @@ def to_ascii(s):
 
     If given None then will return None.
     """
+    if PY3:
+        return s
+
     if s is None:
         return None
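
[Editor's note: on Python 3 a bytes value cannot usefully be .encode()d, so is_ascii grows a branch that decodes first; non-ASCII input then surfaces as UnicodeDecodeError rather than UnicodeEncodeError. A condensed sketch of the resulting behaviour, not the exact function above::

    from __future__ import print_function

    def is_ascii_sketch(s):
        if isinstance(s, bytes):
            try:
                s.decode('ascii')
            except UnicodeDecodeError:
                return False
            return True
        try:
            s.encode('ascii')
        except UnicodeEncodeError:
            return False
        return True

    print(is_ascii_sketch(b"ok"))       # True
    print(is_ascii_sketch(b"\xff"))     # False: UnicodeDecodeError path
    print(is_ascii_sketch(u"caf\xe9"))  # False: UnicodeEncodeError path
]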
 
diff --git a/synapse/util/versionstring.py b/synapse/util/versionstring.py
index 52086df465..3baba3225a 100644
--- a/synapse/util/versionstring.py
+++ b/synapse/util/versionstring.py
@@ -14,9 +14,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import subprocess
-import os
 import logging
+import os
+import subprocess
 
 logger = logging.getLogger(__name__)
 
@@ -30,7 +30,7 @@ def get_version_string(module):
                 ['git', 'rev-parse', '--abbrev-ref', 'HEAD'],
                 stderr=null,
                 cwd=cwd,
-            ).strip()
+            ).strip().decode('ascii')
             git_branch = "b=" + git_branch
         except subprocess.CalledProcessError:
             git_branch = ""
@@ -40,7 +40,7 @@ def get_version_string(module):
                 ['git', 'describe', '--exact-match'],
                 stderr=null,
                 cwd=cwd,
-            ).strip()
+            ).strip().decode('ascii')
             git_tag = "t=" + git_tag
         except subprocess.CalledProcessError:
             git_tag = ""
@@ -50,7 +50,7 @@ def get_version_string(module):
                 ['git', 'rev-parse', '--short', 'HEAD'],
                 stderr=null,
                 cwd=cwd,
-            ).strip()
+            ).strip().decode('ascii')
         except subprocess.CalledProcessError:
             git_commit = ""
 
@@ -60,7 +60,7 @@ def get_version_string(module):
                 ['git', 'describe', '--dirty=' + dirty_string],
                 stderr=null,
                 cwd=cwd,
-            ).strip().endswith(dirty_string)
+            ).strip().decode('ascii').endswith(dirty_string)
 
             git_dirty = "dirty" if is_dirty else ""
         except subprocess.CalledProcessError:
@@ -77,8 +77,8 @@ def get_version_string(module):
                 "%s (%s)" % (
                     module.__version__, git_version,
                 )
-            ).encode("ascii")
+            )
     except Exception as e:
         logger.info("Failed to check for git repository: %s", e)
 
-    return module.__version__.encode("ascii")
+    return module.__version__
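
[Editor's note: all four .decode('ascii') calls address the same Python 3 change: subprocess.check_output returns bytes, which cannot be concatenated with str (e.g. "b=" + git_branch). Sketch, assuming it is run inside a git checkout::

    from __future__ import print_function

    import subprocess

    out = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
    branch = out.strip().decode('ascii')  # bytes -> str on Python 3
    print("b=" + branch)
]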
diff --git a/synapse/visibility.py b/synapse/visibility.py
index aaca2c584c..d4680863d3 100644
--- a/synapse/visibility.py
+++ b/synapse/visibility.py
@@ -13,14 +13,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from twisted.internet import defer
-
-from synapse.api.constants import Membership, EventTypes
+import logging
+import operator
 
-from synapse.util.logcontext import make_deferred_yieldable, preserve_fn
+from six import iteritems, itervalues
+from six.moves import map
 
-import logging
+from twisted.internet import defer
 
+from synapse.api.constants import EventTypes, Membership
+from synapse.events.utils import prune_event
+from synapse.types import get_domain_from_id
 
 logger = logging.getLogger(__name__)
 
@@ -43,59 +46,66 @@ MEMBERSHIP_PRIORITY = (
 
 
 @defer.inlineCallbacks
-def filter_events_for_clients(store, user_tuples, events, event_id_to_state,
-                              always_include_ids=frozenset()):
-    """ Returns dict of user_id -> list of events that user is allowed to
-    see.
+def filter_events_for_client(store, user_id, events, is_peeking=False,
+                             always_include_ids=frozenset()):
+    """
+    Check which events a user is allowed to see
 
     Args:
-        user_tuples (str, bool): (user id, is_peeking) for each user to be
-            checked. is_peeking should be true if:
-            * the user is not currently a member of the room, and:
-            * the user has not been a member of the room since the
-            given events
-        events ([synapse.events.EventBase]): list of events to filter
+        store (synapse.storage.DataStore): our datastore (can also be a worker
+            store)
+        user_id(str): user id to be checked
+        events(list[synapse.events.EventBase]): sequence of events to be checked
+        is_peeking(bool): should be True if:
+          * the user is not currently a member of the room, and:
+          * the user has not been a member of the room since the given
+            events
         always_include_ids (set(event_id)): set of event ids to specifically
             include (unless sender is ignored)
-    """
-    forgotten = yield make_deferred_yieldable(defer.gatherResults([
-        defer.maybeDeferred(
-            preserve_fn(store.who_forgot_in_room),
-            room_id,
-        )
-        for room_id in frozenset(e.room_id for e in events)
-    ], consumeErrors=True))
 
-    # Set of membership event_ids that have been forgotten
-    event_id_forgotten = frozenset(
-        row["event_id"] for rows in forgotten for row in rows
+    Returns:
+        Deferred[list[synapse.events.EventBase]]
+    """
+    types = (
+        (EventTypes.RoomHistoryVisibility, ""),
+        (EventTypes.Member, user_id),
+    )
+    event_id_to_state = yield store.get_state_for_events(
+        frozenset(e.event_id for e in events),
+        types=types,
     )
 
-    ignore_dict_content = yield store.get_global_account_data_by_type_for_users(
-        "m.ignored_user_list", user_ids=[user_id for user_id, _ in user_tuples]
+    ignore_dict_content = yield store.get_global_account_data_by_type_for_user(
+        "m.ignored_user_list", user_id,
     )
 
     # FIXME: This will explode if people upload something incorrect.
-    ignore_dict = {
-        user_id: frozenset(
-            content.get("ignored_users", {}).keys() if content else []
-        )
-        for user_id, content in ignore_dict_content.items()
-    }
+    ignore_list = frozenset(
+        ignore_dict_content.get("ignored_users", {}).keys()
+        if ignore_dict_content else []
+    )
+
+    erased_senders = yield store.are_users_erased((e.sender for e in events))
 
-    def allowed(event, user_id, is_peeking, ignore_list):
+    def allowed(event):
         """
         Args:
             event (synapse.events.EventBase): event to check
-            user_id (str)
-            is_peeking (bool)
-            ignore_list (list): list of users to ignore
+
+        Returns:
+            None|EventBase:
+               None if the user cannot see this event at all
+
+               a redacted copy of the event if they can only see a redacted
+               version
+
+               the original event if they can see it as normal.
         """
         if not event.is_state() and event.sender in ignore_list:
-            return False
+            return None
 
         if event.event_id in always_include_ids:
-            return True
+            return event
 
         state = event_id_to_state[event.event_id]
 
@@ -109,10 +119,6 @@ def filter_events_for_clients(store, user_tuples, events, event_id_to_state,
         if visibility not in VISIBILITY_PRIORITY:
             visibility = "shared"
 
-        # if it was world_readable, it's easy: everyone can read it
-        if visibility == "world_readable":
-            return True
-
         # Always allow history visibility events on boundaries. This is done
         # by setting the effective visibility to the least restrictive
         # of the old vs new.
@@ -146,7 +152,7 @@ def filter_events_for_clients(store, user_tuples, events, event_id_to_state,
             if membership == "leave" and (
                 prev_membership == "join" or prev_membership == "invite"
             ):
-                return True
+                return event
 
             new_priority = MEMBERSHIP_PRIORITY.index(membership)
             old_priority = MEMBERSHIP_PRIORITY.index(prev_membership)
@@ -157,70 +163,203 @@ def filter_events_for_clients(store, user_tuples, events, event_id_to_state,
         if membership is None:
             membership_event = state.get((EventTypes.Member, user_id), None)
             if membership_event:
-                if membership_event.event_id not in event_id_forgotten:
-                    membership = membership_event.membership
+                membership = membership_event.membership
 
         # if the user was a member of the room at the time of the event,
         # they can see it.
         if membership == Membership.JOIN:
-            return True
+            return event
+
+        # otherwise, it depends on the room visibility.
 
         if visibility == "joined":
             # we weren't a member at the time of the event, so we can't
             # see this event.
-            return False
+            return None
 
         elif visibility == "invited":
             # user can also see the event if they were *invited* at the time
             # of the event.
-            return membership == Membership.INVITE
-
-        else:
-            # visibility is shared: user can also see the event if they have
-            # become a member since the event
+            return (
+                event if membership == Membership.INVITE else None
+            )
+
+        elif visibility == "shared" and is_peeking:
+            # if the visibility is shared, users cannot see the event unless
+            # they have *subsequently* joined the room (or were members at the
+            # time, of course)
             #
             # XXX: if the user has subsequently joined and then left again,
             # ideally we would share history up to the point they left. But
-            # we don't know when they left.
-            return not is_peeking
+            # we don't know when they left. We just treat it as though they
+            # never joined, and restrict access.
+            return None
 
-    defer.returnValue({
-        user_id: [
-            event
-            for event in events
-            if allowed(event, user_id, is_peeking, ignore_dict.get(user_id, []))
-        ]
-        for user_id, is_peeking in user_tuples
-    })
+        # the visibility is either shared or world_readable, and the user was
+        # not a member at the time. We allow it, unless the original sender
+        # has requested that their data be erased, in which case we return
+        # a redacted version.
+        if erased_senders[event.sender]:
+            return prune_event(event)
 
+        return event
 
-@defer.inlineCallbacks
-def filter_events_for_client(store, user_id, events, is_peeking=False,
-                             always_include_ids=frozenset()):
-    """
-    Check which events a user is allowed to see
+    # check each event: gives an iterable[None|EventBase]
+    filtered_events = map(allowed, events)
 
-    Args:
-        user_id(str): user id to be checked
-        events([synapse.events.EventBase]): list of events to be checked
-        is_peeking(bool): should be True if:
-          * the user is not currently a member of the room, and:
-          * the user has not been a member of the room since the given
-            events
+    # remove the None entries
+    filtered_events = filter(operator.truth, filtered_events)
 
-    Returns:
-        [synapse.events.EventBase]
-    """
-    types = (
-        (EventTypes.RoomHistoryVisibility, ""),
-        (EventTypes.Member, user_id),
+    # we turn it into a list before returning it.
+    defer.returnValue(list(filtered_events))
+
+
+@defer.inlineCallbacks
+def filter_events_for_server(store, server_name, events):
+    # Whatever else we do, we need to check for senders which have requested
+    # erasure of their data.
+    erased_senders = yield store.are_users_erased(
+        (e.sender for e in events),
     )
-    event_id_to_state = yield store.get_state_for_events(
+
+    def redact_disallowed(event, state):
+        # if the sender has been gdpr17ed, always return a redacted
+        # copy of the event.
+        if erased_senders[event.sender]:
+            logger.info(
+                "Sender of %s has been erased, redacting",
+                event.event_id,
+            )
+            return prune_event(event)
+
+        # state will be None if we decided we didn't need to filter by
+        # room membership.
+        if not state:
+            return event
+
+        history = state.get((EventTypes.RoomHistoryVisibility, ''), None)
+        if history:
+            visibility = history.content.get("history_visibility", "shared")
+            if visibility in ["invited", "joined"]:
+                # We now loop through all state events looking for
+                # membership states for the requesting server to determine
+                # if the server is either in the room or has been invited
+                # into the room.
+                for ev in itervalues(state):
+                    if ev.type != EventTypes.Member:
+                        continue
+                    try:
+                        domain = get_domain_from_id(ev.state_key)
+                    except Exception:
+                        continue
+
+                    if domain != server_name:
+                        continue
+
+                    memtype = ev.membership
+                    if memtype == Membership.JOIN:
+                        return event
+                    elif memtype == Membership.INVITE:
+                        if visibility == "invited":
+                            return event
+                else:
+                    # server has no users in the room: redact
+                    return prune_event(event)
+
+        return event
+
+    # Next, let's check whether all the events have a history visibility
+    # of "shared" or "world_readable". If that's the case then we don't
+    # need to check membership (as we know the server is in the room).
+    event_to_state_ids = yield store.get_state_ids_for_events(
         frozenset(e.event_id for e in events),
-        types=types
+        types=(
+            (EventTypes.RoomHistoryVisibility, ""),
+        )
     )
-    res = yield filter_events_for_clients(
-        store, [(user_id, is_peeking)], events, event_id_to_state,
-        always_include_ids=always_include_ids,
+
+    visibility_ids = set()
+    for sids in itervalues(event_to_state_ids):
+        hist = sids.get((EventTypes.RoomHistoryVisibility, ""))
+        if hist:
+            visibility_ids.add(hist)
+
+    # If we failed to find any history visibility events then the default
+    # is "shared" visiblity.
+    if not visibility_ids:
+        all_open = True
+    else:
+        event_map = yield store.get_events(visibility_ids)
+        all_open = all(
+            e.content.get("history_visibility") in (None, "shared", "world_readable")
+            for e in itervalues(event_map)
+        )
+
+    if all_open:
+        # all the history_visibility state affecting these events is open, so
+        # we don't need to filter by membership state. We *do* need to check
+        # for user erasure, though.
+        if erased_senders:
+            events = [
+                redact_disallowed(e, None)
+                for e in events
+            ]
+
+        defer.returnValue(events)
+
+    # Ok, so we're dealing with events that have non-trivial visibility
+    # rules, so we need to also get the memberships of the room.
+
+    # first, for each event we're wanting to return, get the event_ids
+    # of the history vis and membership state at those events.
+    event_to_state_ids = yield store.get_state_ids_for_events(
+        frozenset(e.event_id for e in events),
+        types=(
+            (EventTypes.RoomHistoryVisibility, ""),
+            (EventTypes.Member, None),
+        )
     )
-    defer.returnValue(res.get(user_id, []))
+
+    # We only want to pull out member events that correspond to the
+    # server's domain.
+    #
+    # event_to_state_ids contains lots of duplicates, so it turns out to be
+    # cheaper to build a complete set of unique
+    # ((type, state_key), event_id) tuples, and then filter out the ones we
+    # don't want.
+    #
+    state_key_to_event_id_set = {
+        e
+        for key_to_eid in itervalues(event_to_state_ids)
+        for e in key_to_eid.items()
+    }
+
+    def include(typ, state_key):
+        if typ != EventTypes.Member:
+            return True
+
+        # we avoid using get_domain_from_id here for efficiency.
+        idx = state_key.find(":")
+        if idx == -1:
+            return False
+        return state_key[idx + 1:] == server_name
+
+    event_map = yield store.get_events([
+        e_id
+        for key, e_id in state_key_to_event_id_set
+        if include(key[0], key[1])
+    ])
+
+    event_to_state = {
+        e_id: {
+            key: event_map[inner_e_id]
+            for key, inner_e_id in iteritems(key_to_eid)
+            if inner_e_id in event_map
+        }
+        for e_id, key_to_eid in iteritems(event_to_state_ids)
+    }
+
+    defer.returnValue([
+        redact_disallowed(e, event_to_state[e.event_id])
+        for e in events
+    ])
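
[Editor's note: the allowed() convention above (None / redacted copy / original event) composes naturally with the map/filter pipeline at the end of filter_events_for_client. A toy, self-contained illustration: dicts stand in for events, and clearing content stands in for prune_event::

    from __future__ import print_function

    import operator

    IGNORED = {"@ignored:example.com"}
    ERASED = {"@erased:example.com"}

    def allowed(event):
        if event["sender"] in IGNORED:
            return None                     # dropped entirely
        if event["sender"] in ERASED:
            return dict(event, content={})  # stand-in for prune_event()
        return event                        # visible as-is

    events = [
        {"sender": "@alice:example.com", "content": {"body": "hi"}},
        {"sender": "@ignored:example.com", "content": {"body": "spam"}},
        {"sender": "@erased:example.com", "content": {"body": "bye"}},
    ]

    filtered = list(filter(operator.truth, map(allowed, events)))
    print(filtered)  # alice unchanged; ignored dropped; erased redacted
]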
diff --git a/tests/__init__.py b/tests/__init__.py
index aab20e8e02..9d9ca22829 100644
--- a/tests/__init__.py
+++ b/tests/__init__.py
@@ -14,4 +14,8 @@
 # limitations under the License.
 
 from twisted.trial import util
+
+from tests import utils
+
 util.DEFAULT_TIMEOUT_DURATION = 10
+utils.setupdb()
diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py
index 4575dd9834..f65a27e5f1 100644
--- a/tests/api/test_auth.py
+++ b/tests/api/test_auth.py
@@ -13,16 +13,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import pymacaroons
 from mock import Mock
+
+import pymacaroons
+
 from twisted.internet import defer
 
 import synapse.handlers.auth
 from synapse.api.auth import Auth
-from synapse.api.errors import AuthError
+from synapse.api.errors import AuthError, Codes, ResourceLimitError
 from synapse.types import UserID
+
 from tests import unittest
-from tests.utils import setup_test_homeserver, mock_getRawHeaders
+from tests.utils import mock_getRawHeaders, setup_test_homeserver
 
 
 class TestHandlers(object):
@@ -31,34 +34,29 @@ class TestHandlers(object):
 
 
 class AuthTestCase(unittest.TestCase):
-
     @defer.inlineCallbacks
     def setUp(self):
         self.state_handler = Mock()
         self.store = Mock()
 
-        self.hs = yield setup_test_homeserver(handlers=None)
+        self.hs = yield setup_test_homeserver(self.addCleanup, handlers=None)
         self.hs.get_datastore = Mock(return_value=self.store)
         self.hs.handlers = TestHandlers(self.hs)
         self.auth = Auth(self.hs)
 
         self.test_user = "@foo:bar"
-        self.test_token = "_test_token_"
+        self.test_token = b"_test_token_"
 
         # this is overridden for the appservice tests
         self.store.get_app_service_by_token = Mock(return_value=None)
 
     @defer.inlineCallbacks
     def test_get_user_by_req_user_valid_token(self):
-        user_info = {
-            "name": self.test_user,
-            "token_id": "ditto",
-            "device_id": "device",
-        }
+        user_info = {"name": self.test_user, "token_id": "ditto", "device_id": "device"}
         self.store.get_user_by_access_token = Mock(return_value=user_info)
 
         request = Mock(args={})
-        request.args["access_token"] = [self.test_token]
+        request.args[b"access_token"] = [self.test_token]
         request.requestHeaders.getRawHeaders = mock_getRawHeaders()
         requester = yield self.auth.get_user_by_req(request)
         self.assertEquals(requester.user.to_string(), self.test_user)
@@ -67,16 +65,13 @@ class AuthTestCase(unittest.TestCase):
         self.store.get_user_by_access_token = Mock(return_value=None)
 
         request = Mock(args={})
-        request.args["access_token"] = [self.test_token]
+        request.args[b"access_token"] = [self.test_token]
         request.requestHeaders.getRawHeaders = mock_getRawHeaders()
         d = self.auth.get_user_by_req(request)
         self.failureResultOf(d, AuthError)
 
     def test_get_user_by_req_user_missing_token(self):
-        user_info = {
-            "name": self.test_user,
-            "token_id": "ditto",
-        }
+        user_info = {"name": self.test_user, "token_id": "ditto"}
         self.store.get_user_by_access_token = Mock(return_value=user_info)
 
         request = Mock(args={})
@@ -86,22 +81,64 @@ class AuthTestCase(unittest.TestCase):
 
     @defer.inlineCallbacks
     def test_get_user_by_req_appservice_valid_token(self):
-        app_service = Mock(token="foobar", url="a_url", sender=self.test_user)
+        app_service = Mock(
+            token="foobar", url="a_url", sender=self.test_user, ip_range_whitelist=None
+        )
         self.store.get_app_service_by_token = Mock(return_value=app_service)
         self.store.get_user_by_access_token = Mock(return_value=None)
 
         request = Mock(args={})
-        request.args["access_token"] = [self.test_token]
+        request.getClientIP.return_value = "127.0.0.1"
+        request.args[b"access_token"] = [self.test_token]
         request.requestHeaders.getRawHeaders = mock_getRawHeaders()
         requester = yield self.auth.get_user_by_req(request)
         self.assertEquals(requester.user.to_string(), self.test_user)
 
+    @defer.inlineCallbacks
+    def test_get_user_by_req_appservice_valid_token_good_ip(self):
+        from netaddr import IPSet
+
+        app_service = Mock(
+            token="foobar",
+            url="a_url",
+            sender=self.test_user,
+            ip_range_whitelist=IPSet(["192.168/16"]),
+        )
+        self.store.get_app_service_by_token = Mock(return_value=app_service)
+        self.store.get_user_by_access_token = Mock(return_value=None)
+
+        request = Mock(args={})
+        request.getClientIP.return_value = "192.168.10.10"
+        request.args[b"access_token"] = [self.test_token]
+        request.requestHeaders.getRawHeaders = mock_getRawHeaders()
+        requester = yield self.auth.get_user_by_req(request)
+        self.assertEquals(requester.user.to_string(), self.test_user)
+
+    def test_get_user_by_req_appservice_valid_token_bad_ip(self):
+        from netaddr import IPSet
+
+        app_service = Mock(
+            token="foobar",
+            url="a_url",
+            sender=self.test_user,
+            ip_range_whitelist=IPSet(["192.168/16"]),
+        )
+        self.store.get_app_service_by_token = Mock(return_value=app_service)
+        self.store.get_user_by_access_token = Mock(return_value=None)
+
+        request = Mock(args={})
+        request.getClientIP.return_value = "131.111.8.42"
+        request.args[b"access_token"] = [self.test_token]
+        request.requestHeaders.getRawHeaders = mock_getRawHeaders()
+        d = self.auth.get_user_by_req(request)
+        self.failureResultOf(d, AuthError)
+
     def test_get_user_by_req_appservice_bad_token(self):
         self.store.get_app_service_by_token = Mock(return_value=None)
         self.store.get_user_by_access_token = Mock(return_value=None)
 
         request = Mock(args={})
-        request.args["access_token"] = [self.test_token]
+        request.args[b"access_token"] = [self.test_token]
         request.requestHeaders.getRawHeaders = mock_getRawHeaders()
         d = self.auth.get_user_by_req(request)
         self.failureResultOf(d, AuthError)
@@ -118,29 +155,37 @@ class AuthTestCase(unittest.TestCase):
 
     @defer.inlineCallbacks
     def test_get_user_by_req_appservice_valid_token_valid_user_id(self):
-        masquerading_user_id = "@doppelganger:matrix.org"
-        app_service = Mock(token="foobar", url="a_url", sender=self.test_user)
+        masquerading_user_id = b"@doppelganger:matrix.org"
+        app_service = Mock(
+            token="foobar", url="a_url", sender=self.test_user, ip_range_whitelist=None
+        )
         app_service.is_interested_in_user = Mock(return_value=True)
         self.store.get_app_service_by_token = Mock(return_value=app_service)
         self.store.get_user_by_access_token = Mock(return_value=None)
 
         request = Mock(args={})
-        request.args["access_token"] = [self.test_token]
-        request.args["user_id"] = [masquerading_user_id]
+        request.getClientIP.return_value = "127.0.0.1"
+        request.args[b"access_token"] = [self.test_token]
+        request.args[b"user_id"] = [masquerading_user_id]
         request.requestHeaders.getRawHeaders = mock_getRawHeaders()
         requester = yield self.auth.get_user_by_req(request)
-        self.assertEquals(requester.user.to_string(), masquerading_user_id)
+        self.assertEquals(
+            requester.user.to_string(), masquerading_user_id.decode('utf8')
+        )
 
     def test_get_user_by_req_appservice_valid_token_bad_user_id(self):
-        masquerading_user_id = "@doppelganger:matrix.org"
-        app_service = Mock(token="foobar", url="a_url", sender=self.test_user)
+        masquerading_user_id = b"@doppelganger:matrix.org"
+        app_service = Mock(
+            token="foobar", url="a_url", sender=self.test_user, ip_range_whitelist=None
+        )
         app_service.is_interested_in_user = Mock(return_value=False)
         self.store.get_app_service_by_token = Mock(return_value=app_service)
         self.store.get_user_by_access_token = Mock(return_value=None)
 
         request = Mock(args={})
-        request.args["access_token"] = [self.test_token]
-        request.args["user_id"] = [masquerading_user_id]
+        request.getClientIP.return_value = "127.0.0.1"
+        request.args[b"access_token"] = [self.test_token]
+        request.args[b"user_id"] = [masquerading_user_id]
         request.requestHeaders.getRawHeaders = mock_getRawHeaders()
         d = self.auth.get_user_by_req(request)
         self.failureResultOf(d, AuthError)
@@ -150,17 +195,15 @@ class AuthTestCase(unittest.TestCase):
         # TODO(danielwh): Remove this mock when we remove the
         # get_user_by_access_token fallback.
         self.store.get_user_by_access_token = Mock(
-            return_value={
-                "name": "@baldrick:matrix.org",
-                "device_id": "device",
-            }
+            return_value={"name": "@baldrick:matrix.org", "device_id": "device"}
         )
 
         user_id = "@baldrick:matrix.org"
         macaroon = pymacaroons.Macaroon(
             location=self.hs.config.server_name,
             identifier="key",
-            key=self.hs.config.macaroon_secret_key)
+            key=self.hs.config.macaroon_secret_key,
+        )
         macaroon.add_first_party_caveat("gen = 1")
         macaroon.add_first_party_caveat("type = access")
         macaroon.add_first_party_caveat("user_id = %s" % (user_id,))
@@ -174,15 +217,14 @@ class AuthTestCase(unittest.TestCase):
 
     @defer.inlineCallbacks
     def test_get_guest_user_from_macaroon(self):
-        self.store.get_user_by_id = Mock(return_value={
-            "is_guest": True,
-        })
+        self.store.get_user_by_id = Mock(return_value={"is_guest": True})
 
         user_id = "@baldrick:matrix.org"
         macaroon = pymacaroons.Macaroon(
             location=self.hs.config.server_name,
             identifier="key",
-            key=self.hs.config.macaroon_secret_key)
+            key=self.hs.config.macaroon_secret_key,
+        )
         macaroon.add_first_party_caveat("gen = 1")
         macaroon.add_first_party_caveat("type = access")
         macaroon.add_first_party_caveat("user_id = %s" % (user_id,))
@@ -206,7 +248,8 @@ class AuthTestCase(unittest.TestCase):
         macaroon = pymacaroons.Macaroon(
             location=self.hs.config.server_name,
             identifier="key",
-            key=self.hs.config.macaroon_secret_key)
+            key=self.hs.config.macaroon_secret_key,
+        )
         macaroon.add_first_party_caveat("gen = 1")
         macaroon.add_first_party_caveat("type = access")
         macaroon.add_first_party_caveat("user_id = %s" % (user,))
@@ -226,7 +269,8 @@ class AuthTestCase(unittest.TestCase):
         macaroon = pymacaroons.Macaroon(
             location=self.hs.config.server_name,
             identifier="key",
-            key=self.hs.config.macaroon_secret_key)
+            key=self.hs.config.macaroon_secret_key,
+        )
         macaroon.add_first_party_caveat("gen = 1")
         macaroon.add_first_party_caveat("type = access")
 
@@ -247,7 +291,8 @@ class AuthTestCase(unittest.TestCase):
         macaroon = pymacaroons.Macaroon(
             location=self.hs.config.server_name,
             identifier="key",
-            key=self.hs.config.macaroon_secret_key + "wrong")
+            key=self.hs.config.macaroon_secret_key + "wrong",
+        )
         macaroon.add_first_party_caveat("gen = 1")
         macaroon.add_first_party_caveat("type = access")
         macaroon.add_first_party_caveat("user_id = %s" % (user,))
@@ -269,7 +314,8 @@ class AuthTestCase(unittest.TestCase):
         macaroon = pymacaroons.Macaroon(
             location=self.hs.config.server_name,
             identifier="key",
-            key=self.hs.config.macaroon_secret_key)
+            key=self.hs.config.macaroon_secret_key,
+        )
         macaroon.add_first_party_caveat("gen = 1")
         macaroon.add_first_party_caveat("type = access")
         macaroon.add_first_party_caveat("user_id = %s" % (user,))
@@ -296,7 +342,8 @@ class AuthTestCase(unittest.TestCase):
         macaroon = pymacaroons.Macaroon(
             location=self.hs.config.server_name,
             identifier="key",
-            key=self.hs.config.macaroon_secret_key)
+            key=self.hs.config.macaroon_secret_key,
+        )
         macaroon.add_first_party_caveat("gen = 1")
         macaroon.add_first_party_caveat("type = access")
         macaroon.add_first_party_caveat("user_id = %s" % (user,))
@@ -329,7 +376,8 @@ class AuthTestCase(unittest.TestCase):
         macaroon = pymacaroons.Macaroon(
             location=self.hs.config.server_name,
             identifier="key",
-            key=self.hs.config.macaroon_secret_key)
+            key=self.hs.config.macaroon_secret_key,
+        )
         macaroon.add_first_party_caveat("gen = 1")
         macaroon.add_first_party_caveat("type = access")
         macaroon.add_first_party_caveat("user_id = %s" % (user_id,))
@@ -350,9 +398,7 @@ class AuthTestCase(unittest.TestCase):
         token = yield self.hs.handlers.auth_handler.issue_access_token(
             USER_ID, "DEVICE"
         )
-        self.store.add_access_token_to_user.assert_called_with(
-            USER_ID, token, "DEVICE"
-        )
+        self.store.add_access_token_to_user.assert_called_with(USER_ID, token, "DEVICE")
 
         def get_user(tok):
             if token != tok:
@@ -363,14 +409,13 @@ class AuthTestCase(unittest.TestCase):
                 "token_id": 1234,
                 "device_id": "DEVICE",
             }
+
         self.store.get_user_by_access_token = get_user
-        self.store.get_user_by_id = Mock(return_value={
-            "is_guest": False,
-        })
+        self.store.get_user_by_id = Mock(return_value={"is_guest": False})
 
         # check the token works
         request = Mock(args={})
-        request.args["access_token"] = [token]
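+        # Twisted request args use bytes for both keys and values on Python 3,
+        # so the token is looked up under b"access_token" and stored encoded.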
+        request.args[b"access_token"] = [token.encode('ascii')]
         request.requestHeaders.getRawHeaders = mock_getRawHeaders()
         requester = yield self.auth.get_user_by_req(request, allow_guest=True)
         self.assertEqual(UserID.from_string(USER_ID), requester.user)
@@ -383,7 +428,7 @@ class AuthTestCase(unittest.TestCase):
 
         # the token should *not* work now
         request = Mock(args={})
-        request.args["access_token"] = [guest_tok]
+        request.args[b"access_token"] = [guest_tok.encode('ascii')]
         request.requestHeaders.getRawHeaders = mock_getRawHeaders()
 
         with self.assertRaises(AuthError) as cm:
@@ -393,3 +438,66 @@ class AuthTestCase(unittest.TestCase):
         self.assertEqual("Guest access token used for regular user", cm.exception.msg)
 
         self.store.get_user_by_id.assert_called_with(USER_ID)
+
+    @defer.inlineCallbacks
+    def test_blocking_mau(self):
+        self.hs.config.limit_usage_by_mau = False
+        self.hs.config.max_mau_value = 50
+        lots_of_users = 100
+        small_number_of_users = 1
+
+        # Ensure no error is thrown while the MAU limit is disabled
+        yield self.auth.check_auth_blocking()
+
+        self.hs.config.limit_usage_by_mau = True
+
+        self.store.get_monthly_active_count = Mock(
+            return_value=defer.succeed(lots_of_users)
+        )
+
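+        # The mocked count is over max_mau_value, so the auth check should be
+        # rejected with a ResourceLimitError carrying the admin contact.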
+        with self.assertRaises(ResourceLimitError) as e:
+            yield self.auth.check_auth_blocking()
+        self.assertEquals(e.exception.admin_contact, self.hs.config.admin_contact)
+        self.assertEquals(e.exception.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)
+        self.assertEquals(e.exception.code, 403)
+
+        # Ensure no error is thrown once the count is back under the limit
+        self.store.get_monthly_active_count = Mock(
+            return_value=defer.succeed(small_number_of_users)
+        )
+        yield self.auth.check_auth_blocking()
+
+    @defer.inlineCallbacks
+    def test_reserved_threepid(self):
+        self.hs.config.limit_usage_by_mau = True
+        self.hs.config.max_mau_value = 1
+        threepid = {'medium': 'email', 'address': 'reserved@server.com'}
+        unknown_threepid = {'medium': 'email', 'address': 'unreserved@server.com'}
+        self.hs.config.mau_limits_reserved_threepids = [threepid]
+
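+        # Register a user to fill the single MAU slot, so that any further
+        # auth attempt is blocked unless it presents a reserved threepid.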
+        yield self.store.register(user_id='user1', token="123", password_hash=None)
+        with self.assertRaises(ResourceLimitError):
+            yield self.auth.check_auth_blocking()
+
+        with self.assertRaises(ResourceLimitError):
+            yield self.auth.check_auth_blocking(threepid=unknown_threepid)
+
+        yield self.auth.check_auth_blocking(threepid=threepid)
+
+    @defer.inlineCallbacks
+    def test_hs_disabled(self):
+        self.hs.config.hs_disabled = True
+        self.hs.config.hs_disabled_message = "Reason for being disabled"
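+        # A disabled homeserver should reject every auth check with
+        # RESOURCE_LIMIT_EXCEEDED and expose the configured admin contact.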
+        with self.assertRaises(ResourceLimitError) as e:
+            yield self.auth.check_auth_blocking()
+        self.assertEquals(e.exception.admin_contact, self.hs.config.admin_contact)
+        self.assertEquals(e.exception.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)
+        self.assertEquals(e.exception.code, 403)
+
+    @defer.inlineCallbacks
+    def test_server_notices_mxid_special_cased(self):
+        self.hs.config.hs_disabled = True
+        user = "@user:server"
+        self.hs.config.server_notices_mxid = user
+        self.hs.config.hs_disabled_message = "Reason for being disabled"
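+        # The server notices user is special-cased: the check must pass for
+        # it even though the homeserver is disabled.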
+        yield self.auth.check_auth_blocking(user)
diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py
index dcceca7f3e..48b2d3d663 100644
--- a/tests/api/test_filtering.py
+++ b/tests/api/test_filtering.py
@@ -13,19 +13,18 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from tests import unittest
-from twisted.internet import defer
-
 from mock import Mock
-from tests.utils import (
-    MockHttpResource, DeferredMockCallable, setup_test_homeserver
-)
 
+import jsonschema
+
+from twisted.internet import defer
+
+from synapse.api.errors import SynapseError
 from synapse.api.filtering import Filter
 from synapse.events import FrozenEvent
-from synapse.api.errors import SynapseError
 
-import jsonschema
+from tests import unittest
+from tests.utils import DeferredMockCallable, MockHttpResource, setup_test_homeserver
 
 user_localpart = "test_user"
 
@@ -39,7 +38,6 @@ def MockEvent(**kwargs):
 
 
 class FilteringTestCase(unittest.TestCase):
-
     @defer.inlineCallbacks
     def setUp(self):
         self.mock_federation_resource = MockHttpResource()
@@ -48,6 +46,7 @@ class FilteringTestCase(unittest.TestCase):
         self.mock_http_client.put_json = DeferredMockCallable()
 
         hs = yield setup_test_homeserver(
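+            # the cleanup callback ensures the homeserver is torn down when
+            # the test completes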
+            self.addCleanup,
             handlers=None,
             http_client=self.mock_http_client,
             keyring=Mock(),
@@ -65,7 +64,7 @@ class FilteringTestCase(unittest.TestCase):
             {"room": {"timeline": {"limit": 0}, "state": {"not_bars": ["*"]}}},
             {"event_format": "other"},
             {"room": {"not_rooms": ["#foo:pik-test"]}},
-            {"presence": {"senders": ["@bar;pik.test.com"]}}
+            {"presence": {"senders": ["@bar;pik.test.com"]}},
         ]
         for filter in invalid_filters:
             with self.assertRaises(SynapseError) as check_filter_error:
@@ -82,34 +81,34 @@ class FilteringTestCase(unittest.TestCase):
                     "include_leave": False,
                     "rooms": ["!dee:pik-test"],
                     "not_rooms": ["!gee:pik-test"],
-                    "account_data": {"limit": 0, "types": ["*"]}
+                    "account_data": {"limit": 0, "types": ["*"]},
                 }
             },
             {
                 "room": {
                     "state": {
                         "types": ["m.room.*"],
-                        "not_rooms": ["!726s6s6q:example.com"]
+                        "not_rooms": ["!726s6s6q:example.com"],
                     },
                     "timeline": {
                         "limit": 10,
                         "types": ["m.room.message"],
                         "not_rooms": ["!726s6s6q:example.com"],
-                        "not_senders": ["@spam:example.com"]
+                        "not_senders": ["@spam:example.com"],
                     },
                     "ephemeral": {
                         "types": ["m.receipt", "m.typing"],
                         "not_rooms": ["!726s6s6q:example.com"],
-                        "not_senders": ["@spam:example.com"]
-                    }
+                        "not_senders": ["@spam:example.com"],
+                    },
                 },
                 "presence": {
                     "types": ["m.presence"],
-                    "not_senders": ["@alice:example.com"]
+                    "not_senders": ["@alice:example.com"],
                 },
                 "event_format": "client",
-                "event_fields": ["type", "content", "sender"]
-            }
+                "event_fields": ["type", "content", "sender"],
+            },
         ]
         for filter in valid_filters:
             try:
@@ -122,229 +121,131 @@ class FilteringTestCase(unittest.TestCase):
         pass
 
     def test_definition_types_works_with_literals(self):
-        definition = {
-            "types": ["m.room.message", "org.matrix.foo.bar"]
-        }
-        event = MockEvent(
-            sender="@foo:bar",
-            type="m.room.message",
-            room_id="!foo:bar"
-        )
+        definition = {"types": ["m.room.message", "org.matrix.foo.bar"]}
+        event = MockEvent(sender="@foo:bar", type="m.room.message", room_id="!foo:bar")
 
-        self.assertTrue(
-            Filter(definition).check(event)
-        )
+        self.assertTrue(Filter(definition).check(event))
 
     def test_definition_types_works_with_wildcards(self):
-        definition = {
-            "types": ["m.*", "org.matrix.foo.bar"]
-        }
-        event = MockEvent(
-            sender="@foo:bar",
-            type="m.room.message",
-            room_id="!foo:bar"
-        )
-        self.assertTrue(
-            Filter(definition).check(event)
-        )
+        definition = {"types": ["m.*", "org.matrix.foo.bar"]}
+        event = MockEvent(sender="@foo:bar", type="m.room.message", room_id="!foo:bar")
+        self.assertTrue(Filter(definition).check(event))
 
     def test_definition_types_works_with_unknowns(self):
-        definition = {
-            "types": ["m.room.message", "org.matrix.foo.bar"]
-        }
+        definition = {"types": ["m.room.message", "org.matrix.foo.bar"]}
         event = MockEvent(
             sender="@foo:bar",
             type="now.for.something.completely.different",
-            room_id="!foo:bar"
-        )
-        self.assertFalse(
-            Filter(definition).check(event)
+            room_id="!foo:bar",
         )
+        self.assertFalse(Filter(definition).check(event))
 
     def test_definition_not_types_works_with_literals(self):
-        definition = {
-            "not_types": ["m.room.message", "org.matrix.foo.bar"]
-        }
-        event = MockEvent(
-            sender="@foo:bar",
-            type="m.room.message",
-            room_id="!foo:bar"
-        )
-        self.assertFalse(
-            Filter(definition).check(event)
-        )
+        definition = {"not_types": ["m.room.message", "org.matrix.foo.bar"]}
+        event = MockEvent(sender="@foo:bar", type="m.room.message", room_id="!foo:bar")
+        self.assertFalse(Filter(definition).check(event))
 
     def test_definition_not_types_works_with_wildcards(self):
-        definition = {
-            "not_types": ["m.room.message", "org.matrix.*"]
-        }
+        definition = {"not_types": ["m.room.message", "org.matrix.*"]}
         event = MockEvent(
-            sender="@foo:bar",
-            type="org.matrix.custom.event",
-            room_id="!foo:bar"
-        )
-        self.assertFalse(
-            Filter(definition).check(event)
+            sender="@foo:bar", type="org.matrix.custom.event", room_id="!foo:bar"
         )
+        self.assertFalse(Filter(definition).check(event))
 
     def test_definition_not_types_works_with_unknowns(self):
-        definition = {
-            "not_types": ["m.*", "org.*"]
-        }
-        event = MockEvent(
-            sender="@foo:bar",
-            type="com.nom.nom.nom",
-            room_id="!foo:bar"
-        )
-        self.assertTrue(
-            Filter(definition).check(event)
-        )
+        definition = {"not_types": ["m.*", "org.*"]}
+        event = MockEvent(sender="@foo:bar", type="com.nom.nom.nom", room_id="!foo:bar")
+        self.assertTrue(Filter(definition).check(event))
 
     def test_definition_not_types_takes_priority_over_types(self):
         definition = {
             "not_types": ["m.*", "org.*"],
-            "types": ["m.room.message", "m.room.topic"]
+            "types": ["m.room.message", "m.room.topic"],
         }
-        event = MockEvent(
-            sender="@foo:bar",
-            type="m.room.topic",
-            room_id="!foo:bar"
-        )
-        self.assertFalse(
-            Filter(definition).check(event)
-        )
+        event = MockEvent(sender="@foo:bar", type="m.room.topic", room_id="!foo:bar")
+        self.assertFalse(Filter(definition).check(event))
 
     def test_definition_senders_works_with_literals(self):
-        definition = {
-            "senders": ["@flibble:wibble"]
-        }
+        definition = {"senders": ["@flibble:wibble"]}
         event = MockEvent(
-            sender="@flibble:wibble",
-            type="com.nom.nom.nom",
-            room_id="!foo:bar"
-        )
-        self.assertTrue(
-            Filter(definition).check(event)
+            sender="@flibble:wibble", type="com.nom.nom.nom", room_id="!foo:bar"
         )
+        self.assertTrue(Filter(definition).check(event))
 
     def test_definition_senders_works_with_unknowns(self):
-        definition = {
-            "senders": ["@flibble:wibble"]
-        }
+        definition = {"senders": ["@flibble:wibble"]}
         event = MockEvent(
-            sender="@challenger:appears",
-            type="com.nom.nom.nom",
-            room_id="!foo:bar"
-        )
-        self.assertFalse(
-            Filter(definition).check(event)
+            sender="@challenger:appears", type="com.nom.nom.nom", room_id="!foo:bar"
         )
+        self.assertFalse(Filter(definition).check(event))
 
     def test_definition_not_senders_works_with_literals(self):
-        definition = {
-            "not_senders": ["@flibble:wibble"]
-        }
+        definition = {"not_senders": ["@flibble:wibble"]}
         event = MockEvent(
-            sender="@flibble:wibble",
-            type="com.nom.nom.nom",
-            room_id="!foo:bar"
-        )
-        self.assertFalse(
-            Filter(definition).check(event)
+            sender="@flibble:wibble", type="com.nom.nom.nom", room_id="!foo:bar"
         )
+        self.assertFalse(Filter(definition).check(event))
 
     def test_definition_not_senders_works_with_unknowns(self):
-        definition = {
-            "not_senders": ["@flibble:wibble"]
-        }
+        definition = {"not_senders": ["@flibble:wibble"]}
         event = MockEvent(
-            sender="@challenger:appears",
-            type="com.nom.nom.nom",
-            room_id="!foo:bar"
-        )
-        self.assertTrue(
-            Filter(definition).check(event)
+            sender="@challenger:appears", type="com.nom.nom.nom", room_id="!foo:bar"
         )
+        self.assertTrue(Filter(definition).check(event))
 
     def test_definition_not_senders_takes_priority_over_senders(self):
         definition = {
             "not_senders": ["@misspiggy:muppets"],
-            "senders": ["@kermit:muppets", "@misspiggy:muppets"]
+            "senders": ["@kermit:muppets", "@misspiggy:muppets"],
         }
         event = MockEvent(
-            sender="@misspiggy:muppets",
-            type="m.room.topic",
-            room_id="!foo:bar"
-        )
-        self.assertFalse(
-            Filter(definition).check(event)
+            sender="@misspiggy:muppets", type="m.room.topic", room_id="!foo:bar"
         )
+        self.assertFalse(Filter(definition).check(event))
 
     def test_definition_rooms_works_with_literals(self):
-        definition = {
-            "rooms": ["!secretbase:unknown"]
-        }
+        definition = {"rooms": ["!secretbase:unknown"]}
         event = MockEvent(
-            sender="@foo:bar",
-            type="m.room.message",
-            room_id="!secretbase:unknown"
-        )
-        self.assertTrue(
-            Filter(definition).check(event)
+            sender="@foo:bar", type="m.room.message", room_id="!secretbase:unknown"
         )
+        self.assertTrue(Filter(definition).check(event))
 
     def test_definition_rooms_works_with_unknowns(self):
-        definition = {
-            "rooms": ["!secretbase:unknown"]
-        }
+        definition = {"rooms": ["!secretbase:unknown"]}
         event = MockEvent(
             sender="@foo:bar",
             type="m.room.message",
-            room_id="!anothersecretbase:unknown"
-        )
-        self.assertFalse(
-            Filter(definition).check(event)
+            room_id="!anothersecretbase:unknown",
         )
+        self.assertFalse(Filter(definition).check(event))
 
     def test_definition_not_rooms_works_with_literals(self):
-        definition = {
-            "not_rooms": ["!anothersecretbase:unknown"]
-        }
+        definition = {"not_rooms": ["!anothersecretbase:unknown"]}
         event = MockEvent(
             sender="@foo:bar",
             type="m.room.message",
-            room_id="!anothersecretbase:unknown"
-        )
-        self.assertFalse(
-            Filter(definition).check(event)
+            room_id="!anothersecretbase:unknown",
         )
+        self.assertFalse(Filter(definition).check(event))
 
     def test_definition_not_rooms_works_with_unknowns(self):
-        definition = {
-            "not_rooms": ["!secretbase:unknown"]
-        }
+        definition = {"not_rooms": ["!secretbase:unknown"]}
         event = MockEvent(
             sender="@foo:bar",
             type="m.room.message",
-            room_id="!anothersecretbase:unknown"
-        )
-        self.assertTrue(
-            Filter(definition).check(event)
+            room_id="!anothersecretbase:unknown",
         )
+        self.assertTrue(Filter(definition).check(event))
 
     def test_definition_not_rooms_takes_priority_over_rooms(self):
         definition = {
             "not_rooms": ["!secretbase:unknown"],
-            "rooms": ["!secretbase:unknown"]
+            "rooms": ["!secretbase:unknown"],
         }
         event = MockEvent(
-            sender="@foo:bar",
-            type="m.room.message",
-            room_id="!secretbase:unknown"
-        )
-        self.assertFalse(
-            Filter(definition).check(event)
+            sender="@foo:bar", type="m.room.message", room_id="!secretbase:unknown"
         )
+        self.assertFalse(Filter(definition).check(event))
 
     def test_definition_combined_event(self):
         definition = {
@@ -353,16 +254,14 @@ class FilteringTestCase(unittest.TestCase):
             "rooms": ["!stage:unknown"],
             "not_rooms": ["!piggyshouse:muppets"],
             "types": ["m.room.message", "muppets.kermit.*"],
-            "not_types": ["muppets.misspiggy.*"]
+            "not_types": ["muppets.misspiggy.*"],
         }
         event = MockEvent(
             sender="@kermit:muppets",  # yup
             type="m.room.message",  # yup
-            room_id="!stage:unknown"  # yup
-        )
-        self.assertTrue(
-            Filter(definition).check(event)
+            room_id="!stage:unknown",  # yup
         )
+        self.assertTrue(Filter(definition).check(event))
 
     def test_definition_combined_event_bad_sender(self):
         definition = {
@@ -371,16 +270,14 @@ class FilteringTestCase(unittest.TestCase):
             "rooms": ["!stage:unknown"],
             "not_rooms": ["!piggyshouse:muppets"],
             "types": ["m.room.message", "muppets.kermit.*"],
-            "not_types": ["muppets.misspiggy.*"]
+            "not_types": ["muppets.misspiggy.*"],
         }
         event = MockEvent(
             sender="@misspiggy:muppets",  # nope
             type="m.room.message",  # yup
-            room_id="!stage:unknown"  # yup
-        )
-        self.assertFalse(
-            Filter(definition).check(event)
+            room_id="!stage:unknown",  # yup
         )
+        self.assertFalse(Filter(definition).check(event))
 
     def test_definition_combined_event_bad_room(self):
         definition = {
@@ -389,16 +286,14 @@ class FilteringTestCase(unittest.TestCase):
             "rooms": ["!stage:unknown"],
             "not_rooms": ["!piggyshouse:muppets"],
             "types": ["m.room.message", "muppets.kermit.*"],
-            "not_types": ["muppets.misspiggy.*"]
+            "not_types": ["muppets.misspiggy.*"],
         }
         event = MockEvent(
             sender="@kermit:muppets",  # yup
             type="m.room.message",  # yup
-            room_id="!piggyshouse:muppets"  # nope
-        )
-        self.assertFalse(
-            Filter(definition).check(event)
+            room_id="!piggyshouse:muppets",  # nope
         )
+        self.assertFalse(Filter(definition).check(event))
 
     def test_definition_combined_event_bad_type(self):
         definition = {
@@ -407,37 +302,26 @@ class FilteringTestCase(unittest.TestCase):
             "rooms": ["!stage:unknown"],
             "not_rooms": ["!piggyshouse:muppets"],
             "types": ["m.room.message", "muppets.kermit.*"],
-            "not_types": ["muppets.misspiggy.*"]
+            "not_types": ["muppets.misspiggy.*"],
         }
         event = MockEvent(
             sender="@kermit:muppets",  # yup
             type="muppets.misspiggy.kisses",  # nope
-            room_id="!stage:unknown"  # yup
-        )
-        self.assertFalse(
-            Filter(definition).check(event)
+            room_id="!stage:unknown",  # yup
         )
+        self.assertFalse(Filter(definition).check(event))
 
     @defer.inlineCallbacks
     def test_filter_presence_match(self):
-        user_filter_json = {
-            "presence": {
-                "types": ["m.*"]
-            }
-        }
+        user_filter_json = {"presence": {"types": ["m.*"]}}
         filter_id = yield self.datastore.add_user_filter(
-            user_localpart=user_localpart,
-            user_filter=user_filter_json,
-        )
-        event = MockEvent(
-            sender="@foo:bar",
-            type="m.profile",
+            user_localpart=user_localpart, user_filter=user_filter_json
         )
+        event = MockEvent(sender="@foo:bar", type="m.profile")
         events = [event]
 
         user_filter = yield self.filtering.get_user_filter(
-            user_localpart=user_localpart,
-            filter_id=filter_id,
+            user_localpart=user_localpart, filter_id=filter_id
         )
 
         results = user_filter.filter_presence(events=events)
@@ -445,15 +329,10 @@ class FilteringTestCase(unittest.TestCase):
 
     @defer.inlineCallbacks
     def test_filter_presence_no_match(self):
-        user_filter_json = {
-            "presence": {
-                "types": ["m.*"]
-            }
-        }
+        user_filter_json = {"presence": {"types": ["m.*"]}}
 
         filter_id = yield self.datastore.add_user_filter(
-            user_localpart=user_localpart + "2",
-            user_filter=user_filter_json,
+            user_localpart=user_localpart + "2", user_filter=user_filter_json
         )
         event = MockEvent(
             event_id="$asdasd:localhost",
@@ -463,8 +342,7 @@ class FilteringTestCase(unittest.TestCase):
         events = [event]
 
         user_filter = yield self.filtering.get_user_filter(
-            user_localpart=user_localpart + "2",
-            filter_id=filter_id,
+            user_localpart=user_localpart + "2", filter_id=filter_id
         )
 
         results = user_filter.filter_presence(events=events)
@@ -472,27 +350,15 @@ class FilteringTestCase(unittest.TestCase):
 
     @defer.inlineCallbacks
     def test_filter_room_state_match(self):
-        user_filter_json = {
-            "room": {
-                "state": {
-                    "types": ["m.*"]
-                }
-            }
-        }
+        user_filter_json = {"room": {"state": {"types": ["m.*"]}}}
         filter_id = yield self.datastore.add_user_filter(
-            user_localpart=user_localpart,
-            user_filter=user_filter_json,
-        )
-        event = MockEvent(
-            sender="@foo:bar",
-            type="m.room.topic",
-            room_id="!foo:bar"
+            user_localpart=user_localpart, user_filter=user_filter_json
         )
+        event = MockEvent(sender="@foo:bar", type="m.room.topic", room_id="!foo:bar")
         events = [event]
 
         user_filter = yield self.filtering.get_user_filter(
-            user_localpart=user_localpart,
-            filter_id=filter_id,
+            user_localpart=user_localpart, filter_id=filter_id
         )
 
         results = user_filter.filter_room_state(events=events)
@@ -500,27 +366,17 @@ class FilteringTestCase(unittest.TestCase):
 
     @defer.inlineCallbacks
     def test_filter_room_state_no_match(self):
-        user_filter_json = {
-            "room": {
-                "state": {
-                    "types": ["m.*"]
-                }
-            }
-        }
+        user_filter_json = {"room": {"state": {"types": ["m.*"]}}}
         filter_id = yield self.datastore.add_user_filter(
-            user_localpart=user_localpart,
-            user_filter=user_filter_json,
+            user_localpart=user_localpart, user_filter=user_filter_json
         )
         event = MockEvent(
-            sender="@foo:bar",
-            type="org.matrix.custom.event",
-            room_id="!foo:bar"
+            sender="@foo:bar", type="org.matrix.custom.event", room_id="!foo:bar"
         )
         events = [event]
 
         user_filter = yield self.filtering.get_user_filter(
-            user_localpart=user_localpart,
-            filter_id=filter_id,
+            user_localpart=user_localpart, filter_id=filter_id
         )
 
         results = user_filter.filter_room_state(events)
@@ -544,45 +400,32 @@ class FilteringTestCase(unittest.TestCase):
 
     @defer.inlineCallbacks
     def test_add_filter(self):
-        user_filter_json = {
-            "room": {
-                "state": {
-                    "types": ["m.*"]
-                }
-            }
-        }
+        user_filter_json = {"room": {"state": {"types": ["m.*"]}}}
 
         filter_id = yield self.filtering.add_user_filter(
-            user_localpart=user_localpart,
-            user_filter=user_filter_json,
+            user_localpart=user_localpart, user_filter=user_filter_json
         )
 
         self.assertEquals(filter_id, 0)
-        self.assertEquals(user_filter_json, (
-            yield self.datastore.get_user_filter(
-                user_localpart=user_localpart,
-                filter_id=0,
-            )
-        ))
+        self.assertEquals(
+            user_filter_json,
+            (
+                yield self.datastore.get_user_filter(
+                    user_localpart=user_localpart, filter_id=0
+                )
+            ),
+        )
 
     @defer.inlineCallbacks
     def test_get_filter(self):
-        user_filter_json = {
-            "room": {
-                "state": {
-                    "types": ["m.*"]
-                }
-            }
-        }
+        user_filter_json = {"room": {"state": {"types": ["m.*"]}}}
 
         filter_id = yield self.datastore.add_user_filter(
-            user_localpart=user_localpart,
-            user_filter=user_filter_json,
+            user_localpart=user_localpart, user_filter=user_filter_json
         )
 
         filter = yield self.filtering.get_user_filter(
-            user_localpart=user_localpart,
-            filter_id=filter_id,
+            user_localpart=user_localpart, filter_id=filter_id
         )
 
         self.assertEquals(filter.get_filter_json(), user_filter_json)
diff --git a/tests/api/test_ratelimiting.py b/tests/api/test_ratelimiting.py
index c45b59b36c..8933fe3b72 100644
--- a/tests/api/test_ratelimiting.py
+++ b/tests/api/test_ratelimiting.py
@@ -4,17 +4,16 @@ from tests import unittest
 
 
 class TestRatelimiter(unittest.TestCase):
-
     def test_allowed(self):
         limiter = Ratelimiter()
         allowed, time_allowed = limiter.send_message(
-            user_id="test_id", time_now_s=0, msg_rate_hz=0.1, burst_count=1,
+            user_id="test_id", time_now_s=0, msg_rate_hz=0.1, burst_count=1
         )
         self.assertTrue(allowed)
         self.assertEquals(10., time_allowed)
 
         allowed, time_allowed = limiter.send_message(
-            user_id="test_id", time_now_s=5, msg_rate_hz=0.1, burst_count=1,
+            user_id="test_id", time_now_s=5, msg_rate_hz=0.1, burst_count=1
         )
         self.assertFalse(allowed)
         self.assertEquals(10., time_allowed)
@@ -28,7 +27,7 @@ class TestRatelimiter(unittest.TestCase):
     def test_pruning(self):
         limiter = Ratelimiter()
         allowed, time_allowed = limiter.send_message(
-            user_id="test_id_1", time_now_s=0, msg_rate_hz=0.1, burst_count=1,
+            user_id="test_id_1", time_now_s=0, msg_rate_hz=0.1, burst_count=1
         )
 
         self.assertIn("test_id_1", limiter.message_counts)
diff --git a/tests/app/__init__.py b/tests/app/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/app/__init__.py
diff --git a/tests/app/test_frontend_proxy.py b/tests/app/test_frontend_proxy.py
new file mode 100644
index 0000000000..76b5090fff
--- /dev/null
+++ b/tests/app/test_frontend_proxy.py
@@ -0,0 +1,88 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.app.frontend_proxy import FrontendProxyServer
+
+from tests.unittest import HomeserverTestCase
+
+
+class FrontendProxyTests(HomeserverTestCase):
+    def make_homeserver(self, reactor, clock):
+
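+        # Build the homeserver against the frontend proxy application class
+        # so its HTTP listener (and the presence stub servlet) is exercised.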
+        hs = self.setup_test_homeserver(
+            http_client=None, homeserverToUse=FrontendProxyServer
+        )
+
+        return hs
+
+    def test_listen_http_with_presence_enabled(self):
+        """
+        When presence is on, the stub servlet will not register.
+        """
+        # Presence is on
+        self.hs.config.use_presence = True
+
+        config = {
+            "port": 8080,
+            "bind_addresses": ["0.0.0.0"],
+            "resources": [{"names": ["client"]}],
+        }
+
+        # Listen with the config
+        self.hs._listen_http(config)
+
+        # Grab the resource from the site that was told to listen
+        self.assertEqual(len(self.reactor.tcpServers), 1)
+        site = self.reactor.tcpServers[0][1]
+        self.resource = (
+            site.resource.children["_matrix"].children["client"].children["r0"]
+        )
+
+        request, channel = self.make_request("PUT", "presence/a/status")
+        self.render(request)
+
+        # 400 with M_UNRECOGNIZED, because nothing is registered to handle the path
+        self.assertEqual(channel.code, 400)
+        self.assertEqual(channel.json_body["errcode"], "M_UNRECOGNIZED")
+
+    def test_listen_http_with_presence_disabled(self):
+        """
+        When presence is off, the stub servlet will register.
+        """
+        # Presence is off
+        self.hs.config.use_presence = False
+
+        config = {
+            "port": 8080,
+            "bind_addresses": ["0.0.0.0"],
+            "resources": [{"names": ["client"]}],
+        }
+
+        # Listen with the config
+        self.hs._listen_http(config)
+
+        # Grab the resource from the site that was told to listen
+        self.assertEqual(len(self.reactor.tcpServers), 1)
+        site = self.reactor.tcpServers[0][1]
+        self.resource = (
+            site.resource.children["_matrix"].children["client"].children["r0"]
+        )
+
+        request, channel = self.make_request("PUT", "presence/a/status")
+        self.render(request)
+
+        # 401, because the stub servlet still checks authentication
+        self.assertEqual(channel.code, 401)
+        self.assertEqual(channel.json_body["errcode"], "M_MISSING_TOKEN")
diff --git a/tests/appservice/test_appservice.py b/tests/appservice/test_appservice.py
index 5b2b95860a..4003869ed6 100644
--- a/tests/appservice/test_appservice.py
+++ b/tests/appservice/test_appservice.py
@@ -12,25 +12,22 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from synapse.appservice import ApplicationService
+import re
+
+from mock import Mock
 
 from twisted.internet import defer
 
-from mock import Mock
-from tests import unittest
+from synapse.appservice import ApplicationService
 
-import re
+from tests import unittest
 
 
 def _regex(regex, exclusive=True):
-    return {
-        "regex": re.compile(regex),
-        "exclusive": exclusive
-    }
+    return {"regex": re.compile(regex), "exclusive": exclusive}
 
 
 class ApplicationServiceTestCase(unittest.TestCase):
-
     def setUp(self):
         self.service = ApplicationService(
             id="unique_identifier",
@@ -40,8 +37,8 @@ class ApplicationServiceTestCase(unittest.TestCase):
             namespaces={
                 ApplicationService.NS_USERS: [],
                 ApplicationService.NS_ROOMS: [],
-                ApplicationService.NS_ALIASES: []
-            }
+                ApplicationService.NS_ALIASES: [],
+            },
         )
         self.event = Mock(
             type="m.something", room_id="!foo:bar", sender="@someone:somewhere"
@@ -51,25 +48,19 @@ class ApplicationServiceTestCase(unittest.TestCase):
 
     @defer.inlineCallbacks
     def test_regex_user_id_prefix_match(self):
-        self.service.namespaces[ApplicationService.NS_USERS].append(
-            _regex("@irc_.*")
-        )
+        self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*"))
         self.event.sender = "@irc_foobar:matrix.org"
         self.assertTrue((yield self.service.is_interested(self.event)))
 
     @defer.inlineCallbacks
     def test_regex_user_id_prefix_no_match(self):
-        self.service.namespaces[ApplicationService.NS_USERS].append(
-            _regex("@irc_.*")
-        )
+        self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*"))
         self.event.sender = "@someone_else:matrix.org"
         self.assertFalse((yield self.service.is_interested(self.event)))
 
     @defer.inlineCallbacks
     def test_regex_room_member_is_checked(self):
-        self.service.namespaces[ApplicationService.NS_USERS].append(
-            _regex("@irc_.*")
-        )
+        self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*"))
         self.event.sender = "@someone_else:matrix.org"
         self.event.type = "m.room.member"
         self.event.state_key = "@irc_foobar:matrix.org"
@@ -97,60 +88,47 @@ class ApplicationServiceTestCase(unittest.TestCase):
             _regex("#irc_.*:matrix.org")
         )
         self.store.get_aliases_for_room.return_value = [
-            "#irc_foobar:matrix.org", "#athing:matrix.org"
+            "#irc_foobar:matrix.org",
+            "#athing:matrix.org",
         ]
         self.store.get_users_in_room.return_value = []
-        self.assertTrue((yield self.service.is_interested(
-            self.event, self.store
-        )))
+        self.assertTrue((yield self.service.is_interested(self.event, self.store)))
 
     def test_non_exclusive_alias(self):
         self.service.namespaces[ApplicationService.NS_ALIASES].append(
             _regex("#irc_.*:matrix.org", exclusive=False)
         )
-        self.assertFalse(self.service.is_exclusive_alias(
-            "#irc_foobar:matrix.org"
-        ))
+        self.assertFalse(self.service.is_exclusive_alias("#irc_foobar:matrix.org"))
 
     def test_non_exclusive_room(self):
         self.service.namespaces[ApplicationService.NS_ROOMS].append(
             _regex("!irc_.*:matrix.org", exclusive=False)
         )
-        self.assertFalse(self.service.is_exclusive_room(
-            "!irc_foobar:matrix.org"
-        ))
+        self.assertFalse(self.service.is_exclusive_room("!irc_foobar:matrix.org"))
 
     def test_non_exclusive_user(self):
         self.service.namespaces[ApplicationService.NS_USERS].append(
             _regex("@irc_.*:matrix.org", exclusive=False)
         )
-        self.assertFalse(self.service.is_exclusive_user(
-            "@irc_foobar:matrix.org"
-        ))
+        self.assertFalse(self.service.is_exclusive_user("@irc_foobar:matrix.org"))
 
     def test_exclusive_alias(self):
         self.service.namespaces[ApplicationService.NS_ALIASES].append(
             _regex("#irc_.*:matrix.org", exclusive=True)
         )
-        self.assertTrue(self.service.is_exclusive_alias(
-            "#irc_foobar:matrix.org"
-        ))
+        self.assertTrue(self.service.is_exclusive_alias("#irc_foobar:matrix.org"))
 
     def test_exclusive_user(self):
         self.service.namespaces[ApplicationService.NS_USERS].append(
             _regex("@irc_.*:matrix.org", exclusive=True)
         )
-        self.assertTrue(self.service.is_exclusive_user(
-            "@irc_foobar:matrix.org"
-        ))
+        self.assertTrue(self.service.is_exclusive_user("@irc_foobar:matrix.org"))
 
     def test_exclusive_room(self):
         self.service.namespaces[ApplicationService.NS_ROOMS].append(
             _regex("!irc_.*:matrix.org", exclusive=True)
         )
-        self.assertTrue(self.service.is_exclusive_room(
-            "!irc_foobar:matrix.org"
-        ))
+        self.assertTrue(self.service.is_exclusive_room("!irc_foobar:matrix.org"))
 
     @defer.inlineCallbacks
     def test_regex_alias_no_match(self):
@@ -158,47 +136,36 @@ class ApplicationServiceTestCase(unittest.TestCase):
             _regex("#irc_.*:matrix.org")
         )
         self.store.get_aliases_for_room.return_value = [
-            "#xmpp_foobar:matrix.org", "#athing:matrix.org"
+            "#xmpp_foobar:matrix.org",
+            "#athing:matrix.org",
         ]
         self.store.get_users_in_room.return_value = []
-        self.assertFalse((yield self.service.is_interested(
-            self.event, self.store
-        )))
+        self.assertFalse((yield self.service.is_interested(self.event, self.store)))
 
     @defer.inlineCallbacks
     def test_regex_multiple_matches(self):
         self.service.namespaces[ApplicationService.NS_ALIASES].append(
             _regex("#irc_.*:matrix.org")
         )
-        self.service.namespaces[ApplicationService.NS_USERS].append(
-            _regex("@irc_.*")
-        )
+        self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*"))
         self.event.sender = "@irc_foobar:matrix.org"
         self.store.get_aliases_for_room.return_value = ["#irc_barfoo:matrix.org"]
         self.store.get_users_in_room.return_value = []
-        self.assertTrue((yield self.service.is_interested(
-            self.event, self.store
-        )))
+        self.assertTrue((yield self.service.is_interested(self.event, self.store)))
 
     @defer.inlineCallbacks
     def test_interested_in_self(self):
         # make sure invites get through
         self.service.sender = "@appservice:name"
-        self.service.namespaces[ApplicationService.NS_USERS].append(
-            _regex("@irc_.*")
-        )
+        self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*"))
         self.event.type = "m.room.member"
-        self.event.content = {
-            "membership": "invite"
-        }
+        self.event.content = {"membership": "invite"}
         self.event.state_key = self.service.sender
         self.assertTrue((yield self.service.is_interested(self.event)))
 
     @defer.inlineCallbacks
     def test_member_list_match(self):
-        self.service.namespaces[ApplicationService.NS_USERS].append(
-            _regex("@irc_.*")
-        )
+        self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*"))
         self.store.get_users_in_room.return_value = [
             "@alice:here",
             "@irc_fo:here",  # AS user
@@ -207,6 +174,6 @@ class ApplicationServiceTestCase(unittest.TestCase):
         self.store.get_aliases_for_room.return_value = []
 
         self.event.sender = "@xmpp_foobar:matrix.org"
-        self.assertTrue((yield self.service.is_interested(
-            event=self.event, store=self.store
-        )))
+        self.assertTrue(
+            (yield self.service.is_interested(event=self.event, store=self.store))
+        )
diff --git a/tests/appservice/test_scheduler.py b/tests/appservice/test_scheduler.py
index 9181692771..db9f86bdac 100644
--- a/tests/appservice/test_scheduler.py
+++ b/tests/appservice/test_scheduler.py
@@ -12,20 +12,24 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from mock import Mock
+
+from twisted.internet import defer
+
 from synapse.appservice import ApplicationServiceState
 from synapse.appservice.scheduler import (
-    _ServiceQueuer, _TransactionController, _Recoverer
+    _Recoverer,
+    _ServiceQueuer,
+    _TransactionController,
 )
-from twisted.internet import defer
-
 from synapse.util.logcontext import make_deferred_yieldable
-from ..utils import MockClock
-from mock import Mock
+
 from tests import unittest
 
+from ..utils import MockClock
 
-class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase):
 
+class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase):
     def setUp(self):
         self.clock = MockClock()
         self.store = Mock()
@@ -33,8 +37,10 @@ class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase):
         self.recoverer = Mock()
         self.recoverer_fn = Mock(return_value=self.recoverer)
         self.txnctrl = _TransactionController(
-            clock=self.clock, store=self.store, as_api=self.as_api,
-            recoverer_fn=self.recoverer_fn
+            clock=self.clock,
+            store=self.store,
+            as_api=self.as_api,
+            recoverer_fn=self.recoverer_fn,
         )
 
     def test_single_service_up_txn_sent(self):
@@ -49,9 +55,7 @@ class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase):
             return_value=defer.succeed(ApplicationServiceState.UP)
         )
         txn.send = Mock(return_value=defer.succeed(True))
-        self.store.create_appservice_txn = Mock(
-            return_value=defer.succeed(txn)
-        )
+        self.store.create_appservice_txn = Mock(return_value=defer.succeed(txn))
 
         # actual call
         self.txnctrl.send(service, events)
@@ -72,9 +76,7 @@ class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase):
         self.store.get_appservice_state = Mock(
             return_value=defer.succeed(ApplicationServiceState.DOWN)
         )
-        self.store.create_appservice_txn = Mock(
-            return_value=defer.succeed(txn)
-        )
+        self.store.create_appservice_txn = Mock(return_value=defer.succeed(txn))
 
         # actual call
         self.txnctrl.send(service, events)
@@ -99,9 +101,7 @@ class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase):
         )
         self.store.set_appservice_state = Mock(return_value=defer.succeed(True))
         txn.send = Mock(return_value=defer.succeed(False))  # fails to send
-        self.store.create_appservice_txn = Mock(
-            return_value=defer.succeed(txn)
-        )
+        self.store.create_appservice_txn = Mock(return_value=defer.succeed(txn))
 
         # actual call
         self.txnctrl.send(service, events)
@@ -119,7 +119,6 @@ class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase):
 
 
 class ApplicationServiceSchedulerRecovererTestCase(unittest.TestCase):
-
     def setUp(self):
         self.clock = MockClock()
         self.as_api = Mock()
@@ -141,6 +140,7 @@ class ApplicationServiceSchedulerRecovererTestCase(unittest.TestCase):
 
         def take_txn(*args, **kwargs):
             return defer.succeed(txns.pop(0))
+
         self.store.get_oldest_unsent_txn = Mock(side_effect=take_txn)
 
         self.recoverer.recover()
@@ -166,6 +166,7 @@ class ApplicationServiceSchedulerRecovererTestCase(unittest.TestCase):
                 return defer.succeed(txns.pop(0))
             else:
                 return defer.succeed(txn)
+
         self.store.get_oldest_unsent_txn = Mock(side_effect=take_txn)
 
         self.recoverer.recover()
@@ -192,7 +193,6 @@ class ApplicationServiceSchedulerRecovererTestCase(unittest.TestCase):
 
 
 class ApplicationServiceSchedulerQueuerTestCase(unittest.TestCase):
-
     def setUp(self):
         self.txn_ctrl = Mock()
         self.queuer = _ServiceQueuer(self.txn_ctrl, MockClock())
@@ -206,9 +206,7 @@ class ApplicationServiceSchedulerQueuerTestCase(unittest.TestCase):
 
     def test_send_single_event_with_queue(self):
         d = defer.Deferred()
-        self.txn_ctrl.send = Mock(
-            side_effect=lambda x, y: make_deferred_yieldable(d),
-        )
+        self.txn_ctrl.send = Mock(side_effect=lambda x, y: make_deferred_yieldable(d))
         service = Mock(id=4)
         event = Mock(event_id="first")
         event2 = Mock(event_id="second")
@@ -242,6 +240,7 @@ class ApplicationServiceSchedulerQueuerTestCase(unittest.TestCase):
 
         def do_send(x, y):
             return make_deferred_yieldable(send_return_list.pop(0))
+
         self.txn_ctrl.send = Mock(side_effect=do_send)
 
         # send events for different ASes and make sure they are sent
diff --git a/tests/config/test_generate.py b/tests/config/test_generate.py
index 879159ccea..f88d28a19d 100644
--- a/tests/config/test_generate.py
+++ b/tests/config/test_generate.py
@@ -19,11 +19,11 @@ import shutil
 import tempfile
 
 from synapse.config.homeserver import HomeServerConfig
+
 from tests import unittest
 
 
 class ConfigGenerationTestCase(unittest.TestCase):
-
     def setUp(self):
         self.dir = tempfile.mkdtemp()
         self.file = os.path.join(self.dir, "homeserver.yaml")
@@ -32,23 +32,30 @@ class ConfigGenerationTestCase(unittest.TestCase):
         shutil.rmtree(self.dir)
 
     def test_generate_config_generates_files(self):
-        HomeServerConfig.load_or_generate_config("", [
-            "--generate-config",
-            "-c", self.file,
-            "--report-stats=yes",
-            "-H", "lemurs.win"
-        ])
+        HomeServerConfig.load_or_generate_config(
+            "",
+            [
+                "--generate-config",
+                "-c",
+                self.file,
+                "--report-stats=yes",
+                "-H",
+                "lemurs.win",
+            ],
+        )
 
         self.assertSetEqual(
-            set([
-                "homeserver.yaml",
-                "lemurs.win.log.config",
-                "lemurs.win.signing.key",
-                "lemurs.win.tls.crt",
-                "lemurs.win.tls.dh",
-                "lemurs.win.tls.key",
-            ]),
-            set(os.listdir(self.dir))
+            set(
+                [
+                    "homeserver.yaml",
+                    "lemurs.win.log.config",
+                    "lemurs.win.signing.key",
+                    "lemurs.win.tls.crt",
+                    "lemurs.win.tls.dh",
+                    "lemurs.win.tls.key",
+                ]
+            ),
+            set(os.listdir(self.dir)),
         )
 
         self.assert_log_filename_is(
diff --git a/tests/config/test_load.py b/tests/config/test_load.py
index 772afd2cf9..d5f1777093 100644
--- a/tests/config/test_load.py
+++ b/tests/config/test_load.py
@@ -15,13 +15,15 @@
 import os.path
 import shutil
 import tempfile
+
 import yaml
+
 from synapse.config.homeserver import HomeServerConfig
+
 from tests import unittest
 
 
 class ConfigLoadingTestCase(unittest.TestCase):
-
     def setUp(self):
         self.dir = tempfile.mkdtemp()
         print(self.dir)
@@ -40,15 +42,14 @@ class ConfigLoadingTestCase(unittest.TestCase):
     def test_generates_and_loads_macaroon_secret_key(self):
         self.generate_config()
 
-        with open(self.file,
-                  "r") as f:
+        with open(self.file, "r") as f:
             raw = yaml.load(f)
         self.assertIn("macaroon_secret_key", raw)
 
         config = HomeServerConfig.load_config("", ["-c", self.file])
         self.assertTrue(
             hasattr(config, "macaroon_secret_key"),
-            "Want config to have attr macaroon_secret_key"
+            "Want config to have attr macaroon_secret_key",
         )
         if len(config.macaroon_secret_key) < 5:
             self.fail(
@@ -59,7 +60,7 @@ class ConfigLoadingTestCase(unittest.TestCase):
         config = HomeServerConfig.load_or_generate_config("", ["-c", self.file])
         self.assertTrue(
             hasattr(config, "macaroon_secret_key"),
-            "Want config to have attr macaroon_secret_key"
+            "Want config to have attr macaroon_secret_key",
         )
         if len(config.macaroon_secret_key) < 5:
             self.fail(
@@ -77,10 +78,9 @@ class ConfigLoadingTestCase(unittest.TestCase):
 
     def test_disable_registration(self):
         self.generate_config()
-        self.add_lines_to_config([
-            "enable_registration: true",
-            "disable_registration: true",
-        ])
+        self.add_lines_to_config(
+            ["enable_registration: true", "disable_registration: true"]
+        )
         # Check that disable_registration clobbers enable_registration.
         config = HomeServerConfig.load_config("", ["-c", self.file])
         self.assertFalse(config.enable_registration)
@@ -89,18 +89,23 @@ class ConfigLoadingTestCase(unittest.TestCase):
         self.assertFalse(config.enable_registration)
 
         # Check that either config value is clobbered by the command line.
-        config = HomeServerConfig.load_or_generate_config("", [
-            "-c", self.file, "--enable-registration"
-        ])
+        config = HomeServerConfig.load_or_generate_config(
+            "", ["-c", self.file, "--enable-registration"]
+        )
         self.assertTrue(config.enable_registration)
 
     def generate_config(self):
-        HomeServerConfig.load_or_generate_config("", [
-            "--generate-config",
-            "-c", self.file,
-            "--report-stats=yes",
-            "-H", "lemurs.win"
-        ])
+        HomeServerConfig.load_or_generate_config(
+            "",
+            [
+                "--generate-config",
+                "-c",
+                self.file,
+                "--report-stats=yes",
+                "-H",
+                "lemurs.win",
+            ],
+        )
 
     def generate_config_and_remove_lines_containing(self, needle):
         self.generate_config()
diff --git a/tests/crypto/test_event_signing.py b/tests/crypto/test_event_signing.py
index 47cb328a01..b2536c1e69 100644
--- a/tests/crypto/test_event_signing.py
+++ b/tests/crypto/test_event_signing.py
@@ -14,21 +14,17 @@
 # limitations under the License.
 
 
-from tests import unittest
-
-from synapse.events.builder import EventBuilder
-from synapse.crypto.event_signing import add_hashes_and_signatures
-
+import nacl.signing
 from unpaddedbase64 import decode_base64
 
-import nacl.signing
+from synapse.crypto.event_signing import add_hashes_and_signatures
+from synapse.events.builder import EventBuilder
 
+from tests import unittest
 
 # Perform these tests using given secret key so we get entirely deterministic
 # signatures output that we can test against.
-SIGNING_KEY_SEED = decode_base64(
-    "YJDBA9Xnr2sVqXD9Vj7XVUnmFZcZrlw8Md7kMW+3XA1"
-)
+SIGNING_KEY_SEED = decode_base64("YJDBA9Xnr2sVqXD9Vj7XVUnmFZcZrlw8Md7kMW+3XA1")
 
 KEY_ALG = "ed25519"
 KEY_VER = 1
@@ -38,7 +34,6 @@ HOSTNAME = "domain"
 
 
 class EventSigningTestCase(unittest.TestCase):
-
     def setUp(self):
         self.signing_key = nacl.signing.SigningKey(SIGNING_KEY_SEED)
         self.signing_key.alg = KEY_ALG
@@ -53,7 +48,7 @@ class EventSigningTestCase(unittest.TestCase):
                 'signatures': {},
                 'type': "X",
                 'unsigned': {'age_ts': 1000000},
-            },
+            }
         )
 
         add_hashes_and_signatures(builder, HOSTNAME, self.signing_key)
@@ -63,8 +58,7 @@ class EventSigningTestCase(unittest.TestCase):
         self.assertTrue(hasattr(event, 'hashes'))
         self.assertIn('sha256', event.hashes)
         self.assertEquals(
-            event.hashes['sha256'],
-            "6tJjLpXtggfke8UxFhAKg82QVkJzvKOVOOSjUDK4ZSI",
+            event.hashes['sha256'], "6tJjLpXtggfke8UxFhAKg82QVkJzvKOVOOSjUDK4ZSI"
         )
 
         self.assertTrue(hasattr(event, 'signatures'))
@@ -79,9 +73,7 @@ class EventSigningTestCase(unittest.TestCase):
     def test_sign_message(self):
         builder = EventBuilder(
             {
-                'content': {
-                    'body': "Here is the message content",
-                },
+                'content': {'body': "Here is the message content"},
                 'event_id': "$0:domain",
                 'origin': "domain",
                 'origin_server_ts': 1000000,
@@ -100,8 +92,7 @@ class EventSigningTestCase(unittest.TestCase):
         self.assertTrue(hasattr(event, 'hashes'))
         self.assertIn('sha256', event.hashes)
         self.assertEquals(
-            event.hashes['sha256'],
-            "onLKD1bGljeBWQhWZ1kaP9SorVmRQNdN5aM2JYU2n/g",
+            event.hashes['sha256'], "onLKD1bGljeBWQhWZ1kaP9SorVmRQNdN5aM2JYU2n/g"
         )
 
         self.assertTrue(hasattr(event, 'signatures'))
@@ -110,5 +101,5 @@ class EventSigningTestCase(unittest.TestCase):
         self.assertEquals(
             event.signatures[HOSTNAME][KEY_NAME],
             "Wm+VzmOUOz08Ds+0NTWb1d4CZrVsJSikkeRxh6aCcUw"
-            "u6pNC78FunoD7KNWzqFn241eYHYMGCA5McEiVPdhzBA"
+            "u6pNC78FunoD7KNWzqFn241eYHYMGCA5McEiVPdhzBA",
         )
diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py
index 149e443022..8299dc72c8 100644
--- a/tests/crypto/test_keyring.py
+++ b/tests/crypto/test_keyring.py
@@ -14,15 +14,19 @@
 # limitations under the License.
 import time
 
+from mock import Mock
+
 import signedjson.key
 import signedjson.sign
-from mock import Mock
+
+from twisted.internet import defer, reactor
+
 from synapse.api.errors import SynapseError
 from synapse.crypto import keyring
-from synapse.util import async, logcontext
+from synapse.util import Clock, logcontext
 from synapse.util.logcontext import LoggingContext
+
 from tests import unittest, utils
-from twisted.internet import defer
 
 
 class MockPerspectiveServer(object):
@@ -32,9 +36,7 @@ class MockPerspectiveServer(object):
 
     def get_verify_keys(self):
         vk = signedjson.key.get_verify_key(self.key)
-        return {
-            "%s:%s" % (vk.alg, vk.version): vk,
-        }
+        return {"%s:%s" % (vk.alg, vk.version): vk}
 
     def get_signed_key(self, server_name, verify_key):
         key_id = "%s:%s" % (verify_key.alg, verify_key.version)
@@ -43,10 +45,8 @@ class MockPerspectiveServer(object):
             "old_verify_keys": {},
             "valid_until_ts": time.time() * 1000 + 3600,
             "verify_keys": {
-                key_id: {
-                    "key": signedjson.key.encode_verify_key_base64(verify_key)
-                }
-            }
+                key_id: {"key": signedjson.key.encode_verify_key_base64(verify_key)}
+            },
         }
         signedjson.sign.sign_json(res, self.server_name, self.key)
         return res
@@ -58,18 +58,14 @@ class KeyringTestCase(unittest.TestCase):
         self.mock_perspective_server = MockPerspectiveServer()
         self.http_client = Mock()
         self.hs = yield utils.setup_test_homeserver(
-            handlers=None,
-            http_client=self.http_client,
+            self.addCleanup, handlers=None, http_client=self.http_client
         )
-        self.hs.config.perspectives = {
-            self.mock_perspective_server.server_name:
-                self.mock_perspective_server.get_verify_keys()
-        }
+        keys = self.mock_perspective_server.get_verify_keys()
+        self.hs.config.perspectives = {self.mock_perspective_server.server_name: keys}
 
     def check_context(self, _, expected):
         self.assertEquals(
-            getattr(LoggingContext.current_context(), "request", None),
-            expected
+            getattr(LoggingContext.current_context(), "request", None), expected
         )
 
     @defer.inlineCallbacks
@@ -85,8 +81,7 @@ class KeyringTestCase(unittest.TestCase):
             context_one.request = "one"
 
             wait_1_deferred = kr.wait_for_previous_lookups(
-                ["server1"],
-                {"server1": lookup_1_deferred},
+                ["server1"], {"server1": lookup_1_deferred}
             )
 
             # there were no previous lookups, so the deferred should be ready
@@ -101,8 +96,7 @@ class KeyringTestCase(unittest.TestCase):
             # set off another wait. It should block because the first lookup
             # hasn't yet completed.
             wait_2_deferred = kr.wait_for_previous_lookups(
-                ["server1"],
-                {"server1": lookup_2_deferred},
+                ["server1"], {"server1": lookup_2_deferred}
             )
             self.assertFalse(wait_2_deferred.called)
             # ... so we should have reset the LoggingContext.
@@ -118,6 +112,7 @@ class KeyringTestCase(unittest.TestCase):
 
     @defer.inlineCallbacks
     def test_verify_json_objects_for_server_awaits_previous_requests(self):
+        clock = Clock(reactor)
         key1 = signedjson.key.generate_signing_key(1)
 
         kr = keyring.Keyring(self.hs)
@@ -127,21 +122,19 @@ class KeyringTestCase(unittest.TestCase):
         persp_resp = {
             "server_keys": [
                 self.mock_perspective_server.get_signed_key(
-                    "server10",
-                    signedjson.key.get_verify_key(key1)
-                ),
+                    "server10", signedjson.key.get_verify_key(key1)
+                )
             ]
         }
         persp_deferred = defer.Deferred()
 
         @defer.inlineCallbacks
         def get_perspectives(**kwargs):
-            self.assertEquals(
-                LoggingContext.current_context().request, "11",
-            )
+            self.assertEquals(LoggingContext.current_context().request, "11")
             with logcontext.PreserveLoggingContext():
                 yield persp_deferred
             defer.returnValue(persp_resp)
+
         self.http_client.post_json.side_effect = get_perspectives
 
         with LoggingContext("11") as context_11:
@@ -149,9 +142,7 @@ class KeyringTestCase(unittest.TestCase):
 
             # start off a first set of lookups
             res_deferreds = kr.verify_json_objects_for_server(
-                [("server10", json1),
-                 ("server11", {})
-                 ]
+                [("server10", json1), ("server11", {})]
             )
 
             # the unsigned json should be rejected pretty quickly
@@ -167,7 +158,7 @@ class KeyringTestCase(unittest.TestCase):
 
             # wait a tick for it to send the request to the perspectives server
             # (it first tries the datastore)
-            yield async.sleep(1)   # XXX find out why this takes so long!
+            yield clock.sleep(1)  # XXX find out why this takes so long!
             self.http_client.post_json.assert_called_once()
 
             self.assertIs(LoggingContext.current_context(), context_11)
@@ -181,9 +172,9 @@ class KeyringTestCase(unittest.TestCase):
                 self.http_client.post_json.return_value = defer.Deferred()
 
                 res_deferreds_2 = kr.verify_json_objects_for_server(
-                    [("server10", json1)],
+                    [("server10", json1)]
                 )
-                yield async.sleep(1)
+                yield clock.sleep(1)
                 self.http_client.post_json.assert_not_called()
                 res_deferreds_2[0].addBoth(self.check_context, None)
 
@@ -202,8 +193,7 @@ class KeyringTestCase(unittest.TestCase):
 
         key1 = signedjson.key.generate_signing_key(1)
         yield self.hs.datastore.store_server_verify_key(
-            "server9", "", time.time() * 1000,
-            signedjson.key.get_verify_key(key1),
+            "server9", "", time.time() * 1000, signedjson.key.get_verify_key(key1)
         )
         json1 = {}
         signedjson.sign.sign_json(json1, "server9", key1)
diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py
index dfc870066e..ff217ca8b9 100644
--- a/tests/events/test_utils.py
+++ b/tests/events/test_utils.py
@@ -14,11 +14,11 @@
 # limitations under the License.
 
 
-from .. import unittest
-
 from synapse.events import FrozenEvent
 from synapse.events.utils import prune_event, serialize_event
 
+from .. import unittest
+
 
 def MockEvent(**kwargs):
     if "event_id" not in kwargs:
@@ -31,25 +31,20 @@ def MockEvent(**kwargs):
 class PruneEventTestCase(unittest.TestCase):
     """ Asserts that a new event constructed with `evdict` will look like
     `matchdict` when it is redacted. """
+
     def run_test(self, evdict, matchdict):
-        self.assertEquals(
-            prune_event(FrozenEvent(evdict)).get_dict(),
-            matchdict
-        )
+        self.assertEquals(prune_event(FrozenEvent(evdict)).get_dict(), matchdict)
 
     def test_minimal(self):
         self.run_test(
-            {
-                'type': 'A',
-                'event_id': '$test:domain',
-            },
+            {'type': 'A', 'event_id': '$test:domain'},
             {
                 'type': 'A',
                 'event_id': '$test:domain',
                 'content': {},
                 'signatures': {},
                 'unsigned': {},
-            }
+            },
         )
 
     def test_basic_keys(self):
@@ -70,23 +65,19 @@ class PruneEventTestCase(unittest.TestCase):
                 'content': {},
                 'signatures': {},
                 'unsigned': {},
-            }
+            },
         )
 
     def test_unsigned_age_ts(self):
         self.run_test(
-            {
-                'type': 'B',
-                'event_id': '$test:domain',
-                'unsigned': {'age_ts': 20},
-            },
+            {'type': 'B', 'event_id': '$test:domain', 'unsigned': {'age_ts': 20}},
             {
                 'type': 'B',
                 'event_id': '$test:domain',
                 'content': {},
                 'signatures': {},
                 'unsigned': {'age_ts': 20},
-            }
+            },
         )
 
         self.run_test(
@@ -101,23 +92,19 @@ class PruneEventTestCase(unittest.TestCase):
                 'content': {},
                 'signatures': {},
                 'unsigned': {},
-            }
+            },
         )
 
     def test_content(self):
         self.run_test(
-            {
-                'type': 'C',
-                'event_id': '$test:domain',
-                'content': {'things': 'here'},
-            },
+            {'type': 'C', 'event_id': '$test:domain', 'content': {'things': 'here'}},
             {
                 'type': 'C',
                 'event_id': '$test:domain',
                 'content': {},
                 'signatures': {},
                 'unsigned': {},
-            }
+            },
         )
 
         self.run_test(
@@ -132,27 +119,20 @@ class PruneEventTestCase(unittest.TestCase):
                 'content': {'creator': '@2:domain'},
                 'signatures': {},
                 'unsigned': {},
-            }
+            },
         )
 
 
 class SerializeEventTestCase(unittest.TestCase):
-
     def serialize(self, ev, fields):
         return serialize_event(ev, 1479807801915, only_event_fields=fields)
 
     def test_event_fields_works_with_keys(self):
         self.assertEquals(
             self.serialize(
-                MockEvent(
-                    sender="@alice:localhost",
-                    room_id="!foo:bar"
-                ),
-                ["room_id"]
+                MockEvent(sender="@alice:localhost", room_id="!foo:bar"), ["room_id"]
             ),
-            {
-                "room_id": "!foo:bar",
-            }
+            {"room_id": "!foo:bar"},
         )
 
     def test_event_fields_works_with_nested_keys(self):
@@ -161,17 +141,11 @@ class SerializeEventTestCase(unittest.TestCase):
                 MockEvent(
                     sender="@alice:localhost",
                     room_id="!foo:bar",
-                    content={
-                        "body": "A message",
-                    },
+                    content={"body": "A message"},
                 ),
-                ["content.body"]
+                ["content.body"],
             ),
-            {
-                "content": {
-                    "body": "A message",
-                }
-            }
+            {"content": {"body": "A message"}},
         )
 
     def test_event_fields_works_with_dot_keys(self):
@@ -180,17 +154,11 @@ class SerializeEventTestCase(unittest.TestCase):
                 MockEvent(
                     sender="@alice:localhost",
                     room_id="!foo:bar",
-                    content={
-                        "key.with.dots": {},
-                    },
+                    content={"key.with.dots": {}},
                 ),
-                ["content.key\.with\.dots"]
+                ["content.key\.with\.dots"],
             ),
-            {
-                "content": {
-                    "key.with.dots": {},
-                }
-            }
+            {"content": {"key.with.dots": {}}},
         )
 
     def test_event_fields_works_with_nested_dot_keys(self):
@@ -201,21 +169,12 @@ class SerializeEventTestCase(unittest.TestCase):
                     room_id="!foo:bar",
                     content={
                         "not_me": 1,
-                        "nested.dot.key": {
-                            "leaf.key": 42,
-                            "not_me_either": 1,
-                        },
+                        "nested.dot.key": {"leaf.key": 42, "not_me_either": 1},
                     },
                 ),
-                ["content.nested\.dot\.key.leaf\.key"]
+                ["content.nested\.dot\.key.leaf\.key"],
             ),
-            {
-                "content": {
-                    "nested.dot.key": {
-                        "leaf.key": 42,
-                    },
-                }
-            }
+            {"content": {"nested.dot.key": {"leaf.key": 42}}},
         )
 
     def test_event_fields_nops_with_unknown_keys(self):
@@ -224,17 +183,11 @@ class SerializeEventTestCase(unittest.TestCase):
                 MockEvent(
                     sender="@alice:localhost",
                     room_id="!foo:bar",
-                    content={
-                        "foo": "bar",
-                    },
+                    content={"foo": "bar"},
                 ),
-                ["content.foo", "content.notexists"]
+                ["content.foo", "content.notexists"],
             ),
-            {
-                "content": {
-                    "foo": "bar",
-                }
-            }
+            {"content": {"foo": "bar"}},
         )
 
     def test_event_fields_nops_with_non_dict_keys(self):
@@ -243,13 +196,11 @@ class SerializeEventTestCase(unittest.TestCase):
                 MockEvent(
                     sender="@alice:localhost",
                     room_id="!foo:bar",
-                    content={
-                        "foo": ["I", "am", "an", "array"],
-                    },
+                    content={"foo": ["I", "am", "an", "array"]},
                 ),
-                ["content.foo.am"]
+                ["content.foo.am"],
             ),
-            {}
+            {},
         )
 
     def test_event_fields_nops_with_array_keys(self):
@@ -258,13 +209,11 @@ class SerializeEventTestCase(unittest.TestCase):
                 MockEvent(
                     sender="@alice:localhost",
                     room_id="!foo:bar",
-                    content={
-                        "foo": ["I", "am", "an", "array"],
-                    },
+                    content={"foo": ["I", "am", "an", "array"]},
                 ),
-                ["content.foo.1"]
+                ["content.foo.1"],
             ),
-            {}
+            {},
         )
 
     def test_event_fields_all_fields_if_empty(self):
@@ -274,31 +223,21 @@ class SerializeEventTestCase(unittest.TestCase):
                     type="foo",
                     event_id="test",
                     room_id="!foo:bar",
-                    content={
-                        "foo": "bar",
-                    },
+                    content={"foo": "bar"},
                 ),
-                []
+                [],
             ),
             {
                 "type": "foo",
                 "event_id": "test",
                 "room_id": "!foo:bar",
-                "content": {
-                    "foo": "bar",
-                },
-                "unsigned": {}
-            }
+                "content": {"foo": "bar"},
+                "unsigned": {},
+            },
         )
 
     def test_event_fields_fail_if_fields_not_str(self):
         with self.assertRaises(TypeError):
             self.serialize(
-                MockEvent(
-                    room_id="!foo:bar",
-                    content={
-                        "foo": "bar",
-                    },
-                ),
-                ["room_id", 4]
+                MockEvent(room_id="!foo:bar", content={"foo": "bar"}), ["room_id", 4]
             )
diff --git a/tests/federation/__init__.py b/tests/federation/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/federation/__init__.py
diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py
new file mode 100644
index 0000000000..af15f4cc5a
--- /dev/null
+++ b/tests/federation/test_federation_server.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+
+from synapse.events import FrozenEvent
+from synapse.federation.federation_server import server_matches_acl_event
+
+from tests import unittest
+
+
+@unittest.DEBUG
+class ServerACLsTestCase(unittest.TestCase):
+    def test_blacklisted_server(self):
+        e = _create_acl_event({"allow": ["*"], "deny": ["evil.com"]})
+        logging.info("ACL event: %s", e.content)
+
+        self.assertFalse(server_matches_acl_event("evil.com", e))
+        self.assertFalse(server_matches_acl_event("EVIL.COM", e))
+
+        self.assertTrue(server_matches_acl_event("evil.com.au", e))
+        self.assertTrue(server_matches_acl_event("honestly.not.evil.com", e))
+
+    def test_block_ip_literals(self):
+        e = _create_acl_event({"allow_ip_literals": False, "allow": ["*"]})
+        logging.info("ACL event: %s", e.content)
+
+        self.assertFalse(server_matches_acl_event("1.2.3.4", e))
+        self.assertTrue(server_matches_acl_event("1a.2.3.4", e))
+        self.assertFalse(server_matches_acl_event("[1:2::]", e))
+        self.assertTrue(server_matches_acl_event("1:2:3:4", e))
+
+
+def _create_acl_event(content):
+    return FrozenEvent(
+        {
+            "room_id": "!a:b",
+            "event_id": "$a:b",
+            "type": "m.room.server_acls",
+            "sender": "@a:b",
+            "content": content,
+        }
+    )
diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py
index b753455943..ba7148ec01 100644
--- a/tests/handlers/test_appservice.py
+++ b/tests/handlers/test_appservice.py
@@ -13,13 +13,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from mock import Mock
+
 from twisted.internet import defer
-from .. import unittest
-from tests.utils import MockClock
 
 from synapse.handlers.appservice import ApplicationServicesHandler
 
-from mock import Mock
+from tests.utils import MockClock
+
+from .. import unittest
 
 
 class AppServiceHandlerTestCase(unittest.TestCase):
@@ -43,20 +45,18 @@ class AppServiceHandlerTestCase(unittest.TestCase):
         services = [
             self._mkservice(is_interested=False),
             interested_service,
-            self._mkservice(is_interested=False)
+            self._mkservice(is_interested=False),
         ]
 
         self.mock_store.get_app_services = Mock(return_value=services)
         self.mock_store.get_user_by_id = Mock(return_value=[])
 
         event = Mock(
-            sender="@someone:anywhere",
-            type="m.room.message",
-            room_id="!foo:bar"
+            sender="@someone:anywhere", type="m.room.message", room_id="!foo:bar"
         )
         self.mock_store.get_new_events_for_appservice.side_effect = [
             (0, [event]),
-            (0, [])
+            (0, []),
         ]
         self.mock_as_api.push = Mock()
         yield self.handler.notify_interested_services(0)
@@ -72,21 +72,15 @@ class AppServiceHandlerTestCase(unittest.TestCase):
         self.mock_store.get_app_services = Mock(return_value=services)
         self.mock_store.get_user_by_id = Mock(return_value=None)
 
-        event = Mock(
-            sender=user_id,
-            type="m.room.message",
-            room_id="!foo:bar"
-        )
+        event = Mock(sender=user_id, type="m.room.message", room_id="!foo:bar")
         self.mock_as_api.push = Mock()
         self.mock_as_api.query_user = Mock()
         self.mock_store.get_new_events_for_appservice.side_effect = [
             (0, [event]),
-            (0, [])
+            (0, []),
         ]
         yield self.handler.notify_interested_services(0)
-        self.mock_as_api.query_user.assert_called_once_with(
-            services[0], user_id
-        )
+        self.mock_as_api.query_user.assert_called_once_with(services[0], user_id)
 
     @defer.inlineCallbacks
     def test_query_user_exists_known_user(self):
@@ -94,25 +88,19 @@ class AppServiceHandlerTestCase(unittest.TestCase):
         services = [self._mkservice(is_interested=True)]
         services[0].is_interested_in_user = Mock(return_value=True)
         self.mock_store.get_app_services = Mock(return_value=services)
-        self.mock_store.get_user_by_id = Mock(return_value={
-            "name": user_id
-        })
+        self.mock_store.get_user_by_id = Mock(return_value={"name": user_id})
 
-        event = Mock(
-            sender=user_id,
-            type="m.room.message",
-            room_id="!foo:bar"
-        )
+        event = Mock(sender=user_id, type="m.room.message", room_id="!foo:bar")
         self.mock_as_api.push = Mock()
         self.mock_as_api.query_user = Mock()
         self.mock_store.get_new_events_for_appservice.side_effect = [
             (0, [event]),
-            (0, [])
+            (0, []),
         ]
         yield self.handler.notify_interested_services(0)
         self.assertFalse(
             self.mock_as_api.query_user.called,
-            "query_user called when it shouldn't have been."
+            "query_user called when it shouldn't have been.",
         )
 
     @defer.inlineCallbacks
@@ -127,7 +115,7 @@ class AppServiceHandlerTestCase(unittest.TestCase):
         services = [
             self._mkservice_alias(is_interested_in_alias=False),
             interested_service,
-            self._mkservice_alias(is_interested_in_alias=False)
+            self._mkservice_alias(is_interested_in_alias=False),
         ]
 
         self.mock_store.get_app_services = Mock(return_value=services)
@@ -138,8 +126,7 @@ class AppServiceHandlerTestCase(unittest.TestCase):
         result = yield self.handler.query_room_alias_exists(room_alias)
 
         self.mock_as_api.query_alias.assert_called_once_with(
-            interested_service,
-            room_alias_str
+            interested_service, room_alias_str
         )
         self.assertEquals(result.room_id, room_id)
         self.assertEquals(result.servers, servers)
diff --git a/tests/handlers/test_auth.py b/tests/handlers/test_auth.py
index 1822dcf1e0..1e39fe0ec2 100644
--- a/tests/handlers/test_auth.py
+++ b/tests/handlers/test_auth.py
@@ -12,13 +12,17 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from mock import Mock
 
 import pymacaroons
+
 from twisted.internet import defer
 
 import synapse
 import synapse.api.errors
+from synapse.api.errors import ResourceLimitError
 from synapse.handlers.auth import AuthHandler
+
 from tests import unittest
 from tests.utils import setup_test_homeserver
 
@@ -31,10 +35,14 @@ class AuthHandlers(object):
 class AuthTestCase(unittest.TestCase):
     @defer.inlineCallbacks
     def setUp(self):
-        self.hs = yield setup_test_homeserver(handlers=None)
+        self.hs = yield setup_test_homeserver(self.addCleanup, handlers=None)
         self.hs.handlers = AuthHandlers(self.hs)
         self.auth_handler = self.hs.handlers.auth_handler
         self.macaroon_generator = self.hs.get_macaroon_generator()
+        # MAU tests
+        self.hs.config.max_mau_value = 50
+        self.small_number_of_users = 1
+        self.large_number_of_users = 100
 
     def test_token_is_a_macaroon(self):
         token = self.macaroon_generator.generate_access_token("some_user")
@@ -69,45 +77,123 @@ class AuthTestCase(unittest.TestCase):
         v.satisfy_general(verify_nonce)
         v.verify(macaroon, self.hs.config.macaroon_secret_key)
 
+    @defer.inlineCallbacks
     def test_short_term_login_token_gives_user_id(self):
         self.hs.clock.now = 1000
 
-        token = self.macaroon_generator.generate_short_term_login_token(
-            "a_user", 5000
-        )
-
-        self.assertEqual(
-            "a_user",
-            self.auth_handler.validate_short_term_login_token_and_get_user_id(
-                token
-            )
+        token = self.macaroon_generator.generate_short_term_login_token("a_user", 5000)
+        user_id = yield self.auth_handler.validate_short_term_login_token_and_get_user_id(
+            token
         )
+        self.assertEqual("a_user", user_id)
 
         # when we advance the clock, the token should be rejected
         self.hs.clock.now = 6000
         with self.assertRaises(synapse.api.errors.AuthError):
-            self.auth_handler.validate_short_term_login_token_and_get_user_id(
+            yield self.auth_handler.validate_short_term_login_token_and_get_user_id(
                 token
             )
 
+    @defer.inlineCallbacks
     def test_short_term_login_token_cannot_replace_user_id(self):
-        token = self.macaroon_generator.generate_short_term_login_token(
-            "a_user", 5000
-        )
+        token = self.macaroon_generator.generate_short_term_login_token("a_user", 5000)
         macaroon = pymacaroons.Macaroon.deserialize(token)
 
-        self.assertEqual(
-            "a_user",
-            self.auth_handler.validate_short_term_login_token_and_get_user_id(
-                macaroon.serialize()
-            )
+        user_id = yield self.auth_handler.validate_short_term_login_token_and_get_user_id(
+            macaroon.serialize()
         )
+        self.assertEqual("a_user", user_id)
 
         # add another "user_id" caveat, which might allow us to override the
         # user_id.
         macaroon.add_first_party_caveat("user_id = b_user")
 
         with self.assertRaises(synapse.api.errors.AuthError):
-            self.auth_handler.validate_short_term_login_token_and_get_user_id(
+            yield self.auth_handler.validate_short_term_login_token_and_get_user_id(
                 macaroon.serialize()
             )
+
+    @defer.inlineCallbacks
+    def test_mau_limits_disabled(self):
+        self.hs.config.limit_usage_by_mau = False
+        # Ensure this does not raise an exception
+        yield self.auth_handler.get_access_token_for_user_id('user_a')
+
+        yield self.auth_handler.validate_short_term_login_token_and_get_user_id(
+            self._get_macaroon().serialize()
+        )
+
+    @defer.inlineCallbacks
+    def test_mau_limits_exceeded_large(self):
+        self.hs.config.limit_usage_by_mau = True
+        self.hs.get_datastore().get_monthly_active_count = Mock(
+            return_value=defer.succeed(self.large_number_of_users)
+        )
+
+        with self.assertRaises(ResourceLimitError):
+            yield self.auth_handler.get_access_token_for_user_id('user_a')
+
+        self.hs.get_datastore().get_monthly_active_count = Mock(
+            return_value=defer.succeed(self.large_number_of_users)
+        )
+        with self.assertRaises(ResourceLimitError):
+            yield self.auth_handler.validate_short_term_login_token_and_get_user_id(
+                self._get_macaroon().serialize()
+            )
+
+    @defer.inlineCallbacks
+    def test_mau_limits_parity(self):
+        self.hs.config.limit_usage_by_mau = True
+
+        # If not in monthly active cohort
+        self.hs.get_datastore().get_monthly_active_count = Mock(
+            return_value=defer.succeed(self.hs.config.max_mau_value)
+        )
+        with self.assertRaises(ResourceLimitError):
+            yield self.auth_handler.get_access_token_for_user_id('user_a')
+
+        self.hs.get_datastore().get_monthly_active_count = Mock(
+            return_value=defer.succeed(self.hs.config.max_mau_value)
+        )
+        with self.assertRaises(ResourceLimitError):
+            yield self.auth_handler.validate_short_term_login_token_and_get_user_id(
+                self._get_macaroon().serialize()
+            )
+        # If in monthly active cohort
+        self.hs.get_datastore().user_last_seen_monthly_active = Mock(
+            return_value=defer.succeed(self.hs.get_clock().time_msec())
+        )
+        self.hs.get_datastore().get_monthly_active_count = Mock(
+            return_value=defer.succeed(self.hs.config.max_mau_value)
+        )
+        yield self.auth_handler.get_access_token_for_user_id('user_a')
+        self.hs.get_datastore().user_last_seen_monthly_active = Mock(
+            return_value=defer.succeed(self.hs.get_clock().time_msec())
+        )
+        self.hs.get_datastore().get_monthly_active_count = Mock(
+            return_value=defer.succeed(self.hs.config.max_mau_value)
+        )
+        yield self.auth_handler.validate_short_term_login_token_and_get_user_id(
+            self._get_macaroon().serialize()
+        )
+
+    @defer.inlineCallbacks
+    def test_mau_limits_not_exceeded(self):
+        self.hs.config.limit_usage_by_mau = True
+
+        self.hs.get_datastore().get_monthly_active_count = Mock(
+            return_value=defer.succeed(self.small_number_of_users)
+        )
+        # Ensure this does not raise an exception
+        yield self.auth_handler.get_access_token_for_user_id('user_a')
+
+        self.hs.get_datastore().get_monthly_active_count = Mock(
+            return_value=defer.succeed(self.small_number_of_users)
+        )
+        yield self.auth_handler.validate_short_term_login_token_and_get_user_id(
+            self._get_macaroon().serialize()
+        )
+
+    def _get_macaroon(self):
+        token = self.macaroon_generator.generate_short_term_login_token("user_a", 5000)
+        return pymacaroons.Macaroon.deserialize(token)
diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py
index 778ff2f6e9..56e7acd37c 100644
--- a/tests/handlers/test_device.py
+++ b/tests/handlers/test_device.py
@@ -17,8 +17,8 @@ from twisted.internet import defer
 
 import synapse.api.errors
 import synapse.handlers.device
-
 import synapse.storage
+
 from tests import unittest, utils
 
 user1 = "@boris:aaa"
@@ -28,13 +28,13 @@ user2 = "@theresa:bbb"
 class DeviceTestCase(unittest.TestCase):
     def __init__(self, *args, **kwargs):
         super(DeviceTestCase, self).__init__(*args, **kwargs)
-        self.store = None    # type: synapse.storage.DataStore
+        self.store = None  # type: synapse.storage.DataStore
         self.handler = None  # type: synapse.handlers.device.DeviceHandler
-        self.clock = None    # type: utils.MockClock
+        self.clock = None  # type: utils.MockClock
 
     @defer.inlineCallbacks
     def setUp(self):
-        hs = yield utils.setup_test_homeserver()
+        hs = yield utils.setup_test_homeserver(self.addCleanup)
         self.handler = hs.get_device_handler()
         self.store = hs.get_datastore()
         self.clock = hs.get_clock()
@@ -44,7 +44,7 @@ class DeviceTestCase(unittest.TestCase):
         res = yield self.handler.check_device_registered(
             user_id="@boris:foo",
             device_id="fco",
-            initial_device_display_name="display name"
+            initial_device_display_name="display name",
         )
         self.assertEqual(res, "fco")
 
@@ -56,14 +56,14 @@ class DeviceTestCase(unittest.TestCase):
         res1 = yield self.handler.check_device_registered(
             user_id="@boris:foo",
             device_id="fco",
-            initial_device_display_name="display name"
+            initial_device_display_name="display name",
         )
         self.assertEqual(res1, "fco")
 
         res2 = yield self.handler.check_device_registered(
             user_id="@boris:foo",
             device_id="fco",
-            initial_device_display_name="new display name"
+            initial_device_display_name="new display name",
         )
         self.assertEqual(res2, "fco")
 
@@ -75,7 +75,7 @@ class DeviceTestCase(unittest.TestCase):
         device_id = yield self.handler.check_device_registered(
             user_id="@theresa:foo",
             device_id=None,
-            initial_device_display_name="display"
+            initial_device_display_name="display",
         )
 
         dev = yield self.handler.store.get_device("@theresa:foo", device_id)
@@ -87,43 +87,53 @@ class DeviceTestCase(unittest.TestCase):
 
         res = yield self.handler.get_devices_by_user(user1)
         self.assertEqual(3, len(res))
-        device_map = {
-            d["device_id"]: d for d in res
-        }
-        self.assertDictContainsSubset({
-            "user_id": user1,
-            "device_id": "xyz",
-            "display_name": "display 0",
-            "last_seen_ip": None,
-            "last_seen_ts": None,
-        }, device_map["xyz"])
-        self.assertDictContainsSubset({
-            "user_id": user1,
-            "device_id": "fco",
-            "display_name": "display 1",
-            "last_seen_ip": "ip1",
-            "last_seen_ts": 1000000,
-        }, device_map["fco"])
-        self.assertDictContainsSubset({
-            "user_id": user1,
-            "device_id": "abc",
-            "display_name": "display 2",
-            "last_seen_ip": "ip3",
-            "last_seen_ts": 3000000,
-        }, device_map["abc"])
+        device_map = {d["device_id"]: d for d in res}
+        self.assertDictContainsSubset(
+            {
+                "user_id": user1,
+                "device_id": "xyz",
+                "display_name": "display 0",
+                "last_seen_ip": None,
+                "last_seen_ts": None,
+            },
+            device_map["xyz"],
+        )
+        self.assertDictContainsSubset(
+            {
+                "user_id": user1,
+                "device_id": "fco",
+                "display_name": "display 1",
+                "last_seen_ip": "ip1",
+                "last_seen_ts": 1000000,
+            },
+            device_map["fco"],
+        )
+        self.assertDictContainsSubset(
+            {
+                "user_id": user1,
+                "device_id": "abc",
+                "display_name": "display 2",
+                "last_seen_ip": "ip3",
+                "last_seen_ts": 3000000,
+            },
+            device_map["abc"],
+        )
 
     @defer.inlineCallbacks
     def test_get_device(self):
         yield self._record_users()
 
         res = yield self.handler.get_device(user1, "abc")
-        self.assertDictContainsSubset({
-            "user_id": user1,
-            "device_id": "abc",
-            "display_name": "display 2",
-            "last_seen_ip": "ip3",
-            "last_seen_ts": 3000000,
-        }, res)
+        self.assertDictContainsSubset(
+            {
+                "user_id": user1,
+                "device_id": "abc",
+                "display_name": "display 2",
+                "last_seen_ip": "ip3",
+                "last_seen_ts": 3000000,
+            },
+            res,
+        )
 
     @defer.inlineCallbacks
     def test_delete_device(self):
@@ -153,8 +163,7 @@ class DeviceTestCase(unittest.TestCase):
     def test_update_unknown_device(self):
         update = {"display_name": "new_display"}
         with self.assertRaises(synapse.api.errors.NotFoundError):
-            yield self.handler.update_device("user_id", "unknown_device_id",
-                                             update)
+            yield self.handler.update_device("user_id", "unknown_device_id", update)
 
     @defer.inlineCallbacks
     def _record_users(self):
@@ -168,16 +177,17 @@ class DeviceTestCase(unittest.TestCase):
         yield self._record_user(user2, "def", "display", "token4", "ip4")
 
     @defer.inlineCallbacks
-    def _record_user(self, user_id, device_id, display_name,
-                     access_token=None, ip=None):
+    def _record_user(
+        self, user_id, device_id, display_name, access_token=None, ip=None
+    ):
         device_id = yield self.handler.check_device_registered(
             user_id=user_id,
             device_id=device_id,
-            initial_device_display_name=display_name
+            initial_device_display_name=display_name,
         )
 
         if ip is not None:
             yield self.store.insert_client_ip(
-                user_id,
-                access_token, ip, "user_agent", device_id)
+                user_id, access_token, ip, "user_agent", device_id
+            )
             self.clock.advance_time(1000)
diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py
index 7e5332e272..ec7355688b 100644
--- a/tests/handlers/test_directory.py
+++ b/tests/handlers/test_directory.py
@@ -14,14 +14,14 @@
 # limitations under the License.
 
 
-from tests import unittest
-from twisted.internet import defer
-
 from mock import Mock
 
+from twisted.internet import defer
+
 from synapse.handlers.directory import DirectoryHandler
 from synapse.types import RoomAlias
 
+from tests import unittest
 from tests.utils import setup_test_homeserver
 
 
@@ -42,9 +42,11 @@ class DirectoryTestCase(unittest.TestCase):
 
         def register_query_handler(query_type, handler):
             self.query_handlers[query_type] = handler
+
         self.mock_registry.register_query_handler = register_query_handler
 
         hs = yield setup_test_homeserver(
+            self.addCleanup,
             http_client=None,
             resource_for_federation=Mock(),
             federation_client=self.mock_federation,
@@ -68,10 +70,7 @@ class DirectoryTestCase(unittest.TestCase):
 
         result = yield self.handler.get_association(self.my_room)
 
-        self.assertEquals({
-            "room_id": "!8765qwer:test",
-            "servers": ["test"],
-        }, result)
+        self.assertEquals({"room_id": "!8765qwer:test", "servers": ["test"]}, result)
 
     @defer.inlineCallbacks
     def test_get_remote_association(self):
@@ -81,16 +80,13 @@ class DirectoryTestCase(unittest.TestCase):
 
         result = yield self.handler.get_association(self.remote_room)
 
-        self.assertEquals({
-            "room_id": "!8765qwer:test",
-            "servers": ["test", "remote"],
-        }, result)
+        self.assertEquals(
+            {"room_id": "!8765qwer:test", "servers": ["test", "remote"]}, result
+        )
         self.mock_federation.make_query.assert_called_with(
             destination="remote",
             query_type="directory",
-            args={
-                "room_alias": "#another:remote",
-            },
+            args={"room_alias": "#another:remote"},
             retry_on_dns_fail=False,
             ignore_backoff=True,
         )
@@ -105,7 +101,4 @@ class DirectoryTestCase(unittest.TestCase):
             {"room_alias": "#your-room:test"}
         )
 
-        self.assertEquals({
-            "room_id": "!8765asdf:test",
-            "servers": ["test"],
-        }, response)
+        self.assertEquals({"room_id": "!8765asdf:test", "servers": ["test"]}, response)
diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py
index d1bd87b898..8dccc6826e 100644
--- a/tests/handlers/test_e2e_keys.py
+++ b/tests/handlers/test_e2e_keys.py
@@ -14,27 +14,27 @@
 # limitations under the License.
 
 import mock
-from synapse.api import errors
+
 from twisted.internet import defer
 
 import synapse.api.errors
 import synapse.handlers.e2e_keys
-
 import synapse.storage
+from synapse.api import errors
+
 from tests import unittest, utils
 
 
 class E2eKeysHandlerTestCase(unittest.TestCase):
     def __init__(self, *args, **kwargs):
         super(E2eKeysHandlerTestCase, self).__init__(*args, **kwargs)
-        self.hs = None       # type: synapse.server.HomeServer
+        self.hs = None  # type: synapse.server.HomeServer
         self.handler = None  # type: synapse.handlers.e2e_keys.E2eKeysHandler
 
     @defer.inlineCallbacks
     def setUp(self):
         self.hs = yield utils.setup_test_homeserver(
-            handlers=None,
-            federation_client=mock.Mock(),
+            self.addCleanup, handlers=None, federation_client=mock.Mock()
         )
         self.handler = synapse.handlers.e2e_keys.E2eKeysHandler(self.hs)
 
@@ -53,30 +53,21 @@ class E2eKeysHandlerTestCase(unittest.TestCase):
         device_id = "xyz"
         keys = {
             "alg1:k1": "key1",
-            "alg2:k2": {
-                "key": "key2",
-                "signatures": {"k1": "sig1"}
-            },
-            "alg2:k3": {
-                "key": "key3",
-            },
+            "alg2:k2": {"key": "key2", "signatures": {"k1": "sig1"}},
+            "alg2:k3": {"key": "key3"},
         }
 
         res = yield self.handler.upload_keys_for_user(
-            local_user, device_id, {"one_time_keys": keys},
+            local_user, device_id, {"one_time_keys": keys}
         )
-        self.assertDictEqual(res, {
-            "one_time_key_counts": {"alg1": 1, "alg2": 2}
-        })
+        self.assertDictEqual(res, {"one_time_key_counts": {"alg1": 1, "alg2": 2}})
 
         # we should be able to change the signature without a problem
         keys["alg2:k2"]["signatures"]["k1"] = "sig2"
         res = yield self.handler.upload_keys_for_user(
-            local_user, device_id, {"one_time_keys": keys},
+            local_user, device_id, {"one_time_keys": keys}
         )
-        self.assertDictEqual(res, {
-            "one_time_key_counts": {"alg1": 1, "alg2": 2}
-        })
+        self.assertDictEqual(res, {"one_time_key_counts": {"alg1": 1, "alg2": 2}})
 
     @defer.inlineCallbacks
     def test_change_one_time_keys(self):
@@ -86,25 +77,18 @@ class E2eKeysHandlerTestCase(unittest.TestCase):
         device_id = "xyz"
         keys = {
             "alg1:k1": "key1",
-            "alg2:k2": {
-                "key": "key2",
-                "signatures": {"k1": "sig1"}
-            },
-            "alg2:k3": {
-                "key": "key3",
-            },
+            "alg2:k2": {"key": "key2", "signatures": {"k1": "sig1"}},
+            "alg2:k3": {"key": "key3"},
         }
 
         res = yield self.handler.upload_keys_for_user(
-            local_user, device_id, {"one_time_keys": keys},
+            local_user, device_id, {"one_time_keys": keys}
         )
-        self.assertDictEqual(res, {
-            "one_time_key_counts": {"alg1": 1, "alg2": 2}
-        })
+        self.assertDictEqual(res, {"one_time_key_counts": {"alg1": 1, "alg2": 2}})
 
         try:
             yield self.handler.upload_keys_for_user(
-                local_user, device_id, {"one_time_keys": {"alg1:k1": "key2"}},
+                local_user, device_id, {"one_time_keys": {"alg1:k1": "key2"}}
             )
             self.fail("No error when changing string key")
         except errors.SynapseError:
@@ -112,7 +96,7 @@ class E2eKeysHandlerTestCase(unittest.TestCase):
 
         try:
             yield self.handler.upload_keys_for_user(
-                local_user, device_id, {"one_time_keys": {"alg2:k3": "key2"}},
+                local_user, device_id, {"one_time_keys": {"alg2:k3": "key2"}}
             )
             self.fail("No error when replacing dict key with string")
         except errors.SynapseError:
@@ -120,9 +104,7 @@ class E2eKeysHandlerTestCase(unittest.TestCase):
 
         try:
             yield self.handler.upload_keys_for_user(
-                local_user, device_id, {
-                    "one_time_keys": {"alg1:k1": {"key": "key"}}
-                },
+                local_user, device_id, {"one_time_keys": {"alg1:k1": {"key": "key"}}}
             )
             self.fail("No error when replacing string key with dict")
         except errors.SynapseError:
@@ -130,13 +112,12 @@ class E2eKeysHandlerTestCase(unittest.TestCase):
 
         try:
             yield self.handler.upload_keys_for_user(
-                local_user, device_id, {
+                local_user,
+                device_id,
+                {
                     "one_time_keys": {
-                        "alg2:k2": {
-                            "key": "key3",
-                            "signatures": {"k1": "sig1"},
-                        }
-                    },
+                        "alg2:k2": {"key": "key3", "signatures": {"k1": "sig1"}}
+                    }
                 },
             )
             self.fail("No error when replacing dict key")
@@ -147,31 +128,20 @@ class E2eKeysHandlerTestCase(unittest.TestCase):
     def test_claim_one_time_key(self):
         local_user = "@boris:" + self.hs.hostname
         device_id = "xyz"
-        keys = {
-            "alg1:k1": "key1",
-        }
+        keys = {"alg1:k1": "key1"}
 
         res = yield self.handler.upload_keys_for_user(
-            local_user, device_id, {"one_time_keys": keys},
+            local_user, device_id, {"one_time_keys": keys}
+        )
+        self.assertDictEqual(res, {"one_time_key_counts": {"alg1": 1}})
+
+        res2 = yield self.handler.claim_one_time_keys(
+            {"one_time_keys": {local_user: {device_id: "alg1"}}}, timeout=None
+        )
+        self.assertEqual(
+            res2,
+            {
+                "failures": {},
+                "one_time_keys": {local_user: {device_id: {"alg1:k1": "key1"}}},
+            },
         )
-        self.assertDictEqual(res, {
-            "one_time_key_counts": {"alg1": 1}
-        })
-
-        res2 = yield self.handler.claim_one_time_keys({
-            "one_time_keys": {
-                local_user: {
-                    device_id: "alg1"
-                }
-            }
-        }, timeout=None)
-        self.assertEqual(res2, {
-            "failures": {},
-            "one_time_keys": {
-                local_user: {
-                    device_id: {
-                        "alg1:k1": "key1"
-                    }
-                }
-            }
-        })
diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py
index de06a6ad30..fc2b646ba2 100644
--- a/tests/handlers/test_presence.py
+++ b/tests/handlers/test_presence.py
@@ -14,18 +14,22 @@
 # limitations under the License.
 
 
-from tests import unittest
-
 from mock import Mock, call
 
 from synapse.api.constants import PresenceState
 from synapse.handlers.presence import (
-    handle_update, handle_timeout,
-    IDLE_TIMER, SYNC_ONLINE_TIMEOUT, LAST_ACTIVE_GRANULARITY, FEDERATION_TIMEOUT,
     FEDERATION_PING_INTERVAL,
+    FEDERATION_TIMEOUT,
+    IDLE_TIMER,
+    LAST_ACTIVE_GRANULARITY,
+    SYNC_ONLINE_TIMEOUT,
+    handle_timeout,
+    handle_update,
 )
 from synapse.storage.presence import UserPresenceState
 
+from tests import unittest
+
 
 class PresenceUpdateTestCase(unittest.TestCase):
     def test_offline_to_online(self):
@@ -35,8 +39,7 @@ class PresenceUpdateTestCase(unittest.TestCase):
 
         prev_state = UserPresenceState.default(user_id)
         new_state = prev_state.copy_and_replace(
-            state=PresenceState.ONLINE,
-            last_active_ts=now,
+            state=PresenceState.ONLINE, last_active_ts=now
         )
 
         state, persist_and_notify, federation_ping = handle_update(
@@ -50,23 +53,22 @@ class PresenceUpdateTestCase(unittest.TestCase):
         self.assertEquals(state.last_federation_update_ts, now)
 
         self.assertEquals(wheel_timer.insert.call_count, 3)
-        wheel_timer.insert.assert_has_calls([
-            call(
-                now=now,
-                obj=user_id,
-                then=new_state.last_active_ts + IDLE_TIMER
-            ),
-            call(
-                now=now,
-                obj=user_id,
-                then=new_state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT
-            ),
-            call(
-                now=now,
-                obj=user_id,
-                then=new_state.last_active_ts + LAST_ACTIVE_GRANULARITY
-            ),
-        ], any_order=True)
+        wheel_timer.insert.assert_has_calls(
+            [
+                call(now=now, obj=user_id, then=new_state.last_active_ts + IDLE_TIMER),
+                call(
+                    now=now,
+                    obj=user_id,
+                    then=new_state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT,
+                ),
+                call(
+                    now=now,
+                    obj=user_id,
+                    then=new_state.last_active_ts + LAST_ACTIVE_GRANULARITY,
+                ),
+            ],
+            any_order=True,
+        )
 
     def test_online_to_online(self):
         wheel_timer = Mock()
@@ -75,14 +77,11 @@ class PresenceUpdateTestCase(unittest.TestCase):
 
         prev_state = UserPresenceState.default(user_id)
         prev_state = prev_state.copy_and_replace(
-            state=PresenceState.ONLINE,
-            last_active_ts=now,
-            currently_active=True,
+            state=PresenceState.ONLINE, last_active_ts=now, currently_active=True
         )
 
         new_state = prev_state.copy_and_replace(
-            state=PresenceState.ONLINE,
-            last_active_ts=now,
+            state=PresenceState.ONLINE, last_active_ts=now
         )
 
         state, persist_and_notify, federation_ping = handle_update(
@@ -97,23 +96,22 @@ class PresenceUpdateTestCase(unittest.TestCase):
         self.assertEquals(state.last_federation_update_ts, now)
 
         self.assertEquals(wheel_timer.insert.call_count, 3)
-        wheel_timer.insert.assert_has_calls([
-            call(
-                now=now,
-                obj=user_id,
-                then=new_state.last_active_ts + IDLE_TIMER
-            ),
-            call(
-                now=now,
-                obj=user_id,
-                then=new_state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT
-            ),
-            call(
-                now=now,
-                obj=user_id,
-                then=new_state.last_active_ts + LAST_ACTIVE_GRANULARITY
-            ),
-        ], any_order=True)
+        wheel_timer.insert.assert_has_calls(
+            [
+                call(now=now, obj=user_id, then=new_state.last_active_ts + IDLE_TIMER),
+                call(
+                    now=now,
+                    obj=user_id,
+                    then=new_state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT,
+                ),
+                call(
+                    now=now,
+                    obj=user_id,
+                    then=new_state.last_active_ts + LAST_ACTIVE_GRANULARITY,
+                ),
+            ],
+            any_order=True,
+        )
 
     def test_online_to_online_last_active_noop(self):
         wheel_timer = Mock()
@@ -128,8 +126,7 @@ class PresenceUpdateTestCase(unittest.TestCase):
         )
 
         new_state = prev_state.copy_and_replace(
-            state=PresenceState.ONLINE,
-            last_active_ts=now,
+            state=PresenceState.ONLINE, last_active_ts=now
         )
 
         state, persist_and_notify, federation_ping = handle_update(
@@ -144,23 +141,22 @@ class PresenceUpdateTestCase(unittest.TestCase):
         self.assertEquals(state.last_federation_update_ts, now)
 
         self.assertEquals(wheel_timer.insert.call_count, 3)
-        wheel_timer.insert.assert_has_calls([
-            call(
-                now=now,
-                obj=user_id,
-                then=new_state.last_active_ts + IDLE_TIMER
-            ),
-            call(
-                now=now,
-                obj=user_id,
-                then=new_state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT
-            ),
-            call(
-                now=now,
-                obj=user_id,
-                then=new_state.last_active_ts + LAST_ACTIVE_GRANULARITY
-            ),
-        ], any_order=True)
+        wheel_timer.insert.assert_has_calls(
+            [
+                call(now=now, obj=user_id, then=new_state.last_active_ts + IDLE_TIMER),
+                call(
+                    now=now,
+                    obj=user_id,
+                    then=new_state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT,
+                ),
+                call(
+                    now=now,
+                    obj=user_id,
+                    then=new_state.last_active_ts + LAST_ACTIVE_GRANULARITY,
+                ),
+            ],
+            any_order=True,
+        )
 
     def test_online_to_online_last_active(self):
         wheel_timer = Mock()
@@ -174,9 +170,7 @@ class PresenceUpdateTestCase(unittest.TestCase):
             currently_active=True,
         )
 
-        new_state = prev_state.copy_and_replace(
-            state=PresenceState.ONLINE,
-        )
+        new_state = prev_state.copy_and_replace(state=PresenceState.ONLINE)
 
         state, persist_and_notify, federation_ping = handle_update(
             prev_state, new_state, is_mine=True, wheel_timer=wheel_timer, now=now
@@ -189,18 +183,17 @@ class PresenceUpdateTestCase(unittest.TestCase):
         self.assertEquals(state.last_federation_update_ts, now)
 
         self.assertEquals(wheel_timer.insert.call_count, 2)
-        wheel_timer.insert.assert_has_calls([
-            call(
-                now=now,
-                obj=user_id,
-                then=new_state.last_active_ts + IDLE_TIMER
-            ),
-            call(
-                now=now,
-                obj=user_id,
-                then=new_state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT
-            )
-        ], any_order=True)
+        wheel_timer.insert.assert_has_calls(
+            [
+                call(now=now, obj=user_id, then=new_state.last_active_ts + IDLE_TIMER),
+                call(
+                    now=now,
+                    obj=user_id,
+                    then=new_state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT,
+                ),
+            ],
+            any_order=True,
+        )
 
     def test_remote_ping_timer(self):
         wheel_timer = Mock()
@@ -209,13 +202,10 @@ class PresenceUpdateTestCase(unittest.TestCase):
 
         prev_state = UserPresenceState.default(user_id)
         prev_state = prev_state.copy_and_replace(
-            state=PresenceState.ONLINE,
-            last_active_ts=now,
+            state=PresenceState.ONLINE, last_active_ts=now
         )
 
-        new_state = prev_state.copy_and_replace(
-            state=PresenceState.ONLINE,
-        )
+        new_state = prev_state.copy_and_replace(state=PresenceState.ONLINE)
 
         state, persist_and_notify, federation_ping = handle_update(
             prev_state, new_state, is_mine=False, wheel_timer=wheel_timer, now=now
@@ -228,13 +218,16 @@ class PresenceUpdateTestCase(unittest.TestCase):
         self.assertEquals(new_state.status_msg, state.status_msg)
 
         self.assertEquals(wheel_timer.insert.call_count, 1)
-        wheel_timer.insert.assert_has_calls([
-            call(
-                now=now,
-                obj=user_id,
-                then=new_state.last_federation_update_ts + FEDERATION_TIMEOUT
-            ),
-        ], any_order=True)
+        wheel_timer.insert.assert_has_calls(
+            [
+                call(
+                    now=now,
+                    obj=user_id,
+                    then=new_state.last_federation_update_ts + FEDERATION_TIMEOUT,
+                )
+            ],
+            any_order=True,
+        )
 
     def test_online_to_offline(self):
         wheel_timer = Mock()
@@ -243,14 +236,10 @@ class PresenceUpdateTestCase(unittest.TestCase):
 
         prev_state = UserPresenceState.default(user_id)
         prev_state = prev_state.copy_and_replace(
-            state=PresenceState.ONLINE,
-            last_active_ts=now,
-            currently_active=True,
+            state=PresenceState.ONLINE, last_active_ts=now, currently_active=True
         )
 
-        new_state = prev_state.copy_and_replace(
-            state=PresenceState.OFFLINE,
-        )
+        new_state = prev_state.copy_and_replace(state=PresenceState.OFFLINE)
 
         state, persist_and_notify, federation_ping = handle_update(
             prev_state, new_state, is_mine=True, wheel_timer=wheel_timer, now=now
@@ -269,14 +258,10 @@ class PresenceUpdateTestCase(unittest.TestCase):
 
         prev_state = UserPresenceState.default(user_id)
         prev_state = prev_state.copy_and_replace(
-            state=PresenceState.ONLINE,
-            last_active_ts=now,
-            currently_active=True,
+            state=PresenceState.ONLINE, last_active_ts=now, currently_active=True
         )
 
-        new_state = prev_state.copy_and_replace(
-            state=PresenceState.UNAVAILABLE,
-        )
+        new_state = prev_state.copy_and_replace(state=PresenceState.UNAVAILABLE)
 
         state, persist_and_notify, federation_ping = handle_update(
             prev_state, new_state, is_mine=True, wheel_timer=wheel_timer, now=now
@@ -289,13 +274,16 @@ class PresenceUpdateTestCase(unittest.TestCase):
         self.assertEquals(new_state.status_msg, state.status_msg)
 
         self.assertEquals(wheel_timer.insert.call_count, 1)
-        wheel_timer.insert.assert_has_calls([
-            call(
-                now=now,
-                obj=user_id,
-                then=new_state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT
-            )
-        ], any_order=True)
+        wheel_timer.insert.assert_has_calls(
+            [
+                call(
+                    now=now,
+                    obj=user_id,
+                    then=new_state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT,
+                )
+            ],
+            any_order=True,
+        )
 
 
 class PresenceTimeoutTestCase(unittest.TestCase):
@@ -310,9 +298,7 @@ class PresenceTimeoutTestCase(unittest.TestCase):
             last_user_sync_ts=now,
         )
 
-        new_state = handle_timeout(
-            state, is_mine=True, syncing_user_ids=set(), now=now
-        )
+        new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now)
 
         self.assertIsNotNone(new_state)
         self.assertEquals(new_state.state, PresenceState.UNAVAILABLE)
@@ -328,9 +314,7 @@ class PresenceTimeoutTestCase(unittest.TestCase):
             last_user_sync_ts=now - SYNC_ONLINE_TIMEOUT - 1,
         )
 
-        new_state = handle_timeout(
-            state, is_mine=True, syncing_user_ids=set(), now=now
-        )
+        new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now)
 
         self.assertIsNotNone(new_state)
         self.assertEquals(new_state.state, PresenceState.OFFLINE)
@@ -365,9 +349,7 @@ class PresenceTimeoutTestCase(unittest.TestCase):
             last_federation_update_ts=now - FEDERATION_PING_INTERVAL - 1,
         )
 
-        new_state = handle_timeout(
-            state, is_mine=True, syncing_user_ids=set(), now=now
-        )
+        new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now)
 
         self.assertIsNotNone(new_state)
         self.assertEquals(new_state, new_state)
@@ -384,9 +366,7 @@ class PresenceTimeoutTestCase(unittest.TestCase):
             last_federation_update_ts=now,
         )
 
-        new_state = handle_timeout(
-            state, is_mine=True, syncing_user_ids=set(), now=now
-        )
+        new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now)
 
         self.assertIsNone(new_state)
 
@@ -421,9 +401,7 @@ class PresenceTimeoutTestCase(unittest.TestCase):
             last_federation_update_ts=now,
         )
 
-        new_state = handle_timeout(
-            state, is_mine=True, syncing_user_ids=set(), now=now
-        )
+        new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now)
 
         self.assertIsNotNone(new_state)
         self.assertEquals(state, new_state)
diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py
index 458296ee4c..80da1c8954 100644
--- a/tests/handlers/test_profile.py
+++ b/tests/handlers/test_profile.py
@@ -14,22 +14,22 @@
 # limitations under the License.
 
 
-from tests import unittest
-from twisted.internet import defer
-
 from mock import Mock, NonCallableMock
 
+from twisted.internet import defer
+
 import synapse.types
 from synapse.api.errors import AuthError
-from synapse.handlers.profile import ProfileHandler
+from synapse.handlers.profile import MasterProfileHandler
 from synapse.types import UserID
 
+from tests import unittest
 from tests.utils import setup_test_homeserver
 
 
 class ProfileHandlers(object):
     def __init__(self, hs):
-        self.profile_handler = ProfileHandler(hs)
+        self.profile_handler = MasterProfileHandler(hs)
 
 
 class ProfileTestCase(unittest.TestCase):
@@ -48,15 +48,14 @@ class ProfileTestCase(unittest.TestCase):
         self.mock_registry.register_query_handler = register_query_handler
 
         hs = yield setup_test_homeserver(
+            self.addCleanup,
             http_client=None,
             handlers=None,
             resource_for_federation=Mock(),
             federation_client=self.mock_federation,
             federation_server=Mock(),
             federation_registry=self.mock_registry,
-            ratelimiter=NonCallableMock(spec_set=[
-                "send_message",
-            ])
+            ratelimiter=NonCallableMock(spec_set=["send_message"]),
         )
 
         self.ratelimiter = hs.get_ratelimiter()
@@ -74,9 +73,7 @@ class ProfileTestCase(unittest.TestCase):
 
     @defer.inlineCallbacks
     def test_get_my_name(self):
-        yield self.store.set_profile_displayname(
-            self.frank.localpart, "Frank"
-        )
+        yield self.store.set_profile_displayname(self.frank.localpart, "Frank")
 
         displayname = yield self.handler.get_displayname(self.frank)
 
@@ -85,22 +82,18 @@ class ProfileTestCase(unittest.TestCase):
     @defer.inlineCallbacks
     def test_set_my_name(self):
         yield self.handler.set_displayname(
-            self.frank,
-            synapse.types.create_requester(self.frank),
-            "Frank Jr."
+            self.frank, synapse.types.create_requester(self.frank), "Frank Jr."
         )
 
         self.assertEquals(
             (yield self.store.get_profile_displayname(self.frank.localpart)),
-            "Frank Jr."
+            "Frank Jr.",
         )
 
     @defer.inlineCallbacks
     def test_set_my_name_noauth(self):
         d = self.handler.set_displayname(
-            self.frank,
-            synapse.types.create_requester(self.bob),
-            "Frank Jr."
+            self.frank, synapse.types.create_requester(self.bob), "Frank Jr."
         )
 
         yield self.assertFailure(d, AuthError)
@@ -145,11 +138,12 @@ class ProfileTestCase(unittest.TestCase):
     @defer.inlineCallbacks
     def test_set_my_avatar(self):
         yield self.handler.set_avatar_url(
-            self.frank, synapse.types.create_requester(self.frank),
-            "http://my.server/pic.gif"
+            self.frank,
+            synapse.types.create_requester(self.frank),
+            "http://my.server/pic.gif",
         )
 
         self.assertEquals(
             (yield self.store.get_profile_avatar_url(self.frank.localpart)),
-            "http://my.server/pic.gif"
+            "http://my.server/pic.gif",
         )
diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py
index e990e45220..7b4ade3dfb 100644
--- a/tests/handlers/test_register.py
+++ b/tests/handlers/test_register.py
@@ -13,15 +13,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from mock import Mock
+
 from twisted.internet import defer
-from .. import unittest
 
+from synapse.api.errors import ResourceLimitError
 from synapse.handlers.register import RegistrationHandler
 from synapse.types import UserID, create_requester
 
 from tests.utils import setup_test_homeserver
 
-from mock import Mock
+from .. import unittest
 
 
 class RegistrationHandlers(object):
@@ -38,16 +40,22 @@ class RegistrationTestCase(unittest.TestCase):
         self.mock_distributor.declare("registered_user")
         self.mock_captcha_client = Mock()
         self.hs = yield setup_test_homeserver(
+            self.addCleanup,
             handlers=None,
             http_client=None,
             expire_access_token=True,
             profile_handler=Mock(),
         )
         self.macaroon_generator = Mock(
-            generate_access_token=Mock(return_value='secret'))
+            generate_access_token=Mock(return_value='secret')
+        )
         self.hs.get_macaroon_generator = Mock(return_value=self.macaroon_generator)
         self.hs.handlers = RegistrationHandlers(self.hs)
         self.handler = self.hs.get_handlers().registration_handler
+        self.store = self.hs.get_datastore()
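+        # Config and fixtures for the monthly active user (MAU) limit tests below.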
+        self.hs.config.max_mau_value = 50
+        self.lots_of_users = 100
+        self.small_number_of_users = 1
 
     @defer.inlineCallbacks
     def test_user_is_created_and_logged_in_if_doesnt_exist(self):
@@ -56,7 +64,8 @@ class RegistrationTestCase(unittest.TestCase):
         user_id = "@someone:test"
         requester = create_requester("@as:test")
         result_user_id, result_token = yield self.handler.get_or_create_user(
-            requester, local_part, display_name)
+            requester, local_part, display_name
+        )
         self.assertEquals(result_user_id, user_id)
         self.assertEquals(result_token, 'secret')
 
@@ -67,12 +76,74 @@ class RegistrationTestCase(unittest.TestCase):
         yield store.register(
             user_id=frank.to_string(),
             token="jkv;g498752-43gj['eamb!-5",
-            password_hash=None)
+            password_hash=None,
+        )
         local_part = "frank"
         display_name = "Frank"
         user_id = "@frank:test"
         requester = create_requester("@as:test")
         result_user_id, result_token = yield self.handler.get_or_create_user(
-            requester, local_part, display_name)
+            requester, local_part, display_name
+        )
         self.assertEquals(result_user_id, user_id)
         self.assertEquals(result_token, 'secret')
+
+    @defer.inlineCallbacks
+    def test_mau_limits_when_disabled(self):
+        self.hs.config.limit_usage_by_mau = False
+        # Ensure this does not throw an exception
+        yield self.handler.get_or_create_user("requester", 'a', "display_name")
+
+    @defer.inlineCallbacks
+    def test_get_or_create_user_mau_not_blocked(self):
+        self.hs.config.limit_usage_by_mau = True
+        self.store.count_monthly_users = Mock(
+            return_value=defer.succeed(self.hs.config.max_mau_value - 1)
+        )
+        # Ensure this does not throw an exception
+        yield self.handler.get_or_create_user("@user:server", 'c', "User")
+
+    @defer.inlineCallbacks
+    def test_get_or_create_user_mau_blocked(self):
+        self.hs.config.limit_usage_by_mau = True
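+        # A count above the cap, and a count exactly at the cap, should both
+        # block registration once limit_usage_by_mau is enabled.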
+        self.store.get_monthly_active_count = Mock(
+            return_value=defer.succeed(self.lots_of_users)
+        )
+        with self.assertRaises(ResourceLimitError):
+            yield self.handler.get_or_create_user("requester", 'b', "display_name")
+
+        self.store.get_monthly_active_count = Mock(
+            return_value=defer.succeed(self.hs.config.max_mau_value)
+        )
+        with self.assertRaises(ResourceLimitError):
+            yield self.handler.get_or_create_user("requester", 'b', "display_name")
+
+    @defer.inlineCallbacks
+    def test_register_mau_blocked(self):
+        self.hs.config.limit_usage_by_mau = True
+        self.store.get_monthly_active_count = Mock(
+            return_value=defer.succeed(self.lots_of_users)
+        )
+        with self.assertRaises(ResourceLimitError):
+            yield self.handler.register(localpart="local_part")
+
+        self.store.get_monthly_active_count = Mock(
+            return_value=defer.succeed(self.hs.config.max_mau_value)
+        )
+        with self.assertRaises(ResourceLimitError):
+            yield self.handler.register(localpart="local_part")
+
+    @defer.inlineCallbacks
+    def test_register_saml2_mau_blocked(self):
+        self.hs.config.limit_usage_by_mau = True
+        self.store.get_monthly_active_count = Mock(
+            return_value=defer.succeed(self.lots_of_users)
+        )
+        with self.assertRaises(ResourceLimitError):
+            yield self.handler.register_saml2(localpart="local_part")
+
+        self.store.get_monthly_active_count = Mock(
+            return_value=defer.succeed(self.hs.config.max_mau_value)
+        )
+        with self.assertRaises(ResourceLimitError):
+            yield self.handler.register_saml2(localpart="local_part")
diff --git a/tests/handlers/test_sync.py b/tests/handlers/test_sync.py
new file mode 100644
index 0000000000..31f54bbd7d
--- /dev/null
+++ b/tests/handlers/test_sync.py
@@ -0,0 +1,71 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from twisted.internet import defer
+
+from synapse.api.errors import Codes, ResourceLimitError
+from synapse.api.filtering import DEFAULT_FILTER_COLLECTION
+from synapse.handlers.sync import SyncConfig, SyncHandler
+from synapse.types import UserID
+
+import tests.unittest
+import tests.utils
+from tests.utils import setup_test_homeserver
+
+
+class SyncTestCase(tests.unittest.TestCase):
+    """ Tests Sync Handler. """
+
+    @defer.inlineCallbacks
+    def setUp(self):
+        self.hs = yield setup_test_homeserver(self.addCleanup)
+        self.sync_handler = SyncHandler(self.hs)
+        self.store = self.hs.get_datastore()
+
+    @defer.inlineCallbacks
+    def test_wait_for_sync_for_user_auth_blocking(self):
+
+        user_id1 = "@user1:server"
+        user_id2 = "@user2:server"
+        sync_config = self._generate_sync_config(user_id1)
+
+        self.hs.config.limit_usage_by_mau = True
+        self.hs.config.max_mau_value = 1
+
+        # Check that the happy case does not throw errors
+        yield self.store.upsert_monthly_active_user(user_id1)
+        yield self.sync_handler.wait_for_sync_for_user(sync_config)
+
+        # Test that the global "hs_disabled" block takes precedence
+        self.hs.config.hs_disabled = True
+        with self.assertRaises(ResourceLimitError) as e:
+            yield self.sync_handler.wait_for_sync_for_user(sync_config)
+        self.assertEquals(e.exception.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)
+
+        self.hs.config.hs_disabled = False
+
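+        # user2 is not a recorded monthly active user, so with max_mau_value = 1
+        # (the slot being taken by user1) their sync should be rejected.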
+        sync_config = self._generate_sync_config(user_id2)
+
+        with self.assertRaises(ResourceLimitError) as e:
+            yield self.sync_handler.wait_for_sync_for_user(sync_config)
+        self.assertEquals(e.exception.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)
+
+    def _generate_sync_config(self, user_id):
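+        # user_id has the form "@localpart:domain"; split it apart for UserID.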
+        return SyncConfig(
+            user=UserID(user_id.split(":")[0][1:], user_id.split(":")[1]),
+            filter_collection=DEFAULT_FILTER_COLLECTION,
+            is_guest=False,
+            request_key="request_key",
+            device_id="device_id",
+        )
diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py
index a433bbfa8a..ad58073a14 100644
--- a/tests/handlers/test_typing.py
+++ b/tests/handlers/test_typing.py
@@ -14,41 +14,43 @@
 # limitations under the License.
 
 
-from tests import unittest
-from twisted.internet import defer
-
-from mock import Mock, call, ANY
 import json
 
-from ..utils import (
-    MockHttpResource, MockClock, DeferredMockCallable, setup_test_homeserver
-)
+from mock import ANY, Mock, call
+
+from twisted.internet import defer
 
 from synapse.api.errors import AuthError
 from synapse.types import UserID
 
+from tests import unittest
+
+from ..utils import (
+    DeferredMockCallable,
+    MockClock,
+    MockHttpResource,
+    setup_test_homeserver,
+)
+
 
 def _expect_edu(destination, edu_type, content, origin="test"):
     return {
         "origin": origin,
         "origin_server_ts": 1000000,
         "pdus": [],
-        "edus": [
-            {
-                "edu_type": edu_type,
-                "content": content,
-            }
-        ],
-        "pdu_failures": [],
+        "edus": [{"edu_type": edu_type, "content": content}],
     }
 
 
 def _make_edu_json(origin, edu_type, content):
-    return json.dumps(_expect_edu("test", edu_type, content, origin=origin))
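+    # Encode to bytes, matching what the federation transport actually sends.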
+    return json.dumps(_expect_edu("test", edu_type, content, origin=origin)).encode(
+        'utf8'
+    )
 
 
 class TypingNotificationsTestCase(unittest.TestCase):
     """Tests typing notifications to rooms."""
+
     @defer.inlineCallbacks
     def setUp(self):
         self.clock = MockClock()
@@ -65,21 +67,24 @@ class TypingNotificationsTestCase(unittest.TestCase):
         self.state_handler = Mock()
 
         hs = yield setup_test_homeserver(
+            self.addCleanup,
             "test",
             auth=self.auth,
             clock=self.clock,
-            datastore=Mock(spec=[
-                # Bits that Federation needs
-                "prep_send_transaction",
-                "delivered_txn",
-                "get_received_txn_response",
-                "set_received_txn_response",
-                "get_destination_retry_timings",
-                "get_devices_by_remote",
-                # Bits that user_directory needs
-                "get_user_directory_stream_pos",
-                "get_current_state_deltas",
-            ]),
+            datastore=Mock(
+                spec=[
+                    # Bits that Federation needs
+                    "prep_send_transaction",
+                    "delivered_txn",
+                    "get_received_txn_response",
+                    "set_received_txn_response",
+                    "get_destination_retry_timings",
+                    "get_devices_by_remote",
+                    # Bits that user_directory needs
+                    "get_user_directory_stream_pos",
+                    "get_current_state_deltas",
+                ]
+            ),
             state_handler=self.state_handler,
             handlers=Mock(),
             notifier=mock_notifier,
@@ -94,19 +99,16 @@ class TypingNotificationsTestCase(unittest.TestCase):
         self.event_source = hs.get_event_sources().sources["typing"]
 
         self.datastore = hs.get_datastore()
-        retry_timings_res = {
-            "destination": "",
-            "retry_last_ts": 0,
-            "retry_interval": 0,
-        }
-        self.datastore.get_destination_retry_timings.return_value = (
-            defer.succeed(retry_timings_res)
+        retry_timings_res = {"destination": "", "retry_last_ts": 0, "retry_interval": 0}
+        self.datastore.get_destination_retry_timings.return_value = defer.succeed(
+            retry_timings_res
         )
 
         self.datastore.get_devices_by_remote.return_value = (0, [])
 
         def get_received_txn_response(*args):
             return defer.succeed(None)
+
         self.datastore.get_received_txn_response = get_received_txn_response
 
         self.room_id = "a-room"
@@ -119,10 +121,12 @@ class TypingNotificationsTestCase(unittest.TestCase):
 
         def get_joined_hosts_for_room(room_id):
             return set(member.domain for member in self.room_members)
+
         self.datastore.get_joined_hosts_for_room = get_joined_hosts_for_room
 
         def get_current_user_in_room(room_id):
             return set(str(u) for u in self.room_members)
+
         self.state_handler.get_current_user_in_room = get_current_user_in_room
 
         self.datastore.get_user_directory_stream_pos.return_value = (
@@ -130,19 +134,13 @@ class TypingNotificationsTestCase(unittest.TestCase):
             defer.succeed(1)
         )
 
-        self.datastore.get_current_state_deltas.return_value = (
-            None
-        )
+        self.datastore.get_current_state_deltas.return_value = None
 
         self.auth.check_joined_room = check_joined_room
 
         self.datastore.get_to_device_stream_token = lambda: 0
-        self.datastore.get_new_device_msgs_for_remote = (
-            lambda *args, **kargs: ([], 0)
-        )
-        self.datastore.delete_device_msgs_for_remote = (
-            lambda *args, **kargs: None
-        )
+        self.datastore.get_new_device_msgs_for_remote = lambda *args, **kargs: ([], 0)
+        self.datastore.delete_device_msgs_for_remote = lambda *args, **kargs: None
 
         # Some local users to test with
         self.u_apple = UserID.from_string("@apple:test")
@@ -164,24 +162,23 @@ class TypingNotificationsTestCase(unittest.TestCase):
             timeout=20000,
         )
 
-        self.on_new_event.assert_has_calls([
-            call('typing_key', 1, rooms=[self.room_id]),
-        ])
+        self.on_new_event.assert_has_calls(
+            [call('typing_key', 1, rooms=[self.room_id])]
+        )
 
         self.assertEquals(self.event_source.get_current_key(), 1)
         events = yield self.event_source.get_new_events(
-            room_ids=[self.room_id],
-            from_key=0,
+            room_ids=[self.room_id], from_key=0
         )
         self.assertEquals(
             events[0],
             [
-                {"type": "m.typing",
-                 "room_id": self.room_id,
-                 "content": {
-                     "user_ids": [self.u_apple.to_string()],
-                 }},
-            ]
+                {
+                    "type": "m.typing",
+                    "room_id": self.room_id,
+                    "content": {"user_ids": [self.u_apple.to_string()]},
+                }
+            ],
         )
 
     @defer.inlineCallbacks
@@ -200,13 +197,13 @@ class TypingNotificationsTestCase(unittest.TestCase):
                         "room_id": self.room_id,
                         "user_id": self.u_apple.to_string(),
                         "typing": True,
-                    }
+                    },
                 ),
                 json_data_callback=ANY,
                 long_retries=True,
                 backoff_on_404=True,
             ),
-            defer.succeed((200, "OK"))
+            defer.succeed((200, "OK")),
         )
 
         yield self.handler.started_typing(
@@ -234,27 +231,29 @@ class TypingNotificationsTestCase(unittest.TestCase):
                     "room_id": self.room_id,
                     "user_id": self.u_onion.to_string(),
                     "typing": True,
-                }
+                },
             ),
             federation_auth=True,
         )
 
-        self.on_new_event.assert_has_calls([
-            call('typing_key', 1, rooms=[self.room_id]),
-        ])
+        self.on_new_event.assert_has_calls(
+            [call('typing_key', 1, rooms=[self.room_id])]
+        )
 
         self.assertEquals(self.event_source.get_current_key(), 1)
         events = yield self.event_source.get_new_events(
-            room_ids=[self.room_id],
-            from_key=0
+            room_ids=[self.room_id], from_key=0
+        )
+        self.assertEquals(
+            events[0],
+            [
+                {
+                    "type": "m.typing",
+                    "room_id": self.room_id,
+                    "content": {"user_ids": [self.u_onion.to_string()]},
+                }
+            ],
         )
-        self.assertEquals(events[0], [{
-            "type": "m.typing",
-            "room_id": self.room_id,
-            "content": {
-                "user_ids": [self.u_onion.to_string()],
-            },
-        }])
 
     @defer.inlineCallbacks
     def test_stopped_typing(self):
@@ -272,17 +271,18 @@ class TypingNotificationsTestCase(unittest.TestCase):
                         "room_id": self.room_id,
                         "user_id": self.u_apple.to_string(),
                         "typing": False,
-                    }
+                    },
                 ),
                 json_data_callback=ANY,
                 long_retries=True,
                 backoff_on_404=True,
             ),
-            defer.succeed((200, "OK"))
+            defer.succeed((200, "OK")),
         )
 
         # Gut-wrenching
         from synapse.handlers.typing import RoomMember
+
         member = RoomMember(self.room_id, self.u_apple.to_string())
         self.handler._member_typing_until[member] = 1002000
         self.handler._room_typing[self.room_id] = set([self.u_apple.to_string()])
@@ -290,29 +290,29 @@ class TypingNotificationsTestCase(unittest.TestCase):
         self.assertEquals(self.event_source.get_current_key(), 0)
 
         yield self.handler.stopped_typing(
-            target_user=self.u_apple,
-            auth_user=self.u_apple,
-            room_id=self.room_id,
+            target_user=self.u_apple, auth_user=self.u_apple, room_id=self.room_id
         )
 
-        self.on_new_event.assert_has_calls([
-            call('typing_key', 1, rooms=[self.room_id]),
-        ])
+        self.on_new_event.assert_has_calls(
+            [call('typing_key', 1, rooms=[self.room_id])]
+        )
 
         yield put_json.await_calls()
 
         self.assertEquals(self.event_source.get_current_key(), 1)
         events = yield self.event_source.get_new_events(
-            room_ids=[self.room_id],
-            from_key=0,
+            room_ids=[self.room_id], from_key=0
+        )
+        self.assertEquals(
+            events[0],
+            [
+                {
+                    "type": "m.typing",
+                    "room_id": self.room_id,
+                    "content": {"user_ids": []},
+                }
+            ],
         )
-        self.assertEquals(events[0], [{
-            "type": "m.typing",
-            "room_id": self.room_id,
-            "content": {
-                "user_ids": [],
-            },
-        }])
 
     @defer.inlineCallbacks
     def test_typing_timeout(self):
@@ -327,42 +327,46 @@ class TypingNotificationsTestCase(unittest.TestCase):
             timeout=10000,
         )
 
-        self.on_new_event.assert_has_calls([
-            call('typing_key', 1, rooms=[self.room_id]),
-        ])
+        self.on_new_event.assert_has_calls(
+            [call('typing_key', 1, rooms=[self.room_id])]
+        )
         self.on_new_event.reset_mock()
 
         self.assertEquals(self.event_source.get_current_key(), 1)
         events = yield self.event_source.get_new_events(
-            room_ids=[self.room_id],
-            from_key=0,
+            room_ids=[self.room_id], from_key=0
+        )
+        self.assertEquals(
+            events[0],
+            [
+                {
+                    "type": "m.typing",
+                    "room_id": self.room_id,
+                    "content": {"user_ids": [self.u_apple.to_string()]},
+                }
+            ],
         )
-        self.assertEquals(events[0], [{
-            "type": "m.typing",
-            "room_id": self.room_id,
-            "content": {
-                "user_ids": [self.u_apple.to_string()],
-            },
-        }])
 
         self.clock.advance_time(16)
 
-        self.on_new_event.assert_has_calls([
-            call('typing_key', 2, rooms=[self.room_id]),
-        ])
+        self.on_new_event.assert_has_calls(
+            [call('typing_key', 2, rooms=[self.room_id])]
+        )
 
         self.assertEquals(self.event_source.get_current_key(), 2)
         events = yield self.event_source.get_new_events(
-            room_ids=[self.room_id],
-            from_key=1,
+            room_ids=[self.room_id], from_key=1
+        )
+        self.assertEquals(
+            events[0],
+            [
+                {
+                    "type": "m.typing",
+                    "room_id": self.room_id,
+                    "content": {"user_ids": []},
+                }
+            ],
         )
-        self.assertEquals(events[0], [{
-            "type": "m.typing",
-            "room_id": self.room_id,
-            "content": {
-                "user_ids": [],
-            },
-        }])
 
         # SYN-230 - see if we can still set after timeout
 
@@ -373,20 +377,22 @@ class TypingNotificationsTestCase(unittest.TestCase):
             timeout=10000,
         )
 
-        self.on_new_event.assert_has_calls([
-            call('typing_key', 3, rooms=[self.room_id]),
-        ])
+        self.on_new_event.assert_has_calls(
+            [call('typing_key', 3, rooms=[self.room_id])]
+        )
         self.on_new_event.reset_mock()
 
         self.assertEquals(self.event_source.get_current_key(), 3)
         events = yield self.event_source.get_new_events(
-            room_ids=[self.room_id],
-            from_key=0,
+            room_ids=[self.room_id], from_key=0
+        )
+        self.assertEquals(
+            events[0],
+            [
+                {
+                    "type": "m.typing",
+                    "room_id": self.room_id,
+                    "content": {"user_ids": [self.u_apple.to_string()]},
+                }
+            ],
         )
-        self.assertEquals(events[0], [{
-            "type": "m.typing",
-            "room_id": self.room_id,
-            "content": {
-                "user_ids": [self.u_apple.to_string()],
-            },
-        }])
diff --git a/tests/http/__init__.py b/tests/http/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/http/__init__.py
diff --git a/tests/http/test_endpoint.py b/tests/http/test_endpoint.py
new file mode 100644
index 0000000000..3b0155ed03
--- /dev/null
+++ b/tests/http/test_endpoint.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from synapse.http.endpoint import parse_and_validate_server_name, parse_server_name
+
+from tests import unittest
+
+
+class ServerNameTestCase(unittest.TestCase):
+    def test_parse_server_name(self):
+        test_data = {
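+            # input server_name -> expected (host, port); port is None if absent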
+            'localhost': ('localhost', None),
+            'my-example.com:1234': ('my-example.com', 1234),
+            '1.2.3.4': ('1.2.3.4', None),
+            '[0abc:1def::1234]': ('[0abc:1def::1234]', None),
+            '1.2.3.4:1': ('1.2.3.4', 1),
+            '[0abc:1def::1234]:8080': ('[0abc:1def::1234]', 8080),
+        }
+
+        for i, o in test_data.items():
+            self.assertEqual(parse_server_name(i), o)
+
+    def test_validate_bad_server_names(self):
+        test_data = [
+            "",  # empty
+            "localhost:http",  # non-numeric port
+            "1234]",  # smells like ipv6 literal but isn't
+            "[1234",
+            "underscore_.com",
+            "percent%65.com",
+            "1234:5678:80",  # too many colons
+        ]
+        for i in test_data:
+            try:
+                parse_and_validate_server_name(i)
+                self.fail(
+                    "Expected parse_and_validate_server_name('%s') to throw" % (i,)
+                )
+            except ValueError:
+                pass
diff --git a/tests/replication/slave/storage/_base.py b/tests/replication/slave/storage/_base.py
index 64e07a8c93..65df116efc 100644
--- a/tests/replication/slave/storage/_base.py
+++ b/tests/replication/slave/storage/_base.py
@@ -11,30 +11,54 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
-from twisted.internet import defer, reactor
-from tests import unittest
-
 import tempfile
 
 from mock import Mock, NonCallableMock
-from tests.utils import setup_test_homeserver
-from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
+
+from twisted.internet import defer, reactor
+from twisted.internet.defer import Deferred
+
 from synapse.replication.tcp.client import (
-    ReplicationClientHandler, ReplicationClientFactory,
+    ReplicationClientFactory,
+    ReplicationClientHandler,
 )
+from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
+from synapse.util.logcontext import PreserveLoggingContext, make_deferred_yieldable
+
+from tests import unittest
+from tests.utils import setup_test_homeserver
+
+
+class TestReplicationClientHandler(ReplicationClientHandler):
+    """Overrides on_rdata so that we can wait for it to happen"""
+
+    def __init__(self, store):
+        super(TestReplicationClientHandler, self).__init__(store)
+        self._rdata_awaiters = []
+
+    def await_replication(self):
+        d = Deferred()
+        self._rdata_awaiters.append(d)
+        return make_deferred_yieldable(d)
+
+    def on_rdata(self, stream_name, token, rows):
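+        # Swap the awaiter list out first, so that any new waiters registered
+        # by the callbacks below are queued for the *next* batch of rdata.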
+        awaiters = self._rdata_awaiters
+        self._rdata_awaiters = []
+        super(TestReplicationClientHandler, self).on_rdata(stream_name, token, rows)
+        with PreserveLoggingContext():
+            for a in awaiters:
+                a.callback(None)
 
 
 class BaseSlavedStoreTestCase(unittest.TestCase):
     @defer.inlineCallbacks
     def setUp(self):
         self.hs = yield setup_test_homeserver(
+            self.addCleanup,
             "blue",
             http_client=None,
             federation_client=Mock(),
-            ratelimiter=NonCallableMock(spec_set=[
-                "send_message",
-            ]),
+            ratelimiter=NonCallableMock(spec_set=["send_message"]),
         )
         self.hs.get_ratelimiter().send_message.return_value = (True, 0)
 
@@ -49,7 +73,7 @@ class BaseSlavedStoreTestCase(unittest.TestCase):
         self.addCleanup(listener.stopListening)
         self.streamer = server_factory.streamer
 
-        self.replication_handler = ReplicationClientHandler(self.slaved_store)
+        self.replication_handler = TestReplicationClientHandler(self.slaved_store)
         client_factory = ReplicationClientFactory(
             self.hs, "client_name", self.replication_handler
         )
@@ -57,12 +81,14 @@ class BaseSlavedStoreTestCase(unittest.TestCase):
         self.addCleanup(client_factory.stopTrying)
         self.addCleanup(client_connector.disconnect)
 
-    @defer.inlineCallbacks
     def replicate(self):
-        yield self.streamer.on_notifier_poke()
-        d = self.replication_handler.await_sync("replication_test")
-        self.streamer.send_sync_to_all_connections("replication_test")
-        yield d
+        """Tell the master side of replication that something has happened, and then
+        wait for the replication to occur.
+        """
+        # xxx: should we be more specific in what we wait for?
+        d = self.replication_handler.await_replication()
+        self.streamer.on_notifier_poke()
+        return d
 
     @defer.inlineCallbacks
     def check(self, method, args, expected_result=None):
diff --git a/tests/replication/slave/storage/test_account_data.py b/tests/replication/slave/storage/test_account_data.py
index da54d478ce..87cc2b2fba 100644
--- a/tests/replication/slave/storage/test_account_data.py
+++ b/tests/replication/slave/storage/test_account_data.py
@@ -13,11 +13,11 @@
 # limitations under the License.
 
 
-from ._base import BaseSlavedStoreTestCase
+from twisted.internet import defer
 
 from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
 
-from twisted.internet import defer
+from ._base import BaseSlavedStoreTestCase
 
 USER_ID = "@feeling:blue"
 TYPE = "my.type"
@@ -29,28 +29,14 @@ class SlavedAccountDataStoreTestCase(BaseSlavedStoreTestCase):
 
     @defer.inlineCallbacks
     def test_user_account_data(self):
-        yield self.master_store.add_account_data_for_user(
-            USER_ID, TYPE, {"a": 1}
-        )
+        yield self.master_store.add_account_data_for_user(USER_ID, TYPE, {"a": 1})
         yield self.replicate()
         yield self.check(
-            "get_global_account_data_by_type_for_user",
-            [TYPE, USER_ID], {"a": 1}
-        )
-        yield self.check(
-            "get_global_account_data_by_type_for_users",
-            [TYPE, [USER_ID]], {USER_ID: {"a": 1}}
+            "get_global_account_data_by_type_for_user", [TYPE, USER_ID], {"a": 1}
         )
 
-        yield self.master_store.add_account_data_for_user(
-            USER_ID, TYPE, {"a": 2}
-        )
+        yield self.master_store.add_account_data_for_user(USER_ID, TYPE, {"a": 2})
         yield self.replicate()
         yield self.check(
-            "get_global_account_data_by_type_for_user",
-            [TYPE, USER_ID], {"a": 2}
-        )
-        yield self.check(
-            "get_global_account_data_by_type_for_users",
-            [TYPE, [USER_ID]], {USER_ID: {"a": 2}}
+            "get_global_account_data_by_type_for_user", [TYPE, USER_ID], {"a": 2}
         )
diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py
index cb058d3142..2ba80ccdcf 100644
--- a/tests/replication/slave/storage/test_events.py
+++ b/tests/replication/slave/storage/test_events.py
@@ -12,15 +12,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import BaseSlavedStoreTestCase
+from twisted.internet import defer
 
 from synapse.events import FrozenEvent, _EventInternalMetadata
 from synapse.events.snapshot import EventContext
 from synapse.replication.slave.storage.events import SlavedEventStore
 from synapse.storage.roommember import RoomsForUser
 
-from twisted.internet import defer
-
+from ._base import BaseSlavedStoreTestCase
 
 USER_ID = "@feeling:blue"
 USER_ID_2 = "@bright:blue"
@@ -39,6 +38,7 @@ def patch__eq__(cls):
     def unpatch():
         if eq is not None:
             cls.__eq__ = eq
+
     return unpatch
 
 
@@ -49,10 +49,7 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase):
     def setUp(self):
         # Patch up the equality operator for events so that we can check
         # whether lists of events match using assertEquals
-        self.unpatches = [
-            patch__eq__(_EventInternalMetadata),
-            patch__eq__(FrozenEvent),
-        ]
+        self.unpatches = [patch__eq__(_EventInternalMetadata), patch__eq__(FrozenEvent)]
         return super(SlavedEventStoreTestCase, self).setUp()
 
     def tearDown(self):
@@ -62,33 +59,27 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase):
     def test_get_latest_event_ids_in_room(self):
         create = yield self.persist(type="m.room.create", key="", creator=USER_ID)
         yield self.replicate()
-        yield self.check(
-            "get_latest_event_ids_in_room", (ROOM_ID,), [create.event_id]
-        )
+        yield self.check("get_latest_event_ids_in_room", (ROOM_ID,), [create.event_id])
 
         join = yield self.persist(
-            type="m.room.member", key=USER_ID, membership="join",
+            type="m.room.member",
+            key=USER_ID,
+            membership="join",
             prev_events=[(create.event_id, {})],
         )
         yield self.replicate()
-        yield self.check(
-            "get_latest_event_ids_in_room", (ROOM_ID,), [join.event_id]
-        )
+        yield self.check("get_latest_event_ids_in_room", (ROOM_ID,), [join.event_id])
 
     @defer.inlineCallbacks
     def test_redactions(self):
         yield self.persist(type="m.room.create", key="", creator=USER_ID)
         yield self.persist(type="m.room.member", key=USER_ID, membership="join")
 
-        msg = yield self.persist(
-            type="m.room.message", msgtype="m.text", body="Hello"
-        )
+        msg = yield self.persist(type="m.room.message", msgtype="m.text", body="Hello")
         yield self.replicate()
         yield self.check("get_event", [msg.event_id], msg)
 
-        redaction = yield self.persist(
-            type="m.room.redaction", redacts=msg.event_id
-        )
+        redaction = yield self.persist(type="m.room.redaction", redacts=msg.event_id)
         yield self.replicate()
 
         msg_dict = msg.get_dict()
@@ -103,9 +94,7 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase):
         yield self.persist(type="m.room.create", key="", creator=USER_ID)
         yield self.persist(type="m.room.member", key=USER_ID, membership="join")
 
-        msg = yield self.persist(
-            type="m.room.message", msgtype="m.text", body="Hello"
-        )
+        msg = yield self.persist(type="m.room.message", msgtype="m.text", body="Hello")
         yield self.replicate()
         yield self.check("get_event", [msg.event_id], msg)
 
@@ -123,19 +112,29 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase):
 
     @defer.inlineCallbacks
     def test_invites(self):
+        yield self.persist(type="m.room.create", key="", creator=USER_ID)
         yield self.check("get_invited_rooms_for_user", [USER_ID_2], [])
         event = yield self.persist(
             type="m.room.member", key=USER_ID_2, membership="invite"
         )
         yield self.replicate()
-        yield self.check("get_invited_rooms_for_user", [USER_ID_2], [RoomsForUser(
-            ROOM_ID, USER_ID, "invite", event.event_id,
-            event.internal_metadata.stream_ordering
-        )])
+        yield self.check(
+            "get_invited_rooms_for_user",
+            [USER_ID_2],
+            [
+                RoomsForUser(
+                    ROOM_ID,
+                    USER_ID,
+                    "invite",
+                    event.event_id,
+                    event.internal_metadata.stream_ordering,
+                )
+            ],
+        )
 
     @defer.inlineCallbacks
     def test_push_actions_for_user(self):
-        yield self.persist(type="m.room.create", creator=USER_ID)
+        yield self.persist(type="m.room.create", key="", creator=USER_ID)
         yield self.persist(type="m.room.join", key=USER_ID, membership="join")
         yield self.persist(
             type="m.room.join", sender=USER_ID, key=USER_ID_2, membership="join"
@@ -147,40 +146,55 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase):
         yield self.check(
             "get_unread_event_push_actions_by_room_for_user",
             [ROOM_ID, USER_ID_2, event1.event_id],
-            {"highlight_count": 0, "notify_count": 0}
+            {"highlight_count": 0, "notify_count": 0},
         )
 
         yield self.persist(
-            type="m.room.message", msgtype="m.text", body="world",
+            type="m.room.message",
+            msgtype="m.text",
+            body="world",
             push_actions=[(USER_ID_2, ["notify"])],
         )
         yield self.replicate()
         yield self.check(
             "get_unread_event_push_actions_by_room_for_user",
             [ROOM_ID, USER_ID_2, event1.event_id],
-            {"highlight_count": 0, "notify_count": 1}
+            {"highlight_count": 0, "notify_count": 1},
         )
 
         yield self.persist(
-            type="m.room.message", msgtype="m.text", body="world",
-            push_actions=[(USER_ID_2, [
-                "notify", {"set_tweak": "highlight", "value": True}
-            ])],
+            type="m.room.message",
+            msgtype="m.text",
+            body="world",
+            push_actions=[
+                (USER_ID_2, ["notify", {"set_tweak": "highlight", "value": True}])
+            ],
         )
         yield self.replicate()
         yield self.check(
             "get_unread_event_push_actions_by_room_for_user",
             [ROOM_ID, USER_ID_2, event1.event_id],
-            {"highlight_count": 1, "notify_count": 2}
+            {"highlight_count": 1, "notify_count": 2},
         )
 
     event_id = 0
 
     @defer.inlineCallbacks
     def persist(
-        self, sender=USER_ID, room_id=ROOM_ID, type={}, key=None, internal={},
-        state=None, reset_state=False, backfill=False,
-        depth=None, prev_events=[], auth_events=[], prev_state=[], redacts=None,
+        self,
+        sender=USER_ID,
+        room_id=ROOM_ID,
+        type={},
+        key=None,
+        internal={},
+        state=None,
+        reset_state=False,
+        backfill=False,
+        depth=None,
+        prev_events=[],
+        auth_events=[],
+        prev_state=[],
+        redacts=None,
         push_actions=[],
         **content
     ):
@@ -220,32 +234,23 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase):
         self.event_id += 1
 
         if state is not None:
-            state_ids = {
-                key: e.event_id for key, e in state.items()
-            }
-            context = EventContext()
-            context.current_state_ids = state_ids
-            context.prev_state_ids = state_ids
+            state_ids = {key: e.event_id for key, e in state.items()}
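+            # Build the context via the with_state constructor rather than
+            # mutating the state maps of a bare EventContext directly.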
+            context = EventContext.with_state(
+                state_group=None, current_state_ids=state_ids, prev_state_ids=state_ids
+            )
         else:
             state_handler = self.hs.get_state_handler()
             context = yield state_handler.compute_event_context(event)
 
         yield self.master_store.add_push_actions_to_staging(
-            event.event_id, {
-                user_id: actions
-                for user_id, actions in push_actions
-            },
+            event.event_id, {user_id: actions for user_id, actions in push_actions}
         )
 
         ordering = None
         if backfill:
-            yield self.master_store.persist_events(
-                [(event, context)], backfilled=True
-            )
+            yield self.master_store.persist_events([(event, context)], backfilled=True)
         else:
-            ordering, _ = yield self.master_store.persist_event(
-                event, context,
-            )
+            ordering, _ = yield self.master_store.persist_event(event, context)
 
         if ordering:
             event.internal_metadata.stream_ordering = ordering
diff --git a/tests/replication/slave/storage/test_receipts.py b/tests/replication/slave/storage/test_receipts.py
index 6624fe4eea..ae1adeded1 100644
--- a/tests/replication/slave/storage/test_receipts.py
+++ b/tests/replication/slave/storage/test_receipts.py
@@ -12,11 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import BaseSlavedStoreTestCase
+from twisted.internet import defer
 
 from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
 
-from twisted.internet import defer
+from ._base import BaseSlavedStoreTestCase
 
 USER_ID = "@feeling:blue"
 ROOM_ID = "!room:blue"
@@ -34,6 +34,6 @@ class SlavedReceiptTestCase(BaseSlavedStoreTestCase):
             ROOM_ID, "m.read", USER_ID, [EVENT_ID], {}
         )
         yield self.replicate()
-        yield self.check("get_receipts_for_user", [USER_ID, "m.read"], {
-            ROOM_ID: EVENT_ID
-        })
+        yield self.check(
+            "get_receipts_for_user", [USER_ID, "m.read"], {ROOM_ID: EVENT_ID}
+        )
diff --git a/tests/rest/client/test_transactions.py b/tests/rest/client/test_transactions.py
index b5bc2fa255..708dc26e61 100644
--- a/tests/rest/client/test_transactions.py
+++ b/tests/rest/client/test_transactions.py
@@ -1,28 +1,29 @@
-from synapse.rest.client.transactions import HttpTransactionCache
-from synapse.rest.client.transactions import CLEANUP_PERIOD_MS
-from twisted.internet import defer
 from mock import Mock, call
 
-from synapse.util import async
+from twisted.internet import defer, reactor
+
+from synapse.rest.client.transactions import CLEANUP_PERIOD_MS, HttpTransactionCache
+from synapse.util import Clock
 from synapse.util.logcontext import LoggingContext
+
 from tests import unittest
 from tests.utils import MockClock
 
 
 class HttpTransactionCacheTestCase(unittest.TestCase):
-
     def setUp(self):
         self.clock = MockClock()
-        self.cache = HttpTransactionCache(self.clock)
+        self.hs = Mock()
+        self.hs.get_clock = Mock(return_value=self.clock)
+        self.hs.get_auth = Mock()
+        self.cache = HttpTransactionCache(self.hs)
 
         self.mock_http_response = (200, "GOOD JOB!")
         self.mock_key = "foo"
 
     @defer.inlineCallbacks
     def test_executes_given_function(self):
-        cb = Mock(
-            return_value=defer.succeed(self.mock_http_response)
-        )
+        cb = Mock(return_value=defer.succeed(self.mock_http_response))
         res = yield self.cache.fetch_or_execute(
             self.mock_key, cb, "some_arg", keyword="arg"
         )
@@ -31,9 +32,7 @@ class HttpTransactionCacheTestCase(unittest.TestCase):
 
     @defer.inlineCallbacks
     def test_deduplicates_based_on_key(self):
-        cb = Mock(
-            return_value=defer.succeed(self.mock_http_response)
-        )
+        cb = Mock(return_value=defer.succeed(self.mock_http_response))
         for i in range(3):  # invoke multiple times
             res = yield self.cache.fetch_or_execute(
                 self.mock_key, cb, "some_arg", keyword="arg", changing_args=i
@@ -46,7 +45,7 @@ class HttpTransactionCacheTestCase(unittest.TestCase):
     def test_logcontexts_with_async_result(self):
         @defer.inlineCallbacks
         def cb():
-            yield async.sleep(0)
+            yield Clock(reactor).sleep(0)
             defer.returnValue("yay")
 
         @defer.inlineCallbacks
@@ -81,7 +80,7 @@ class HttpTransactionCacheTestCase(unittest.TestCase):
             try:
                 yield self.cache.fetch_or_execute(self.mock_key, cb)
             except Exception as e:
-                self.assertEqual(e.message, "boo")
+                self.assertEqual(e.args[0], "boo")
             self.assertIs(LoggingContext.current_context(), test_context)
 
             res = yield self.cache.fetch_or_execute(self.mock_key, cb)
@@ -107,7 +106,7 @@ class HttpTransactionCacheTestCase(unittest.TestCase):
             try:
                 yield self.cache.fetch_or_execute(self.mock_key, cb)
             except Exception as e:
-                self.assertEqual(e.message, "boo")
+                self.assertEqual(e.args[0], "boo")
             self.assertIs(LoggingContext.current_context(), test_context)
 
             res = yield self.cache.fetch_or_execute(self.mock_key, cb)
@@ -116,29 +115,18 @@ class HttpTransactionCacheTestCase(unittest.TestCase):
 
     @defer.inlineCallbacks
     def test_cleans_up(self):
-        cb = Mock(
-            return_value=defer.succeed(self.mock_http_response)
-        )
-        yield self.cache.fetch_or_execute(
-            self.mock_key, cb, "an arg"
-        )
+        cb = Mock(return_value=defer.succeed(self.mock_http_response))
+        yield self.cache.fetch_or_execute(self.mock_key, cb, "an arg")
         # should NOT have cleaned up yet
         self.clock.advance_time_msec(CLEANUP_PERIOD_MS / 2)
 
-        yield self.cache.fetch_or_execute(
-            self.mock_key, cb, "an arg"
-        )
+        yield self.cache.fetch_or_execute(self.mock_key, cb, "an arg")
         # still using cache
         cb.assert_called_once_with("an arg")
 
         self.clock.advance_time_msec(CLEANUP_PERIOD_MS)
 
-        yield self.cache.fetch_or_execute(
-            self.mock_key, cb, "an arg"
-        )
+        yield self.cache.fetch_or_execute(self.mock_key, cb, "an arg")
         # no longer using cache
         self.assertEqual(cb.call_count, 2)
-        self.assertEqual(
-            cb.call_args_list,
-            [call("an arg",), call("an arg",)]
-        )
+        self.assertEqual(cb.call_args_list, [call("an arg"), call("an arg")])
diff --git a/tests/rest/client/v1/test_admin.py b/tests/rest/client/v1/test_admin.py
new file mode 100644
index 0000000000..1a553fa3f9
--- /dev/null
+++ b/tests/rest/client/v1/test_admin.py
@@ -0,0 +1,308 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import hashlib
+import hmac
+import json
+
+from mock import Mock
+
+from synapse.http.server import JsonResource
+from synapse.rest.client.v1.admin import register_servlets
+from synapse.util import Clock
+
+from tests import unittest
+from tests.server import (
+    ThreadedMemoryReactorClock,
+    make_request,
+    render,
+    setup_test_homeserver,
+)
+
+
+class UserRegisterTestCase(unittest.TestCase):
+    def setUp(self):
+
+        self.clock = ThreadedMemoryReactorClock()
+        self.hs_clock = Clock(self.clock)
+        self.url = "/_matrix/client/r0/admin/register"
+
+        self.registration_handler = Mock()
+        self.identity_handler = Mock()
+        self.login_handler = Mock()
+        self.device_handler = Mock()
+        self.device_handler.check_device_registered = Mock(return_value="FAKE")
+
+        self.datastore = Mock(return_value=Mock())
+        self.datastore.get_current_state_deltas = Mock(return_value=[])
+
+        self.secrets = Mock()
+
+        self.hs = setup_test_homeserver(
+            self.addCleanup, http_client=None, clock=self.hs_clock, reactor=self.clock
+        )
+
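+        # Shared-secret registration is only enabled when a secret is
+        # configured; test_disabled below clears it to check the error path.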
+        self.hs.config.registration_shared_secret = u"shared"
+
+        self.hs.get_media_repository = Mock()
+        self.hs.get_deactivate_account_handler = Mock()
+
+        self.resource = JsonResource(self.hs)
+        register_servlets(self.hs, self.resource)
+
+    def test_disabled(self):
+        """
+        If there is no shared secret, registration through this method will be
+        prevented.
+        """
+        self.hs.config.registration_shared_secret = None
+
+        request, channel = make_request("POST", self.url, b'{}')
+        render(request, self.resource, self.clock)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(
+            'Shared secret registration is not enabled', channel.json_body["error"]
+        )
+
+    def test_get_nonce(self):
+        """
+        Calling GET on the endpoint will return a randomised nonce, using the
+        homeserver's secrets provider.
+        """
+        secrets = Mock()
+        secrets.token_hex = Mock(return_value="abcd")
+
+        self.hs.get_secrets = Mock(return_value=secrets)
+
+        request, channel = make_request("GET", self.url)
+        render(request, self.resource, self.clock)
+
+        self.assertEqual(channel.json_body, {"nonce": "abcd"})
+
+    def test_expired_nonce(self):
+        """
+        Calling GET on the endpoint will return a randomised nonce, which will
+        only last for SALT_TIMEOUT (60s).
+        """
+        request, channel = make_request("GET", self.url)
+        render(request, self.resource, self.clock)
+        nonce = channel.json_body["nonce"]
+
+        # 59 seconds
+        self.clock.advance(59)
+
+        body = json.dumps({"nonce": nonce})
+        request, channel = make_request("POST", self.url, body.encode('utf8'))
+        render(request, self.resource, self.clock)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual('username must be specified', channel.json_body["error"])
+
+        # 61 seconds
+        self.clock.advance(2)
+
+        request, channel = make_request("POST", self.url, body.encode('utf8'))
+        render(request, self.resource, self.clock)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual('unrecognised nonce', channel.json_body["error"])
+
+    def test_register_incorrect_nonce(self):
+        """
+        Only the provided nonce can be used, as it's checked in the MAC.
+        """
+        request, channel = make_request("GET", self.url)
+        render(request, self.resource, self.clock)
+        nonce = channel.json_body["nonce"]
+
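+        # The MAC is a SHA-1 HMAC, keyed with the shared secret, over the
+        # nonce, username, password and admin flag, joined with NUL bytes.
+        # Here it is deliberately computed over the wrong nonce.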
+        want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1)
+        want_mac.update(b"notthenonce\x00bob\x00abc123\x00admin")
+        want_mac = want_mac.hexdigest()
+
+        body = json.dumps(
+            {
+                "nonce": nonce,
+                "username": "bob",
+                "password": "abc123",
+                "admin": True,
+                "mac": want_mac,
+            }
+        )
+        request, channel = make_request("POST", self.url, body.encode('utf8'))
+        render(request, self.resource, self.clock)
+
+        self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("HMAC incorrect", channel.json_body["error"])
+
+    def test_register_correct_nonce(self):
+        """
+        When the correct nonce is provided, and the right key is provided, the
+        user is registered.
+        """
+        request, channel = make_request("GET", self.url)
+        render(request, self.resource, self.clock)
+        nonce = channel.json_body["nonce"]
+
+        want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1)
+        want_mac.update(nonce.encode('ascii') + b"\x00bob\x00abc123\x00admin")
+        want_mac = want_mac.hexdigest()
+
+        body = json.dumps(
+            {
+                "nonce": nonce,
+                "username": "bob",
+                "password": "abc123",
+                "admin": True,
+                "mac": want_mac,
+            }
+        )
+        request, channel = make_request("POST", self.url, body.encode('utf8'))
+        render(request, self.resource, self.clock)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("@bob:test", channel.json_body["user_id"])
+
+    def test_nonce_reuse(self):
+        """
+        A nonce can only be used once; reusing it is rejected as unrecognised.
+        """
+        request, channel = make_request("GET", self.url)
+        render(request, self.resource, self.clock)
+        nonce = channel.json_body["nonce"]
+
+        want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1)
+        want_mac.update(nonce.encode('ascii') + b"\x00bob\x00abc123\x00admin")
+        want_mac = want_mac.hexdigest()
+
+        body = json.dumps(
+            {
+                "nonce": nonce,
+                "username": "bob",
+                "password": "abc123",
+                "admin": True,
+                "mac": want_mac,
+            }
+        )
+        request, channel = make_request("POST", self.url, body.encode('utf8'))
+        render(request, self.resource, self.clock)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("@bob:test", channel.json_body["user_id"])
+
+        # Now, try and reuse it
+        request, channel = make_request("POST", self.url, body.encode('utf8'))
+        render(request, self.resource, self.clock)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual('unrecognised nonce', channel.json_body["error"])
+
+    def test_missing_parts(self):
+        """
+        Synapse will complain if you don't give nonce, username, password, and
+        mac.  Admin is optional.  Additional checks are done for length and
+        type.
+        """
+
+        def nonce():
+            request, channel = make_request("GET", self.url)
+            render(request, self.resource, self.clock)
+            return channel.json_body["nonce"]
+
+        #
+        # Nonce check
+        #
+
+        # Must be present
+        body = json.dumps({})
+        request, channel = make_request("POST", self.url, body.encode('utf8'))
+        render(request, self.resource, self.clock)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual('nonce must be specified', channel.json_body["error"])
+
+        #
+        # Username checks
+        #
+
+        # Must be present
+        body = json.dumps({"nonce": nonce()})
+        request, channel = make_request("POST", self.url, body.encode('utf8'))
+        render(request, self.resource, self.clock)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual('username must be specified', channel.json_body["error"])
+
+        # Must be a string
+        body = json.dumps({"nonce": nonce(), "username": 1234})
+        request, channel = make_request("POST", self.url, body.encode('utf8'))
+        render(request, self.resource, self.clock)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual('Invalid username', channel.json_body["error"])
+
+        # Must not have null bytes
+        body = json.dumps({"nonce": nonce(), "username": u"abcd\u0000"})
+        request, channel = make_request("POST", self.url, body.encode('utf8'))
+        render(request, self.resource, self.clock)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual('Invalid username', channel.json_body["error"])
+
+        # Super long
+        body = json.dumps({"nonce": nonce(), "username": "a" * 1000})
+        request, channel = make_request("POST", self.url, body.encode('utf8'))
+        render(request, self.resource, self.clock)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual('Invalid username', channel.json_body["error"])
+
+        #
+        # Password checks
+        #
+
+        # Must be present
+        body = json.dumps({"nonce": nonce(), "username": "a"})
+        request, channel = make_request("POST", self.url, body.encode('utf8'))
+        render(request, self.resource, self.clock)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual('password must be specified', channel.json_body["error"])
+
+        # Must be a string
+        body = json.dumps({"nonce": nonce(), "username": "a", "password": 1234})
+        request, channel = make_request("POST", self.url, body.encode('utf8'))
+        render(request, self.resource, self.clock)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual('Invalid password', channel.json_body["error"])
+
+        # Must not have null bytes
+        body = json.dumps(
+            {"nonce": nonce(), "username": "a", "password": u"abcd\u0000"}
+        )
+        request, channel = make_request("POST", self.url, body.encode('utf8'))
+        render(request, self.resource, self.clock)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual('Invalid password', channel.json_body["error"])
+
+        # Super long
+        body = json.dumps({"nonce": nonce(), "username": "a", "password": "A" * 1000})
+        request, channel = make_request("POST", self.url, body.encode('utf8'))
+        render(request, self.resource, self.clock)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual('Invalid password', channel.json_body["error"])
diff --git a/tests/rest/client/v1/test_events.py b/tests/rest/client/v1/test_events.py
index f5a7258e68..956f7fc4c4 100644
--- a/tests/rest/client/v1/test_events.py
+++ b/tests/rest/client/v1/test_events.py
@@ -14,110 +14,37 @@
 # limitations under the License.
 
 """ Tests REST events for /events paths."""
-from tests import unittest
 
-# twisted imports
-from twisted.internet import defer
-
-import synapse.rest.client.v1.events
-import synapse.rest.client.v1.register
-import synapse.rest.client.v1.room
+from mock import Mock, NonCallableMock
+from six import PY3
 
+from twisted.internet import defer
 
 from ....utils import MockHttpResource, setup_test_homeserver
 from .utils import RestTestCase
 
-from mock import Mock, NonCallableMock
-
-
 PATH_PREFIX = "/_matrix/client/api/v1"
 
 
-class EventStreamPaginationApiTestCase(unittest.TestCase):
-    """ Tests event streaming query parameters and start/end keys used in the
-    Pagination stream API. """
-    user_id = "sid1"
-
-    def setUp(self):
-        # configure stream and inject items
-        pass
-
-    def tearDown(self):
-        pass
-
-    def TODO_test_long_poll(self):
-        # stream from 'end' key, send (self+other) message, expect message.
-
-        # stream from 'END', send (self+other) message, expect message.
-
-        # stream from 'end' key, send (self+other) topic, expect topic.
-
-        # stream from 'END', send (self+other) topic, expect topic.
-
-        # stream from 'end' key, send (self+other) invite, expect invite.
-
-        # stream from 'END', send (self+other) invite, expect invite.
-
-        pass
-
-    def TODO_test_stream_forward(self):
-        # stream from START, expect injected items
-
-        # stream from 'start' key, expect same content
-
-        # stream from 'end' key, expect nothing
-
-        # stream from 'END', expect nothing
-
-        # The following is needed for cases where content is removed e.g. you
-        # left a room, so the token you're streaming from is > the one that
-        # would be returned naturally from START>END.
-        # stream from very new token (higher than end key), expect same token
-        # returned as end key
-        pass
-
-    def TODO_test_limits(self):
-        # stream from a key, expect limit_num items
-
-        # stream from START, expect limit_num items
-
-        pass
-
-    def TODO_test_range(self):
-        # stream from key to key, expect X items
-
-        # stream from key to END, expect X items
-
-        # stream from START to key, expect X items
-
-        # stream from START to END, expect all items
-        pass
-
-    def TODO_test_direction(self):
-        # stream from END to START and fwds, expect newest first
-
-        # stream from END to START and bwds, expect oldest first
-
-        # stream from START to END and fwds, expect oldest first
-
-        # stream from START to END and bwds, expect newest first
-
-        pass
-
-
 class EventStreamPermissionsTestCase(RestTestCase):
     """ Tests event streaming (GET /events). """
 
+    if PY3:
+        skip = "Skip on Py3 until ported to use not V1 only register."
+
     @defer.inlineCallbacks
     def setUp(self):
+        import synapse.rest.client.v1.events
+        import synapse.rest.client.v1_only.register
+        import synapse.rest.client.v1.room
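+
+        # Imported inside setUp, presumably so that merely importing this
+        # module does not require the v1-only servlets (skipped on Python 3).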
+
         self.mock_resource = MockHttpResource(prefix=PATH_PREFIX)
 
         hs = yield setup_test_homeserver(
+            self.addCleanup,
             http_client=None,
             federation_client=Mock(),
-            ratelimiter=NonCallableMock(spec_set=[
-                "send_message",
-            ]),
+            ratelimiter=NonCallableMock(spec_set=["send_message"]),
         )
         self.ratelimiter = hs.get_ratelimiter()
         self.ratelimiter.send_message.return_value = (True, 0)
@@ -127,7 +54,7 @@ class EventStreamPermissionsTestCase(RestTestCase):
 
         hs.get_handlers().federation_handler = Mock()
 
-        synapse.rest.client.v1.register.register_servlets(hs, self.mock_resource)
+        synapse.rest.client.v1_only.register.register_servlets(hs, self.mock_resource)
         synapse.rest.client.v1.events.register_servlets(hs, self.mock_resource)
         synapse.rest.client.v1.room.register_servlets(hs, self.mock_resource)
 
@@ -155,7 +82,7 @@ class EventStreamPermissionsTestCase(RestTestCase):
         # behaviour is used instead to be consistent with the r0 spec.
         # see issue #2602
         (code, response) = yield self.mock_resource.trigger_get(
-            "/events?access_token=%s" % ("invalid" + self.token, )
+            "/events?access_token=%s" % ("invalid" + self.token,)
         )
         self.assertEquals(401, code, msg=str(response))
 
@@ -170,18 +97,12 @@ class EventStreamPermissionsTestCase(RestTestCase):
 
     @defer.inlineCallbacks
     def test_stream_room_permissions(self):
-        room_id = yield self.create_room_as(
-            self.other_user,
-            tok=self.other_token
-        )
+        room_id = yield self.create_room_as(self.other_user, tok=self.other_token)
         yield self.send(room_id, tok=self.other_token)
 
         # invited to room (expect no content for room)
         yield self.invite(
-            room_id,
-            src=self.other_user,
-            targ=self.user_id,
-            tok=self.other_token
+            room_id, src=self.other_user, targ=self.user_id, tok=self.other_token
         )
 
         (code, response) = yield self.mock_resource.trigger_get(
@@ -192,13 +113,16 @@ class EventStreamPermissionsTestCase(RestTestCase):
         # We may get a presence event for ourselves down
         self.assertEquals(
             0,
-            len([
-                c for c in response["chunk"]
-                if not (
-                    c.get("type") == "m.presence"
-                    and c["content"].get("user_id") == self.user_id
-                )
-            ])
+            len(
+                [
+                    c
+                    for c in response["chunk"]
+                    if not (
+                        c.get("type") == "m.presence"
+                        and c["content"].get("user_id") == self.user_id
+                    )
+                ]
+            ),
         )
 
         # joined room (expect all content for room)
diff --git a/tests/rest/client/v1/test_presence.py b/tests/rest/client/v1/test_presence.py
new file mode 100644
index 0000000000..66c2b68707
--- /dev/null
+++ b/tests/rest/client/v1/test_presence.py
@@ -0,0 +1,72 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from mock import Mock
+
+from synapse.rest.client.v1 import presence
+from synapse.types import UserID
+
+from tests import unittest
+
+
+class PresenceTestCase(unittest.HomeserverTestCase):
+    """ Tests presence REST API. """
+
+    user_id = "@sid:red"
+
+    user = UserID.from_string(user_id)
+    servlets = [presence.register_servlets]
+
+    def make_homeserver(self, reactor, clock):
+
+        hs = self.setup_test_homeserver(
+            "red", http_client=None, federation_client=Mock()
+        )
+
+        hs.presence_handler = Mock()
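+
+        # Stub out the presence handler so the tests below can assert on
+        # set_state call counts without running real presence logic.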
+
+        return hs
+
+    def test_put_presence(self):
+        """
+        PUT to the status endpoint with use_presence enabled will call
+        set_state on the presence handler.
+        """
+        self.hs.config.use_presence = True
+
+        body = {"presence": "here", "status_msg": "beep boop"}
+        request, channel = self.make_request(
+            "PUT", "/presence/%s/status" % (self.user_id,), body
+        )
+        self.render(request)
+
+        self.assertEqual(channel.code, 200)
+        self.assertEqual(self.hs.presence_handler.set_state.call_count, 1)
+
+    def test_put_presence_disabled(self):
+        """
+        PUT to the status endpoint with use_presence disabled will NOT call
+        set_state on the presence handler.
+        """
+        self.hs.config.use_presence = False
+
+        body = {"presence": "here", "status_msg": "beep boop"}
+        request, channel = self.make_request(
+            "PUT", "/presence/%s/status" % (self.user_id,), body
+        )
+        self.render(request)
+
+        self.assertEqual(channel.code, 200)
+        self.assertEqual(self.hs.presence_handler.set_state.call_count, 0)
diff --git a/tests/rest/client/v1/test_profile.py b/tests/rest/client/v1/test_profile.py
index dc94b8bd19..1eab9c3bdb 100644
--- a/tests/rest/client/v1/test_profile.py
+++ b/tests/rest/client/v1/test_profile.py
@@ -15,12 +15,15 @@
 
 """Tests REST events for /profile paths."""
 from mock import Mock
+
 from twisted.internet import defer
 
 import synapse.types
-from synapse.api.errors import SynapseError, AuthError
+from synapse.api.errors import AuthError, SynapseError
 from synapse.rest.client.v1 import profile
+
 from tests import unittest
+
 from ....utils import MockHttpResource, setup_test_homeserver
 
 myid = "@1234ABCD:test"
@@ -33,20 +36,23 @@ class ProfileTestCase(unittest.TestCase):
     @defer.inlineCallbacks
     def setUp(self):
         self.mock_resource = MockHttpResource(prefix=PATH_PREFIX)
-        self.mock_handler = Mock(spec=[
-            "get_displayname",
-            "set_displayname",
-            "get_avatar_url",
-            "set_avatar_url",
-        ])
+        self.mock_handler = Mock(
+            spec=[
+                "get_displayname",
+                "set_displayname",
+                "get_avatar_url",
+                "set_avatar_url",
+            ]
+        )
 
         hs = yield setup_test_homeserver(
+            self.addCleanup,
             "test",
             http_client=None,
             resource_for_client=self.mock_resource,
             federation=Mock(),
             federation_client=Mock(),
-            profile_handler=self.mock_handler
+            profile_handler=self.mock_handler,
         )
 
         def _get_user_by_req(request=None, allow_guest=False):
@@ -75,9 +81,7 @@ class ProfileTestCase(unittest.TestCase):
         mocked_set.return_value = defer.succeed(())
 
         (code, response) = yield self.mock_resource.trigger(
-            "PUT",
-            "/profile/%s/displayname" % (myid),
-            '{"displayname": "Frank Jr."}'
+            "PUT", "/profile/%s/displayname" % (myid), b'{"displayname": "Frank Jr."}'
         )
 
         self.assertEquals(200, code)
@@ -91,14 +95,12 @@ class ProfileTestCase(unittest.TestCase):
         mocked_set.side_effect = AuthError(400, "message")
 
         (code, response) = yield self.mock_resource.trigger(
-            "PUT", "/profile/%s/displayname" % ("@4567:test"),
-            '{"displayname": "Frank Jr."}'
+            "PUT",
+            "/profile/%s/displayname" % ("@4567:test"),
+            b'{"displayname": "Frank Jr."}',
         )
 
-        self.assertTrue(
-            400 <= code < 499,
-            msg="code %d is in the 4xx range" % (code)
-        )
+        self.assertTrue(400 <= code < 499, msg="code %d is in the 4xx range" % (code))
 
     @defer.inlineCallbacks
     def test_get_other_name(self):
@@ -118,14 +120,12 @@ class ProfileTestCase(unittest.TestCase):
         mocked_set.side_effect = SynapseError(400, "message")
 
         (code, response) = yield self.mock_resource.trigger(
-            "PUT", "/profile/%s/displayname" % ("@opaque:elsewhere"),
-            '{"displayname":"bob"}'
+            "PUT",
+            "/profile/%s/displayname" % ("@opaque:elsewhere"),
+            b'{"displayname":"bob"}',
         )
 
-        self.assertTrue(
-            400 <= code <= 499,
-            msg="code %d is in the 4xx range" % (code)
-        )
+        self.assertTrue(400 <= code <= 499, msg="code %d is in the 4xx range" % (code))
 
     @defer.inlineCallbacks
     def test_get_my_avatar(self):
@@ -148,7 +148,7 @@ class ProfileTestCase(unittest.TestCase):
         (code, response) = yield self.mock_resource.trigger(
             "PUT",
             "/profile/%s/avatar_url" % (myid),
-            '{"avatar_url": "http://my.server/pic.gif"}'
+            b'{"avatar_url": "http://my.server/pic.gif"}',
         )
 
         self.assertEquals(200, code)
diff --git a/tests/rest/client/v1/test_register.py b/tests/rest/client/v1/test_register.py
index a6a4e2ffe0..6b7ff813d5 100644
--- a/tests/rest/client/v1/test_register.py
+++ b/tests/rest/client/v1/test_register.py
@@ -13,26 +13,30 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.rest.client.v1.register import CreateUserRestServlet
-from twisted.internet import defer
+import json
+
 from mock import Mock
+from six import PY3
+
+from twisted.test.proto_helpers import MemoryReactorClock
+
+from synapse.http.server import JsonResource
+from synapse.rest.client.v1_only.register import register_servlets
+from synapse.util import Clock
+
 from tests import unittest
-from tests.utils import mock_getRawHeaders
-import json
+from tests.server import make_request, render, setup_test_homeserver
 
 
 class CreateUserServletTestCase(unittest.TestCase):
+    """
+    Tests for CreateUserRestServlet.
+    """
 
-    def setUp(self):
-        # do the dance to hook up request data to self.request_data
-        self.request_data = ""
-        self.request = Mock(
-            content=Mock(read=Mock(side_effect=lambda: self.request_data)),
-            path='/_matrix/client/api/v1/createUser'
-        )
-        self.request.args = {}
-        self.request.requestHeaders.getRawHeaders = mock_getRawHeaders()
+    if PY3:
+        skip = "Not ported to Python 3."
 
+    def setUp(self):
         self.registration_handler = Mock()
 
         self.appservice = Mock(sender="@as:test")
@@ -40,39 +44,46 @@ class CreateUserServletTestCase(unittest.TestCase):
             get_app_service_by_token=Mock(return_value=self.appservice)
         )
 
-        # do the dance to hook things up to the hs global
-        handlers = Mock(
-            registration_handler=self.registration_handler,
+        handlers = Mock(registration_handler=self.registration_handler)
+        self.clock = MemoryReactorClock()
+        self.hs_clock = Clock(self.clock)
+
+        self.hs = setup_test_homeserver(
+            self.addCleanup, http_client=None, clock=self.hs_clock, reactor=self.clock
         )
-        self.hs = Mock()
-        self.hs.hostname = "superbig~testing~thing.com"
         self.hs.get_datastore = Mock(return_value=self.datastore)
         self.hs.get_handlers = Mock(return_value=handlers)
-        self.servlet = CreateUserRestServlet(self.hs)
 
-    @defer.inlineCallbacks
     def test_POST_createuser_with_valid_user(self):
+
+        res = JsonResource(self.hs)
+        register_servlets(self.hs, res)
+
+        request_data = json.dumps(
+            {
+                "localpart": "someone",
+                "displayname": "someone interesting",
+                "duration_seconds": 200,
+            }
+        )
+
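+        # The access_token query parameter identifies the calling application
+        # service, via the get_app_service_by_token mock set up in setUp.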
+        url = b'/_matrix/client/api/v1/createUser?access_token=i_am_an_app_service'
+
         user_id = "@someone:interesting"
         token = "my token"
-        self.request.args = {
-            "access_token": "i_am_an_app_service"
-        }
-        self.request_data = json.dumps({
-            "localpart": "someone",
-            "displayname": "someone interesting",
-            "duration_seconds": 200
-        })
 
         self.registration_handler.get_or_create_user = Mock(
             return_value=(user_id, token)
         )
 
-        (code, result) = yield self.servlet.on_POST(self.request)
-        self.assertEquals(code, 200)
+        request, channel = make_request(b"POST", url, request_data)
+        render(request, res, self.clock)
+
+        self.assertEquals(channel.result["code"], b"200")
 
         det_data = {
             "user_id": user_id,
             "access_token": token,
-            "home_server": self.hs.hostname
+            "home_server": self.hs.hostname,
         }
-        self.assertDictContainsSubset(det_data, result)
+        self.assertDictContainsSubset(det_data, json.loads(channel.result["body"]))
diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py
index 61d737725b..9fe0760496 100644
--- a/tests/rest/client/v1/test_rooms.py
+++ b/tests/rest/client/v1/test_rooms.py
@@ -15,960 +15,783 @@
 
 """Tests REST events for /rooms paths."""
 
-# twisted imports
+import json
+
+from mock import Mock, NonCallableMock
+from six.moves.urllib import parse as urlparse
+
 from twisted.internet import defer
 
 import synapse.rest.client.v1.room
 from synapse.api.constants import Membership
-
+from synapse.http.server import JsonResource
 from synapse.types import UserID
+from synapse.util import Clock
 
-import json
-from six.moves.urllib import parse as urlparse
-
-from ....utils import MockHttpResource, setup_test_homeserver
-from .utils import RestTestCase
+from tests import unittest
+from tests.server import (
+    ThreadedMemoryReactorClock,
+    make_request,
+    render,
+    setup_test_homeserver,
+)
 
-from mock import Mock, NonCallableMock
+from .utils import RestHelper
 
-PATH_PREFIX = "/_matrix/client/api/v1"
+PATH_PREFIX = b"/_matrix/client/api/v1"
 
 
-class RoomPermissionsTestCase(RestTestCase):
-    """ Tests room permissions. """
-    user_id = "@sid1:red"
-    rmcreator_id = "@notme:red"
+class RoomBase(unittest.TestCase):
+    rmcreator_id = None
 
-    @defer.inlineCallbacks
     def setUp(self):
-        self.mock_resource = MockHttpResource(prefix=PATH_PREFIX)
 
-        hs = yield setup_test_homeserver(
+        self.clock = ThreadedMemoryReactorClock()
+        self.hs_clock = Clock(self.clock)
+
+        self.hs = setup_test_homeserver(
+            self.addCleanup,
             "red",
             http_client=None,
+            clock=self.hs_clock,
+            reactor=self.clock,
             federation_client=Mock(),
             ratelimiter=NonCallableMock(spec_set=["send_message"]),
         )
-        self.ratelimiter = hs.get_ratelimiter()
+        self.ratelimiter = self.hs.get_ratelimiter()
         self.ratelimiter.send_message.return_value = (True, 0)
 
-        hs.get_handlers().federation_handler = Mock()
+        self.hs.get_federation_handler = Mock(return_value=Mock())
 
         def get_user_by_access_token(token=None, allow_guest=False):
             return {
-                "user": UserID.from_string(self.auth_user_id),
+                "user": UserID.from_string(self.helper.auth_user_id),
                 "token_id": 1,
                 "is_guest": False,
             }
-        hs.get_auth().get_user_by_access_token = get_user_by_access_token
+
+        def get_user_by_req(request, allow_guest=False, rights="access"):
+            return synapse.types.create_requester(
+                UserID.from_string(self.helper.auth_user_id), 1, False, None
+            )
+
+        self.hs.get_auth().get_user_by_req = get_user_by_req
+        self.hs.get_auth().get_user_by_access_token = get_user_by_access_token
+        self.hs.get_auth().get_access_token_from_request = Mock(return_value=b"1234")
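+
+        # With auth stubbed out like this, every request is treated as coming
+        # from whichever user self.helper.auth_user_id currently names.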
 
         def _insert_client_ip(*args, **kwargs):
             return defer.succeed(None)
-        hs.get_datastore().insert_client_ip = _insert_client_ip
 
-        self.auth_user_id = self.rmcreator_id
+        self.hs.get_datastore().insert_client_ip = _insert_client_ip
 
-        synapse.rest.client.v1.room.register_servlets(hs, self.mock_resource)
+        self.resource = JsonResource(self.hs)
+        synapse.rest.client.v1.room.register_servlets(self.hs, self.resource)
+        synapse.rest.client.v1.room.register_deprecated_servlets(self.hs, self.resource)
+        self.helper = RestHelper(self.hs, self.resource, self.user_id)
 
-        self.auth = hs.get_auth()
 
-        # create some rooms under the name rmcreator_id
-        self.uncreated_rmid = "!aa:test"
+class RoomPermissionsTestCase(RoomBase):
+    """ Tests room permissions. """
+
+    user_id = b"@sid1:red"
+    rmcreator_id = b"@notme:red"
 
-        self.created_rmid = yield self.create_room_as(self.rmcreator_id,
-                                                      is_public=False)
+    def setUp(self):
+
+        super(RoomPermissionsTestCase, self).setUp()
 
-        self.created_public_rmid = yield self.create_room_as(self.rmcreator_id,
-                                                             is_public=True)
+        self.helper.auth_user_id = self.rmcreator_id
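+        # Act as the room creator while building the fixture rooms; the end
+        # of setUp switches auth back to user_id.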
+        # create some rooms under the name rmcreator_id
+        self.uncreated_rmid = "!aa:test"
+        self.created_rmid = self.helper.create_room_as(
+            self.rmcreator_id, is_public=False
+        )
+        self.created_public_rmid = self.helper.create_room_as(
+            self.rmcreator_id, is_public=True
+        )
 
         # send a message in one of the rooms
         self.created_rmid_msg_path = (
-            "/rooms/%s/send/m.room.message/a1" % (self.created_rmid)
-        )
-        (code, response) = yield self.mock_resource.trigger(
-            "PUT",
+            "rooms/%s/send/m.room.message/a1" % (self.created_rmid)
+        ).encode('ascii')
+        request, channel = make_request(
+            b"PUT",
             self.created_rmid_msg_path,
-            '{"msgtype":"m.text","body":"test msg"}'
+            b'{"msgtype":"m.text","body":"test msg"}',
         )
-        self.assertEquals(200, code, msg=str(response))
+        render(request, self.resource, self.clock)
+        self.assertEquals(channel.result["code"], b"200", channel.result)
 
         # set topic for public room
-        (code, response) = yield self.mock_resource.trigger(
-            "PUT",
-            "/rooms/%s/state/m.room.topic" % self.created_public_rmid,
-            '{"topic":"Public Room Topic"}'
+        request, channel = make_request(
+            b"PUT",
+            ("rooms/%s/state/m.room.topic" % self.created_public_rmid).encode('ascii'),
+            b'{"topic":"Public Room Topic"}',
         )
-        self.assertEquals(200, code, msg=str(response))
+        render(request, self.resource, self.clock)
+        self.assertEquals(channel.result["code"], b"200", channel.result)
 
         # auth as user_id now
-        self.auth_user_id = self.user_id
-
-    def tearDown(self):
-        pass
+        self.helper.auth_user_id = self.user_id
 
-    @defer.inlineCallbacks
     def test_send_message(self):
-        msg_content = '{"msgtype":"m.text","body":"hello"}'
-        send_msg_path = (
-            "/rooms/%s/send/m.room.message/mid1" % (self.created_rmid,)
-        )
+        msg_content = b'{"msgtype":"m.text","body":"hello"}'
+
+        seq = iter(range(100))
+
+        def send_msg_path():
+            return b"/rooms/%s/send/m.room.message/mid%s" % (
+                self.created_rmid,
+                str(next(seq)).encode('ascii'),
+            )
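+
+        # Each send uses a fresh transaction id ("mid<n>"): the server
+        # deduplicates PUTs that reuse a transaction id, so repeated sends
+        # need distinct ones.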
 
         # send message in uncreated room, expect 403
-        (code, response) = yield self.mock_resource.trigger(
-            "PUT",
-            "/rooms/%s/send/m.room.message/mid2" % (self.uncreated_rmid,),
-            msg_content
+        request, channel = make_request(
+            b"PUT",
+            b"/rooms/%s/send/m.room.message/mid2" % (self.uncreated_rmid,),
+            msg_content,
         )
-        self.assertEquals(403, code, msg=str(response))
+        render(request, self.resource, self.clock)
+        self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"])
 
         # send message in created room not joined (no state), expect 403
-        (code, response) = yield self.mock_resource.trigger(
-            "PUT",
-            send_msg_path,
-            msg_content
-        )
-        self.assertEquals(403, code, msg=str(response))
+        request, channel = make_request(b"PUT", send_msg_path(), msg_content)
+        render(request, self.resource, self.clock)
+        self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"])
 
         # send message in created room and invited, expect 403
-        yield self.invite(
-            room=self.created_rmid,
-            src=self.rmcreator_id,
-            targ=self.user_id
-        )
-        (code, response) = yield self.mock_resource.trigger(
-            "PUT",
-            send_msg_path,
-            msg_content
+        self.helper.invite(
+            room=self.created_rmid, src=self.rmcreator_id, targ=self.user_id
         )
-        self.assertEquals(403, code, msg=str(response))
+        request, channel = make_request(b"PUT", send_msg_path(), msg_content)
+        render(request, self.resource, self.clock)
+        self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"])
 
         # send message in created room and joined, expect 200
-        yield self.join(room=self.created_rmid, user=self.user_id)
-        (code, response) = yield self.mock_resource.trigger(
-            "PUT",
-            send_msg_path,
-            msg_content
-        )
-        self.assertEquals(200, code, msg=str(response))
+        self.helper.join(room=self.created_rmid, user=self.user_id)
+        request, channel = make_request(b"PUT", send_msg_path(), msg_content)
+        render(request, self.resource, self.clock)
+        self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
 
         # send message in created room and left, expect 403
-        yield self.leave(room=self.created_rmid, user=self.user_id)
-        (code, response) = yield self.mock_resource.trigger(
-            "PUT",
-            send_msg_path,
-            msg_content
-        )
-        self.assertEquals(403, code, msg=str(response))
+        self.helper.leave(room=self.created_rmid, user=self.user_id)
+        request, channel = make_request(b"PUT", send_msg_path(), msg_content)
+        render(request, self.resource, self.clock)
+        self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"])
 
-    @defer.inlineCallbacks
     def test_topic_perms(self):
-        topic_content = '{"topic":"My Topic Name"}'
-        topic_path = "/rooms/%s/state/m.room.topic" % self.created_rmid
+        topic_content = b'{"topic":"My Topic Name"}'
+        topic_path = b"/rooms/%s/state/m.room.topic" % self.created_rmid
 
         # set/get topic in uncreated room, expect 403
-        (code, response) = yield self.mock_resource.trigger(
-            "PUT", "/rooms/%s/state/m.room.topic" % self.uncreated_rmid,
-            topic_content
+        request, channel = make_request(
+            b"PUT", b"/rooms/%s/state/m.room.topic" % self.uncreated_rmid, topic_content
         )
-        self.assertEquals(403, code, msg=str(response))
-        (code, response) = yield self.mock_resource.trigger_get(
-            "/rooms/%s/state/m.room.topic" % self.uncreated_rmid
+        render(request, self.resource, self.clock)
+        self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"])
+        request, channel = make_request(
+            b"GET", "/rooms/%s/state/m.room.topic" % self.uncreated_rmid
         )
-        self.assertEquals(403, code, msg=str(response))
+        render(request, self.resource, self.clock)
+        self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"])
 
         # set/get topic in created PRIVATE room not joined, expect 403
-        (code, response) = yield self.mock_resource.trigger(
-            "PUT", topic_path, topic_content
-        )
-        self.assertEquals(403, code, msg=str(response))
-        (code, response) = yield self.mock_resource.trigger_get(topic_path)
-        self.assertEquals(403, code, msg=str(response))
+        request, channel = make_request(b"PUT", topic_path, topic_content)
+        render(request, self.resource, self.clock)
+        self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"])
+        request, channel = make_request(b"GET", topic_path)
+        render(request, self.resource, self.clock)
+        self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"])
 
         # set topic in created PRIVATE room and invited, expect 403
-        yield self.invite(
+        self.helper.invite(
             room=self.created_rmid, src=self.rmcreator_id, targ=self.user_id
         )
-        (code, response) = yield self.mock_resource.trigger(
-            "PUT", topic_path, topic_content
-        )
-        self.assertEquals(403, code, msg=str(response))
+        request, channel = make_request(b"PUT", topic_path, topic_content)
+        render(request, self.resource, self.clock)
+        self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"])
 
         # get topic in created PRIVATE room and invited, expect 403
-        (code, response) = yield self.mock_resource.trigger_get(topic_path)
-        self.assertEquals(403, code, msg=str(response))
+        request, channel = make_request(b"GET", topic_path)
+        render(request, self.resource, self.clock)
+        self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"])
 
         # set/get topic in created PRIVATE room and joined, expect 200
-        yield self.join(room=self.created_rmid, user=self.user_id)
+        self.helper.join(room=self.created_rmid, user=self.user_id)
 
         # Only room ops can set topic by default
-        self.auth_user_id = self.rmcreator_id
-        (code, response) = yield self.mock_resource.trigger(
-            "PUT", topic_path, topic_content
-        )
-        self.assertEquals(200, code, msg=str(response))
-        self.auth_user_id = self.user_id
+        self.helper.auth_user_id = self.rmcreator_id
+        request, channel = make_request(b"PUT", topic_path, topic_content)
+        render(request, self.resource, self.clock)
+        self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.helper.auth_user_id = self.user_id
 
-        (code, response) = yield self.mock_resource.trigger_get(topic_path)
-        self.assertEquals(200, code, msg=str(response))
-        self.assert_dict(json.loads(topic_content), response)
+        request, channel = make_request(b"GET", topic_path)
+        render(request, self.resource, self.clock)
+        self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assert_dict(json.loads(topic_content), channel.json_body)
 
         # set/get topic in created PRIVATE room and left, expect 403
-        yield self.leave(room=self.created_rmid, user=self.user_id)
-        (code, response) = yield self.mock_resource.trigger(
-            "PUT", topic_path, topic_content
-        )
-        self.assertEquals(403, code, msg=str(response))
-        (code, response) = yield self.mock_resource.trigger_get(topic_path)
-        self.assertEquals(200, code, msg=str(response))
+        self.helper.leave(room=self.created_rmid, user=self.user_id)
+        request, channel = make_request(b"PUT", topic_path, topic_content)
+        render(request, self.resource, self.clock)
+        self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"])
+        request, channel = make_request(b"GET", topic_path)
+        render(request, self.resource, self.clock)
+        self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
 
         # get topic in PUBLIC room, not joined, expect 403
-        (code, response) = yield self.mock_resource.trigger_get(
-            "/rooms/%s/state/m.room.topic" % self.created_public_rmid
+        request, channel = make_request(
+            b"GET", b"/rooms/%s/state/m.room.topic" % self.created_public_rmid
         )
-        self.assertEquals(403, code, msg=str(response))
+        render(request, self.resource, self.clock)
+        self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"])
 
         # set topic in PUBLIC room, not joined, expect 403
-        (code, response) = yield self.mock_resource.trigger(
-            "PUT",
-            "/rooms/%s/state/m.room.topic" % self.created_public_rmid,
-            topic_content
+        request, channel = make_request(
+            b"PUT",
+            b"/rooms/%s/state/m.room.topic" % self.created_public_rmid,
+            topic_content,
         )
-        self.assertEquals(403, code, msg=str(response))
+        render(request, self.resource, self.clock)
+        self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"])
 
-    @defer.inlineCallbacks
     def _test_get_membership(self, room=None, members=[], expect_code=None):
         for member in members:
-            path = "/rooms/%s/state/m.room.member/%s" % (room, member)
-            (code, response) = yield self.mock_resource.trigger_get(path)
-            self.assertEquals(expect_code, code)
+            path = b"/rooms/%s/state/m.room.member/%s" % (room, member)
+            request, channel = make_request(b"GET", path)
+            render(request, self.resource, self.clock)
+            self.assertEquals(expect_code, int(channel.result["code"]))
 
-    @defer.inlineCallbacks
     def test_membership_basic_room_perms(self):
         # === room does not exist ===
         room = self.uncreated_rmid
         # get membership of self, get membership of other, uncreated room
         # expect all 403s
-        yield self._test_get_membership(
-            members=[self.user_id, self.rmcreator_id],
-            room=room, expect_code=403)
+        self._test_get_membership(
+            members=[self.user_id, self.rmcreator_id], room=room, expect_code=403
+        )
 
         # trying to invite people to this room should 403
-        yield self.invite(room=room, src=self.user_id, targ=self.rmcreator_id,
-                          expect_code=403)
+        self.helper.invite(
+            room=room, src=self.user_id, targ=self.rmcreator_id, expect_code=403
+        )
 
         # set [invite/join/left] of self, set [invite/join/left] of other,
         # expect all 404s because room doesn't exist on any server
         for usr in [self.user_id, self.rmcreator_id]:
-            yield self.join(room=room, user=usr, expect_code=404)
-            yield self.leave(room=room, user=usr, expect_code=404)
+            self.helper.join(room=room, user=usr, expect_code=404)
+            self.helper.leave(room=room, user=usr, expect_code=404)
 
-    @defer.inlineCallbacks
     def test_membership_private_room_perms(self):
         room = self.created_rmid
         # get membership of self, get membership of other, private room + invite
         # expect all 403s
-        yield self.invite(room=room, src=self.rmcreator_id,
-                          targ=self.user_id)
-        yield self._test_get_membership(
-            members=[self.user_id, self.rmcreator_id],
-            room=room, expect_code=403)
+        self.helper.invite(room=room, src=self.rmcreator_id, targ=self.user_id)
+        self._test_get_membership(
+            members=[self.user_id, self.rmcreator_id], room=room, expect_code=403
+        )
 
         # get membership of self, get membership of other, private room + joined
         # expect all 200s
-        yield self.join(room=room, user=self.user_id)
-        yield self._test_get_membership(
-            members=[self.user_id, self.rmcreator_id],
-            room=room, expect_code=200)
+        self.helper.join(room=room, user=self.user_id)
+        self._test_get_membership(
+            members=[self.user_id, self.rmcreator_id], room=room, expect_code=200
+        )
 
         # get membership of self, get membership of other, private room + left
         # expect all 200s
-        yield self.leave(room=room, user=self.user_id)
-        yield self._test_get_membership(
-            members=[self.user_id, self.rmcreator_id],
-            room=room, expect_code=200)
+        self.helper.leave(room=room, user=self.user_id)
+        self._test_get_membership(
+            members=[self.user_id, self.rmcreator_id], room=room, expect_code=200
+        )
 
-    @defer.inlineCallbacks
     def test_membership_public_room_perms(self):
         room = self.created_public_rmid
         # get membership of self, get membership of other, public room + invite
         # expect 403
-        yield self.invite(room=room, src=self.rmcreator_id,
-                          targ=self.user_id)
-        yield self._test_get_membership(
-            members=[self.user_id, self.rmcreator_id],
-            room=room, expect_code=403)
+        self.helper.invite(room=room, src=self.rmcreator_id, targ=self.user_id)
+        self._test_get_membership(
+            members=[self.user_id, self.rmcreator_id], room=room, expect_code=403
+        )
 
         # get membership of self, get membership of other, public room + joined
         # expect all 200s
-        yield self.join(room=room, user=self.user_id)
-        yield self._test_get_membership(
-            members=[self.user_id, self.rmcreator_id],
-            room=room, expect_code=200)
+        self.helper.join(room=room, user=self.user_id)
+        self._test_get_membership(
+            members=[self.user_id, self.rmcreator_id], room=room, expect_code=200
+        )
 
         # get membership of self, get membership of other, public room + left
         # expect 200.
-        yield self.leave(room=room, user=self.user_id)
-        yield self._test_get_membership(
-            members=[self.user_id, self.rmcreator_id],
-            room=room, expect_code=200)
+        self.helper.leave(room=room, user=self.user_id)
+        self._test_get_membership(
+            members=[self.user_id, self.rmcreator_id], room=room, expect_code=200
+        )
 
-    @defer.inlineCallbacks
     def test_invited_permissions(self):
         room = self.created_rmid
-        yield self.invite(room=room, src=self.rmcreator_id, targ=self.user_id)
+        self.helper.invite(room=room, src=self.rmcreator_id, targ=self.user_id)
 
         # set [invite/join/left] of other user, expect 403s
-        yield self.invite(room=room, src=self.user_id, targ=self.rmcreator_id,
-                          expect_code=403)
-        yield self.change_membership(room=room, src=self.user_id,
-                                     targ=self.rmcreator_id,
-                                     membership=Membership.JOIN,
-                                     expect_code=403)
-        yield self.change_membership(room=room, src=self.user_id,
-                                     targ=self.rmcreator_id,
-                                     membership=Membership.LEAVE,
-                                     expect_code=403)
-
-    @defer.inlineCallbacks
+        self.helper.invite(
+            room=room, src=self.user_id, targ=self.rmcreator_id, expect_code=403
+        )
+        self.helper.change_membership(
+            room=room,
+            src=self.user_id,
+            targ=self.rmcreator_id,
+            membership=Membership.JOIN,
+            expect_code=403,
+        )
+        self.helper.change_membership(
+            room=room,
+            src=self.user_id,
+            targ=self.rmcreator_id,
+            membership=Membership.LEAVE,
+            expect_code=403,
+        )
+
     def test_joined_permissions(self):
         room = self.created_rmid
-        yield self.invite(room=room, src=self.rmcreator_id, targ=self.user_id)
-        yield self.join(room=room, user=self.user_id)
+        self.helper.invite(room=room, src=self.rmcreator_id, targ=self.user_id)
+        self.helper.join(room=room, user=self.user_id)
 
         # set invited of self, expect 403
-        yield self.invite(room=room, src=self.user_id, targ=self.user_id,
-                          expect_code=403)
+        self.helper.invite(
+            room=room, src=self.user_id, targ=self.user_id, expect_code=403
+        )
 
         # set joined of self, expect 200 (NOOP)
-        yield self.join(room=room, user=self.user_id)
+        self.helper.join(room=room, user=self.user_id)
 
         other = "@burgundy:red"
         # set invited of other, expect 200
-        yield self.invite(room=room, src=self.user_id, targ=other,
-                          expect_code=200)
+        self.helper.invite(room=room, src=self.user_id, targ=other, expect_code=200)
 
         # set joined of other, expect 403
-        yield self.change_membership(room=room, src=self.user_id,
-                                     targ=other,
-                                     membership=Membership.JOIN,
-                                     expect_code=403)
+        self.helper.change_membership(
+            room=room,
+            src=self.user_id,
+            targ=other,
+            membership=Membership.JOIN,
+            expect_code=403,
+        )
 
         # set left of other, expect 403
-        yield self.change_membership(room=room, src=self.user_id,
-                                     targ=other,
-                                     membership=Membership.LEAVE,
-                                     expect_code=403)
+        self.helper.change_membership(
+            room=room,
+            src=self.user_id,
+            targ=other,
+            membership=Membership.LEAVE,
+            expect_code=403,
+        )
 
         # set left of self, expect 200
-        yield self.leave(room=room, user=self.user_id)
+        self.helper.leave(room=room, user=self.user_id)
 
-    @defer.inlineCallbacks
     def test_leave_permissions(self):
         room = self.created_rmid
-        yield self.invite(room=room, src=self.rmcreator_id, targ=self.user_id)
-        yield self.join(room=room, user=self.user_id)
-        yield self.leave(room=room, user=self.user_id)
+        self.helper.invite(room=room, src=self.rmcreator_id, targ=self.user_id)
+        self.helper.join(room=room, user=self.user_id)
+        self.helper.leave(room=room, user=self.user_id)
 
         # set [invite/join/left] of self, set [invite/join/left] of other,
         # expect all 403s
         for usr in [self.user_id, self.rmcreator_id]:
-            yield self.change_membership(
+            self.helper.change_membership(
                 room=room,
                 src=self.user_id,
                 targ=usr,
                 membership=Membership.INVITE,
-                expect_code=403
+                expect_code=403,
             )
 
-            yield self.change_membership(
+            self.helper.change_membership(
                 room=room,
                 src=self.user_id,
                 targ=usr,
                 membership=Membership.JOIN,
-                expect_code=403
+                expect_code=403,
             )
 
         # It is always valid to LEAVE if you've already left (currently.)
-        yield self.change_membership(
+        self.helper.change_membership(
             room=room,
             src=self.user_id,
             targ=self.rmcreator_id,
             membership=Membership.LEAVE,
-            expect_code=403
+            expect_code=403,
         )
 
 
-class RoomsMemberListTestCase(RestTestCase):
+class RoomsMemberListTestCase(RoomBase):
     """ Tests /rooms/$room_id/members/list REST events."""
-    user_id = "@sid1:red"
 
-    @defer.inlineCallbacks
-    def setUp(self):
-        self.mock_resource = MockHttpResource(prefix=PATH_PREFIX)
-
-        hs = yield setup_test_homeserver(
-            "red",
-            http_client=None,
-            federation_client=Mock(),
-            ratelimiter=NonCallableMock(spec_set=["send_message"]),
-        )
-        self.ratelimiter = hs.get_ratelimiter()
-        self.ratelimiter.send_message.return_value = (True, 0)
-
-        hs.get_handlers().federation_handler = Mock()
-
-        self.auth_user_id = self.user_id
-
-        def get_user_by_access_token(token=None, allow_guest=False):
-            return {
-                "user": UserID.from_string(self.auth_user_id),
-                "token_id": 1,
-                "is_guest": False,
-            }
-        hs.get_auth().get_user_by_access_token = get_user_by_access_token
-
-        def _insert_client_ip(*args, **kwargs):
-            return defer.succeed(None)
-        hs.get_datastore().insert_client_ip = _insert_client_ip
-
-        synapse.rest.client.v1.room.register_servlets(hs, self.mock_resource)
-
-    def tearDown(self):
-        pass
+    user_id = b"@sid1:red"
 
-    @defer.inlineCallbacks
     def test_get_member_list(self):
-        room_id = yield self.create_room_as(self.user_id)
-        (code, response) = yield self.mock_resource.trigger_get(
-            "/rooms/%s/members" % room_id
-        )
-        self.assertEquals(200, code, msg=str(response))
+        room_id = self.helper.create_room_as(self.user_id)
+        request, channel = make_request(b"GET", b"/rooms/%s/members" % room_id)
+        render(request, self.resource, self.clock)
+        self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
 
-    @defer.inlineCallbacks
     def test_get_member_list_no_room(self):
-        (code, response) = yield self.mock_resource.trigger_get(
-            "/rooms/roomdoesnotexist/members"
-        )
-        self.assertEquals(403, code, msg=str(response))
+        request, channel = make_request(b"GET", b"/rooms/roomdoesnotexist/members")
+        render(request, self.resource, self.clock)
+        self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"])
 
-    @defer.inlineCallbacks
     def test_get_member_list_no_permission(self):
-        room_id = yield self.create_room_as("@some_other_guy:red")
-        (code, response) = yield self.mock_resource.trigger_get(
-            "/rooms/%s/members" % room_id
-        )
-        self.assertEquals(403, code, msg=str(response))
+        room_id = self.helper.create_room_as(b"@some_other_guy:red")
+        request, channel = make_request(b"GET", b"/rooms/%s/members" % room_id)
+        render(request, self.resource, self.clock)
+        self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"])
 
-    @defer.inlineCallbacks
     def test_get_member_list_mixed_memberships(self):
-        room_creator = "@some_other_guy:red"
-        room_id = yield self.create_room_as(room_creator)
-        room_path = "/rooms/%s/members" % room_id
-        yield self.invite(room=room_id, src=room_creator,
-                          targ=self.user_id)
+        room_creator = b"@some_other_guy:red"
+        room_id = self.helper.create_room_as(room_creator)
+        room_path = b"/rooms/%s/members" % room_id
+        self.helper.invite(room=room_id, src=room_creator, targ=self.user_id)
         # can't see list if you're just invited.
-        (code, response) = yield self.mock_resource.trigger_get(room_path)
-        self.assertEquals(403, code, msg=str(response))
+        request, channel = make_request(b"GET", room_path)
+        render(request, self.resource, self.clock)
+        self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"])
 
-        yield self.join(room=room_id, user=self.user_id)
+        self.helper.join(room=room_id, user=self.user_id)
         # can see list now joined
-        (code, response) = yield self.mock_resource.trigger_get(room_path)
-        self.assertEquals(200, code, msg=str(response))
+        request, channel = make_request(b"GET", room_path)
+        render(request, self.resource, self.clock)
+        self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
 
-        yield self.leave(room=room_id, user=self.user_id)
+        self.helper.leave(room=room_id, user=self.user_id)
         # can see old list once left
-        (code, response) = yield self.mock_resource.trigger_get(room_path)
-        self.assertEquals(200, code, msg=str(response))
+        request, channel = make_request(b"GET", room_path)
+        render(request, self.resource, self.clock)
+        self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
 
 
-class RoomsCreateTestCase(RestTestCase):
+class RoomsCreateTestCase(RoomBase):
     """ Tests /rooms and /rooms/$room_id REST events. """
-    user_id = "@sid1:red"
-
-    @defer.inlineCallbacks
-    def setUp(self):
-        self.mock_resource = MockHttpResource(prefix=PATH_PREFIX)
-        self.auth_user_id = self.user_id
-
-        hs = yield setup_test_homeserver(
-            "red",
-            http_client=None,
-            federation_client=Mock(),
-            ratelimiter=NonCallableMock(spec_set=["send_message"]),
-        )
-        self.ratelimiter = hs.get_ratelimiter()
-        self.ratelimiter.send_message.return_value = (True, 0)
 
-        hs.get_handlers().federation_handler = Mock()
+    user_id = b"@sid1:red"
 
-        def get_user_by_access_token(token=None, allow_guest=False):
-            return {
-                "user": UserID.from_string(self.auth_user_id),
-                "token_id": 1,
-                "is_guest": False,
-            }
-        hs.get_auth().get_user_by_access_token = get_user_by_access_token
-
-        def _insert_client_ip(*args, **kwargs):
-            return defer.succeed(None)
-        hs.get_datastore().insert_client_ip = _insert_client_ip
-
-        synapse.rest.client.v1.room.register_servlets(hs, self.mock_resource)
-
-    @defer.inlineCallbacks
     def test_post_room_no_keys(self):
         # POST with no config keys, expect new room id
-        (code, response) = yield self.mock_resource.trigger("POST",
-                                                            "/createRoom",
-                                                            "{}")
-        self.assertEquals(200, code, response)
-        self.assertTrue("room_id" in response)
+        request, channel = make_request(b"POST", b"/createRoom", b"{}")
+
+        render(request, self.resource, self.clock)
+        self.assertEquals(200, int(channel.result["code"]), channel.result)
+        self.assertTrue("room_id" in channel.json_body)
 
-    @defer.inlineCallbacks
     def test_post_room_visibility_key(self):
         # POST with visibility config key, expect new room id
-        (code, response) = yield self.mock_resource.trigger(
-            "POST",
-            "/createRoom",
-            '{"visibility":"private"}')
-        self.assertEquals(200, code)
-        self.assertTrue("room_id" in response)
-
-    @defer.inlineCallbacks
+        request, channel = make_request(
+            b"POST", b"/createRoom", b'{"visibility":"private"}'
+        )
+        render(request, self.resource, self.clock)
+        self.assertEquals(200, int(channel.result["code"]))
+        self.assertTrue("room_id" in channel.json_body)
+
     def test_post_room_custom_key(self):
         # POST with custom config keys, expect new room id
-        (code, response) = yield self.mock_resource.trigger(
-            "POST",
-            "/createRoom",
-            '{"custom":"stuff"}')
-        self.assertEquals(200, code)
-        self.assertTrue("room_id" in response)
-
-    @defer.inlineCallbacks
+        request, channel = make_request(b"POST", b"/createRoom", b'{"custom":"stuff"}')
+        render(request, self.resource, self.clock)
+        self.assertEquals(200, int(channel.result["code"]))
+        self.assertTrue("room_id" in channel.json_body)
+
     def test_post_room_known_and_unknown_keys(self):
         # POST with custom + known config keys, expect new room id
-        (code, response) = yield self.mock_resource.trigger(
-            "POST",
-            "/createRoom",
-            '{"visibility":"private","custom":"things"}')
-        self.assertEquals(200, code)
-        self.assertTrue("room_id" in response)
-
-    @defer.inlineCallbacks
+        request, channel = make_request(
+            b"POST", b"/createRoom", b'{"visibility":"private","custom":"things"}'
+        )
+        render(request, self.resource, self.clock)
+        self.assertEquals(200, int(channel.result["code"]))
+        self.assertTrue("room_id" in channel.json_body)
+
     def test_post_room_invalid_content(self):
         # POST with invalid content / paths, expect 400
-        (code, response) = yield self.mock_resource.trigger(
-            "POST",
-            "/createRoom",
-            '{"visibili')
-        self.assertEquals(400, code)
+        request, channel = make_request(b"POST", b"/createRoom", b'{"visibili')
+        render(request, self.resource, self.clock)
+        self.assertEquals(400, int(channel.result["code"]))
 
-        (code, response) = yield self.mock_resource.trigger(
-            "POST",
-            "/createRoom",
-            '["hello"]')
-        self.assertEquals(400, code)
+        request, channel = make_request(b"POST", b"/createRoom", b'["hello"]')
+        render(request, self.resource, self.clock)
+        self.assertEquals(400, int(channel.result["code"]))
 
 
-class RoomTopicTestCase(RestTestCase):
+class RoomTopicTestCase(RoomBase):
     """ Tests /rooms/$room_id/topic REST events. """
-    user_id = "@sid1:red"
-
-    @defer.inlineCallbacks
-    def setUp(self):
-        self.mock_resource = MockHttpResource(prefix=PATH_PREFIX)
-        self.auth_user_id = self.user_id
-
-        hs = yield setup_test_homeserver(
-            "red",
-            http_client=None,
-            federation_client=Mock(),
-            ratelimiter=NonCallableMock(spec_set=["send_message"]),
-        )
-        self.ratelimiter = hs.get_ratelimiter()
-        self.ratelimiter.send_message.return_value = (True, 0)
 
-        hs.get_handlers().federation_handler = Mock()
+    user_id = b"@sid1:red"
 
-        def get_user_by_access_token(token=None, allow_guest=False):
-            return {
-                "user": UserID.from_string(self.auth_user_id),
-                "token_id": 1,
-                "is_guest": False,
-            }
-
-        hs.get_auth().get_user_by_access_token = get_user_by_access_token
-
-        def _insert_client_ip(*args, **kwargs):
-            return defer.succeed(None)
-        hs.get_datastore().insert_client_ip = _insert_client_ip
+    def setUp(self):
 
-        synapse.rest.client.v1.room.register_servlets(hs, self.mock_resource)
+        super(RoomTopicTestCase, self).setUp()
 
         # create the room
-        self.room_id = yield self.create_room_as(self.user_id)
-        self.path = "/rooms/%s/state/m.room.topic" % (self.room_id,)
-
-    def tearDown(self):
-        pass
+        self.room_id = self.helper.create_room_as(self.user_id)
+        self.path = b"/rooms/%s/state/m.room.topic" % (self.room_id,)
 
-    @defer.inlineCallbacks
     def test_invalid_puts(self):
         # missing keys or invalid json
-        (code, response) = yield self.mock_resource.trigger(
-            "PUT", self.path, '{}'
-        )
-        self.assertEquals(400, code, msg=str(response))
+        request, channel = make_request(b"PUT", self.path, '{}')
+        render(request, self.resource, self.clock)
+        self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
 
-        (code, response) = yield self.mock_resource.trigger(
-            "PUT", self.path, '{"_name":"bob"}'
-        )
-        self.assertEquals(400, code, msg=str(response))
+        request, channel = make_request(b"PUT", self.path, '{"_name":"bob"}')
+        render(request, self.resource, self.clock)
+        self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
 
-        (code, response) = yield self.mock_resource.trigger(
-            "PUT", self.path, '{"nao'
-        )
-        self.assertEquals(400, code, msg=str(response))
+        request, channel = make_request(b"PUT", self.path, '{"nao')
+        render(request, self.resource, self.clock)
+        self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
 
-        (code, response) = yield self.mock_resource.trigger(
-            "PUT", self.path, '[{"_name":"bob"},{"_name":"jill"}]'
+        request, channel = make_request(
+            b"PUT", self.path, '[{"_name":"bob"},{"_name":"jill"}]'
         )
-        self.assertEquals(400, code, msg=str(response))
+        render(request, self.resource, self.clock)
+        self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
 
-        (code, response) = yield self.mock_resource.trigger(
-            "PUT", self.path, 'text only'
-        )
-        self.assertEquals(400, code, msg=str(response))
+        request, channel = make_request(b"PUT", self.path, 'text only')
+        render(request, self.resource, self.clock)
+        self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
 
-        (code, response) = yield self.mock_resource.trigger(
-            "PUT", self.path, ''
-        )
-        self.assertEquals(400, code, msg=str(response))
+        request, channel = make_request(b"PUT", self.path, '')
+        render(request, self.resource, self.clock)
+        self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
 
         # valid key, wrong type
         content = '{"topic":["Topic name"]}'
-        (code, response) = yield self.mock_resource.trigger(
-            "PUT", self.path, content
-        )
-        self.assertEquals(400, code, msg=str(response))
+        request, channel = make_request(b"PUT", self.path, content)
+        render(request, self.resource, self.clock)
+        self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
 
-    @defer.inlineCallbacks
     def test_rooms_topic(self):
         # nothing should be there
-        (code, response) = yield self.mock_resource.trigger_get(self.path)
-        self.assertEquals(404, code, msg=str(response))
+        request, channel = make_request(b"GET", self.path)
+        render(request, self.resource, self.clock)
+        self.assertEquals(404, int(channel.result["code"]), msg=channel.result["body"])
 
         # valid put
         content = '{"topic":"Topic name"}'
-        (code, response) = yield self.mock_resource.trigger(
-            "PUT", self.path, content
-        )
-        self.assertEquals(200, code, msg=str(response))
+        request, channel = make_request(b"PUT", self.path, content)
+        render(request, self.resource, self.clock)
+        self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
 
         # valid get
-        (code, response) = yield self.mock_resource.trigger_get(self.path)
-        self.assertEquals(200, code, msg=str(response))
-        self.assert_dict(json.loads(content), response)
+        request, channel = make_request(b"GET", self.path)
+        render(request, self.resource, self.clock)
+        self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assert_dict(json.loads(content), channel.json_body)
 
-    @defer.inlineCallbacks
     def test_rooms_topic_with_extra_keys(self):
         # valid put with extra keys
         content = '{"topic":"Seasons","subtopic":"Summer"}'
-        (code, response) = yield self.mock_resource.trigger(
-            "PUT", self.path, content
-        )
-        self.assertEquals(200, code, msg=str(response))
+        request, channel = make_request(b"PUT", self.path, content)
+        render(request, self.resource, self.clock)
+        self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
 
         # valid get
-        (code, response) = yield self.mock_resource.trigger_get(self.path)
-        self.assertEquals(200, code, msg=str(response))
-        self.assert_dict(json.loads(content), response)
+        request, channel = make_request(b"GET", self.path)
+        render(request, self.resource, self.clock)
+        self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assert_dict(json.loads(content), channel.json_body)
 
 
-class RoomMemberStateTestCase(RestTestCase):
+class RoomMemberStateTestCase(RoomBase):
     """ Tests /rooms/$room_id/members/$user_id/state REST events. """
-    user_id = "@sid1:red"
 
-    @defer.inlineCallbacks
-    def setUp(self):
-        self.mock_resource = MockHttpResource(prefix=PATH_PREFIX)
-        self.auth_user_id = self.user_id
+    user_id = b"@sid1:red"
 
-        hs = yield setup_test_homeserver(
-            "red",
-            http_client=None,
-            federation_client=Mock(),
-            ratelimiter=NonCallableMock(spec_set=["send_message"]),
-        )
-        self.ratelimiter = hs.get_ratelimiter()
-        self.ratelimiter.send_message.return_value = (True, 0)
-
-        hs.get_handlers().federation_handler = Mock()
-
-        def get_user_by_access_token(token=None, allow_guest=False):
-            return {
-                "user": UserID.from_string(self.auth_user_id),
-                "token_id": 1,
-                "is_guest": False,
-            }
-        hs.get_auth().get_user_by_access_token = get_user_by_access_token
-
-        def _insert_client_ip(*args, **kwargs):
-            return defer.succeed(None)
-        hs.get_datastore().insert_client_ip = _insert_client_ip
-
-        synapse.rest.client.v1.room.register_servlets(hs, self.mock_resource)
+    def setUp(self):
 
-        self.room_id = yield self.create_room_as(self.user_id)
+        super(RoomMemberStateTestCase, self).setUp()
+        self.room_id = self.helper.create_room_as(self.user_id)
 
     def tearDown(self):
         pass
 
-    @defer.inlineCallbacks
     def test_invalid_puts(self):
         path = "/rooms/%s/state/m.room.member/%s" % (self.room_id, self.user_id)
         # missing keys or invalid json
-        (code, response) = yield self.mock_resource.trigger("PUT", path, '{}')
-        self.assertEquals(400, code, msg=str(response))
+        request, channel = make_request(b"PUT", path, '{}')
+        render(request, self.resource, self.clock)
+        self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
 
-        (code, response) = yield self.mock_resource.trigger(
-            "PUT", path, '{"_name":"bob"}'
-        )
-        self.assertEquals(400, code, msg=str(response))
+        request, channel = make_request(b"PUT", path, '{"_name":"bob"}')
+        render(request, self.resource, self.clock)
+        self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
 
-        (code, response) = yield self.mock_resource.trigger(
-            "PUT", path, '{"nao'
-        )
-        self.assertEquals(400, code, msg=str(response))
+        request, channel = make_request(b"PUT", path, '{"nao')
+        render(request, self.resource, self.clock)
+        self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
 
-        (code, response) = yield self.mock_resource.trigger(
-            "PUT", path, '[{"_name":"bob"},{"_name":"jill"}]'
+        request, channel = make_request(
+            b"PUT", path, b'[{"_name":"bob"},{"_name":"jill"}]'
         )
-        self.assertEquals(400, code, msg=str(response))
+        render(request, self.resource, self.clock)
+        self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
 
-        (code, response) = yield self.mock_resource.trigger(
-            "PUT", path, 'text only'
-        )
-        self.assertEquals(400, code, msg=str(response))
+        request, channel = make_request(b"PUT", path, 'text only')
+        render(request, self.resource, self.clock)
+        self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
 
-        (code, response) = yield self.mock_resource.trigger(
-            "PUT", path, ''
-        )
-        self.assertEquals(400, code, msg=str(response))
+        request, channel = make_request(b"PUT", path, '')
+        render(request, self.resource, self.clock)
+        self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
 
         # valid keys, wrong types
-        content = ('{"membership":["%s","%s","%s"]}' % (
-            Membership.INVITE, Membership.JOIN, Membership.LEAVE
-        ))
-        (code, response) = yield self.mock_resource.trigger("PUT", path, content)
-        self.assertEquals(400, code, msg=str(response))
+        content = '{"membership":["%s","%s","%s"]}' % (
+            Membership.INVITE,
+            Membership.JOIN,
+            Membership.LEAVE,
+        )
+        request, channel = make_request(b"PUT", path, content.encode('ascii'))
+        render(request, self.resource, self.clock)
+        self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
 
-    @defer.inlineCallbacks
     def test_rooms_members_self(self):
         path = "/rooms/%s/state/m.room.member/%s" % (
-            urlparse.quote(self.room_id), self.user_id
+            urlparse.quote(self.room_id),
+            self.user_id,
         )
 
         # valid join message (NOOP since we made the room)
         content = '{"membership":"%s"}' % Membership.JOIN
-        (code, response) = yield self.mock_resource.trigger("PUT", path, content)
-        self.assertEquals(200, code, msg=str(response))
+        request, channel = make_request(b"PUT", path, content.encode('ascii'))
+        render(request, self.resource, self.clock)
+        self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
 
-        (code, response) = yield self.mock_resource.trigger("GET", path, None)
-        self.assertEquals(200, code, msg=str(response))
+        request, channel = make_request(b"GET", path, None)
+        render(request, self.resource, self.clock)
+        self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
 
-        expected_response = {
-            "membership": Membership.JOIN,
-        }
-        self.assertEquals(expected_response, response)
+        expected_response = {"membership": Membership.JOIN}
+        self.assertEquals(expected_response, channel.json_body)
 
-    @defer.inlineCallbacks
     def test_rooms_members_other(self):
         self.other_id = "@zzsid1:red"
         path = "/rooms/%s/state/m.room.member/%s" % (
-            urlparse.quote(self.room_id), self.other_id
+            urlparse.quote(self.room_id),
+            self.other_id,
         )
 
         # valid invite message
         content = '{"membership":"%s"}' % Membership.INVITE
-        (code, response) = yield self.mock_resource.trigger("PUT", path, content)
-        self.assertEquals(200, code, msg=str(response))
+        request, channel = make_request(b"PUT", path, content)
+        render(request, self.resource, self.clock)
+        self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
 
-        (code, response) = yield self.mock_resource.trigger("GET", path, None)
-        self.assertEquals(200, code, msg=str(response))
-        self.assertEquals(json.loads(content), response)
+        request, channel = make_request(b"GET", path, None)
+        render(request, self.resource, self.clock)
+        self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEquals(json.loads(content), channel.json_body)
 
-    @defer.inlineCallbacks
     def test_rooms_members_other_custom_keys(self):
         self.other_id = "@zzsid1:red"
         path = "/rooms/%s/state/m.room.member/%s" % (
-            urlparse.quote(self.room_id), self.other_id
+            urlparse.quote(self.room_id),
+            self.other_id,
         )
 
         # valid invite message with custom key
-        content = ('{"membership":"%s","invite_text":"%s"}' % (
-            Membership.INVITE, "Join us!"
-        ))
-        (code, response) = yield self.mock_resource.trigger("PUT", path, content)
-        self.assertEquals(200, code, msg=str(response))
+        content = '{"membership":"%s","invite_text":"%s"}' % (
+            Membership.INVITE,
+            "Join us!",
+        )
+        request, channel = make_request(b"PUT", path, content)
+        render(request, self.resource, self.clock)
+        self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
 
-        (code, response) = yield self.mock_resource.trigger("GET", path, None)
-        self.assertEquals(200, code, msg=str(response))
-        self.assertEquals(json.loads(content), response)
+        request, channel = make_request(b"GET", path, None)
+        render(request, self.resource, self.clock)
+        self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEquals(json.loads(content), channel.json_body)
 
 
-class RoomMessagesTestCase(RestTestCase):
+class RoomMessagesTestCase(RoomBase):
     """ Tests /rooms/$room_id/messages/$user_id/$msg_id REST events. """
+
     user_id = "@sid1:red"
 
-    @defer.inlineCallbacks
     def setUp(self):
-        self.mock_resource = MockHttpResource(prefix=PATH_PREFIX)
-        self.auth_user_id = self.user_id
+        super(RoomMessagesTestCase, self).setUp()
 
-        hs = yield setup_test_homeserver(
-            "red",
-            http_client=None,
-            federation_client=Mock(),
-            ratelimiter=NonCallableMock(spec_set=["send_message"]),
-        )
-        self.ratelimiter = hs.get_ratelimiter()
-        self.ratelimiter.send_message.return_value = (True, 0)
+        self.room_id = self.helper.create_room_as(self.user_id)
 
-        hs.get_handlers().federation_handler = Mock()
-
-        def get_user_by_access_token(token=None, allow_guest=False):
-            return {
-                "user": UserID.from_string(self.auth_user_id),
-                "token_id": 1,
-                "is_guest": False,
-            }
-        hs.get_auth().get_user_by_access_token = get_user_by_access_token
-
-        def _insert_client_ip(*args, **kwargs):
-            return defer.succeed(None)
-        hs.get_datastore().insert_client_ip = _insert_client_ip
-
-        synapse.rest.client.v1.room.register_servlets(hs, self.mock_resource)
-
-        self.room_id = yield self.create_room_as(self.user_id)
-
-    def tearDown(self):
-        pass
-
-    @defer.inlineCallbacks
     def test_invalid_puts(self):
-        path = "/rooms/%s/send/m.room.message/mid1" % (
-            urlparse.quote(self.room_id))
+        path = "/rooms/%s/send/m.room.message/mid1" % (urlparse.quote(self.room_id))
         # missing keys or invalid json
-        (code, response) = yield self.mock_resource.trigger(
-            "PUT", path, '{}'
-        )
-        self.assertEquals(400, code, msg=str(response))
+        request, channel = make_request(b"PUT", path, '{}')
+        render(request, self.resource, self.clock)
+        self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
 
-        (code, response) = yield self.mock_resource.trigger(
-            "PUT", path, '{"_name":"bob"}'
-        )
-        self.assertEquals(400, code, msg=str(response))
+        request, channel = make_request(b"PUT", path, '{"_name":"bob"}')
+        render(request, self.resource, self.clock)
+        self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
 
-        (code, response) = yield self.mock_resource.trigger(
-            "PUT", path, '{"nao'
-        )
-        self.assertEquals(400, code, msg=str(response))
+        request, channel = make_request(b"PUT", path, '{"nao')
+        render(request, self.resource, self.clock)
+        self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
 
-        (code, response) = yield self.mock_resource.trigger(
-            "PUT", path, '[{"_name":"bob"},{"_name":"jill"}]'
+        request, channel = make_request(
+            b"PUT", path, '[{"_name":"bob"},{"_name":"jill"}]'
         )
-        self.assertEquals(400, code, msg=str(response))
+        render(request, self.resource, self.clock)
+        self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
 
-        (code, response) = yield self.mock_resource.trigger(
-            "PUT", path, 'text only'
-        )
-        self.assertEquals(400, code, msg=str(response))
+        request, channel = make_request(b"PUT", path, 'text only')
+        render(request, self.resource, self.clock)
+        self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
 
-        (code, response) = yield self.mock_resource.trigger(
-            "PUT", path, ''
-        )
-        self.assertEquals(400, code, msg=str(response))
+        request, channel = make_request(b"PUT", path, '')
+        render(request, self.resource, self.clock)
+        self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
 
-    @defer.inlineCallbacks
     def test_rooms_messages_sent(self):
-        path = "/rooms/%s/send/m.room.message/mid1" % (
-            urlparse.quote(self.room_id))
+        path = "/rooms/%s/send/m.room.message/mid1" % (urlparse.quote(self.room_id))
 
         content = '{"body":"test","msgtype":{"type":"a"}}'
-        (code, response) = yield self.mock_resource.trigger("PUT", path, content)
-        self.assertEquals(400, code, msg=str(response))
+        request, channel = make_request(b"PUT", path, content)
+        render(request, self.resource, self.clock)
+        self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
 
         # custom message types
         content = '{"body":"test","msgtype":"test.custom.text"}'
-        (code, response) = yield self.mock_resource.trigger("PUT", path, content)
-        self.assertEquals(200, code, msg=str(response))
-
-#        (code, response) = yield self.mock_resource.trigger("GET", path, None)
-#        self.assertEquals(200, code, msg=str(response))
-#        self.assert_dict(json.loads(content), response)
+        request, channel = make_request(b"PUT", path, content)
+        render(request, self.resource, self.clock)
+        self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
 
         # m.text message type
-        path = "/rooms/%s/send/m.room.message/mid2" % (
-            urlparse.quote(self.room_id))
+        path = "/rooms/%s/send/m.room.message/mid2" % (urlparse.quote(self.room_id))
         content = '{"body":"test2","msgtype":"m.text"}'
-        (code, response) = yield self.mock_resource.trigger("PUT", path, content)
-        self.assertEquals(200, code, msg=str(response))
+        request, channel = make_request(b"PUT", path, content)
+        render(request, self.resource, self.clock)
+        self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
 
 
-class RoomInitialSyncTestCase(RestTestCase):
+class RoomInitialSyncTestCase(RoomBase):
     """ Tests /rooms/$room_id/initialSync. """
+
     user_id = "@sid1:red"
 
-    @defer.inlineCallbacks
     def setUp(self):
-        self.mock_resource = MockHttpResource(prefix=PATH_PREFIX)
-        self.auth_user_id = self.user_id
-
-        hs = yield setup_test_homeserver(
-            "red",
-            http_client=None,
-            federation_client=Mock(),
-            ratelimiter=NonCallableMock(spec_set=[
-                "send_message",
-            ]),
-        )
-        self.ratelimiter = hs.get_ratelimiter()
-        self.ratelimiter.send_message.return_value = (True, 0)
-
-        hs.get_handlers().federation_handler = Mock()
-
-        def get_user_by_access_token(token=None, allow_guest=False):
-            return {
-                "user": UserID.from_string(self.auth_user_id),
-                "token_id": 1,
-                "is_guest": False,
-            }
-        hs.get_auth().get_user_by_access_token = get_user_by_access_token
-
-        def _insert_client_ip(*args, **kwargs):
-            return defer.succeed(None)
-        hs.get_datastore().insert_client_ip = _insert_client_ip
-
-        synapse.rest.client.v1.room.register_servlets(hs, self.mock_resource)
+        super(RoomInitialSyncTestCase, self).setUp()
 
         # create the room
-        self.room_id = yield self.create_room_as(self.user_id)
+        self.room_id = self.helper.create_room_as(self.user_id)
 
-    @defer.inlineCallbacks
     def test_initial_sync(self):
-        (code, response) = yield self.mock_resource.trigger_get(
-            "/rooms/%s/initialSync" % self.room_id
-        )
-        self.assertEquals(200, code)
+        request, channel = make_request(b"GET", "/rooms/%s/initialSync" % self.room_id)
+        render(request, self.resource, self.clock)
+        self.assertEquals(200, int(channel.result["code"]))
 
-        self.assertEquals(self.room_id, response["room_id"])
-        self.assertEquals("join", response["membership"])
+        self.assertEquals(self.room_id, channel.json_body["room_id"])
+        self.assertEquals("join", channel.json_body["membership"])
 
         # Room state is easier to assert on if we unpack it into a dict
         state = {}
-        for event in response["state"]:
+        for event in channel.json_body["state"]:
             if "state_key" not in event:
                 continue
             t = event["type"]
@@ -978,75 +801,48 @@ class RoomInitialSyncTestCase(RestTestCase):
 
         self.assertTrue("m.room.create" in state)
 
-        self.assertTrue("messages" in response)
-        self.assertTrue("chunk" in response["messages"])
-        self.assertTrue("end" in response["messages"])
+        self.assertTrue("messages" in channel.json_body)
+        self.assertTrue("chunk" in channel.json_body["messages"])
+        self.assertTrue("end" in channel.json_body["messages"])
 
-        self.assertTrue("presence" in response)
+        self.assertTrue("presence" in channel.json_body)
 
         presence_by_user = {
-            e["content"]["user_id"]: e for e in response["presence"]
+            e["content"]["user_id"]: e for e in channel.json_body["presence"]
         }
         self.assertTrue(self.user_id in presence_by_user)
         self.assertEquals("m.presence", presence_by_user[self.user_id]["type"])
 
 
-class RoomMessageListTestCase(RestTestCase):
+class RoomMessageListTestCase(RoomBase):
     """ Tests /rooms/$room_id/messages REST events. """
+
     user_id = "@sid1:red"
 
-    @defer.inlineCallbacks
     def setUp(self):
-        self.mock_resource = MockHttpResource(prefix=PATH_PREFIX)
-        self.auth_user_id = self.user_id
+        super(RoomMessageListTestCase, self).setUp()
+        self.room_id = self.helper.create_room_as(self.user_id)
 
-        hs = yield setup_test_homeserver(
-            "red",
-            http_client=None,
-            federation_client=Mock(),
-            ratelimiter=NonCallableMock(spec_set=["send_message"]),
-        )
-        self.ratelimiter = hs.get_ratelimiter()
-        self.ratelimiter.send_message.return_value = (True, 0)
-
-        hs.get_handlers().federation_handler = Mock()
-
-        def get_user_by_access_token(token=None, allow_guest=False):
-            return {
-                "user": UserID.from_string(self.auth_user_id),
-                "token_id": 1,
-                "is_guest": False,
-            }
-        hs.get_auth().get_user_by_access_token = get_user_by_access_token
-
-        def _insert_client_ip(*args, **kwargs):
-            return defer.succeed(None)
-        hs.get_datastore().insert_client_ip = _insert_client_ip
-
-        synapse.rest.client.v1.room.register_servlets(hs, self.mock_resource)
-
-        self.room_id = yield self.create_room_as(self.user_id)
-
-    @defer.inlineCallbacks
     def test_topo_token_is_accepted(self):
         token = "t1-0_0_0_0_0_0_0_0_0"
-        (code, response) = yield self.mock_resource.trigger_get(
-            "/rooms/%s/messages?access_token=x&from=%s" %
-            (self.room_id, token))
-        self.assertEquals(200, code)
-        self.assertTrue("start" in response)
-        self.assertEquals(token, response['start'])
-        self.assertTrue("chunk" in response)
-        self.assertTrue("end" in response)
-
-    @defer.inlineCallbacks
+        request, channel = make_request(
+            b"GET", "/rooms/%s/messages?access_token=x&from=%s" % (self.room_id, token)
+        )
+        render(request, self.resource, self.clock)
+        self.assertEquals(200, int(channel.result["code"]))
+        self.assertTrue("start" in channel.json_body)
+        self.assertEquals(token, channel.json_body['start'])
+        self.assertTrue("chunk" in channel.json_body)
+        self.assertTrue("end" in channel.json_body)
+
     def test_stream_token_is_accepted_for_fwd_pagination(self):
         token = "s0_0_0_0_0_0_0_0_0"
-        (code, response) = yield self.mock_resource.trigger_get(
-            "/rooms/%s/messages?access_token=x&from=%s" %
-            (self.room_id, token))
-        self.assertEquals(200, code)
-        self.assertTrue("start" in response)
-        self.assertEquals(token, response['start'])
-        self.assertTrue("chunk" in response)
-        self.assertTrue("end" in response)
+        request, channel = make_request(
+            b"GET", "/rooms/%s/messages?access_token=x&from=%s" % (self.room_id, token)
+        )
+        render(request, self.resource, self.clock)
+        self.assertEquals(200, int(channel.result["code"]))
+        self.assertTrue("start" in channel.json_body)
+        self.assertEquals(token, channel.json_body['start'])
+        self.assertTrue("chunk" in channel.json_body)
+        self.assertTrue("end" in channel.json_body)
diff --git a/tests/rest/client/v1/test_typing.py b/tests/rest/client/v1/test_typing.py
index fe161ee5cb..0ad814c5e5 100644
--- a/tests/rest/client/v1/test_typing.py
+++ b/tests/rest/client/v1/test_typing.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2018 New Vector
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,44 +16,34 @@
 
 """Tests REST events for /rooms paths."""
 
-# twisted imports
+from mock import Mock, NonCallableMock
+
 from twisted.internet import defer
 
-import synapse.rest.client.v1.room
+from synapse.rest.client.v1 import room
 from synapse.types import UserID
 
-from ....utils import MockHttpResource, MockClock, setup_test_homeserver
-from .utils import RestTestCase
-
-from mock import Mock, NonCallableMock
-
+from tests import unittest
 
 PATH_PREFIX = "/_matrix/client/api/v1"
 
 
-class RoomTypingTestCase(RestTestCase):
+class RoomTypingTestCase(unittest.HomeserverTestCase):
     """ Tests /rooms/$room_id/typing/$user_id REST API. """
+
     user_id = "@sid:red"
 
     user = UserID.from_string(user_id)
+    servlets = [room.register_servlets]
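+
+    # unittest.HomeserverTestCase drives the fixture lifecycle: it calls
+    # make_homeserver() to build the hs, registers the servlets listed above,
+    # and then calls prepare() once the reactor, clock and homeserver are
+    # wired together.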
 
-    @defer.inlineCallbacks
-    def setUp(self):
-        self.clock = MockClock()
-
-        self.mock_resource = MockHttpResource(prefix=PATH_PREFIX)
-        self.auth_user_id = self.user_id
+    def make_homeserver(self, reactor, clock):
 
-        hs = yield setup_test_homeserver(
+        hs = self.setup_test_homeserver(
             "red",
-            clock=self.clock,
             http_client=None,
             federation_client=Mock(),
-            ratelimiter=NonCallableMock(spec_set=[
-                "send_message",
-            ]),
+            ratelimiter=NonCallableMock(spec_set=["send_message"]),
         )
-        self.hs = hs
 
         self.event_source = hs.get_event_sources().sources["typing"]
 
@@ -72,6 +63,7 @@ class RoomTypingTestCase(RestTestCase):
 
         def _insert_client_ip(*args, **kwargs):
             return defer.succeed(None)
+
         hs.get_datastore().insert_client_ip = _insert_client_ip
 
         def get_room_members(room_id):
@@ -95,63 +87,70 @@ class RoomTypingTestCase(RestTestCase):
                 else:
                     if remotedomains is not None:
                         remotedomains.add(member.domain)
+
         hs.get_room_member_handler().fetch_room_distributions_into = (
             fetch_room_distributions_into
         )
 
-        synapse.rest.client.v1.room.register_servlets(hs, self.mock_resource)
+        return hs
 
-        self.room_id = yield self.create_room_as(self.user_id)
+    def prepare(self, reactor, clock, hs):
+        self.room_id = self.helper.create_room_as(self.user_id)
         # Need another user to make notifications actually work
-        yield self.join(self.room_id, user="@jim:red")
+        self.helper.join(self.room_id, user="@jim:red")
 
-    @defer.inlineCallbacks
     def test_set_typing(self):
-        (code, _) = yield self.mock_resource.trigger(
-            "PUT", "/rooms/%s/typing/%s" % (self.room_id, self.user_id),
-            '{"typing": true, "timeout": 30000}'
+        request, channel = self.make_request(
+            "PUT",
+            "/rooms/%s/typing/%s" % (self.room_id, self.user_id),
+            b'{"typing": true, "timeout": 30000}',
         )
-        self.assertEquals(200, code)
+        self.render(request)
+        self.assertEquals(200, channel.code)
 
         self.assertEquals(self.event_source.get_current_key(), 1)
-        events = yield self.event_source.get_new_events(
-            from_key=0,
-            room_ids=[self.room_id],
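+        # fetch the typing events the stream produced for this room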
+        events = self.event_source.get_new_events(from_key=0, room_ids=[self.room_id])
+        self.assertEquals(
+            events[0],
+            [
+                {
+                    "type": "m.typing",
+                    "room_id": self.room_id,
+                    "content": {"user_ids": [self.user_id]},
+                }
+            ],
         )
-        self.assertEquals(events[0], [{
-            "type": "m.typing",
-            "room_id": self.room_id,
-            "content": {
-                "user_ids": [self.user_id],
-            }
-        }])
 
-    @defer.inlineCallbacks
     def test_set_not_typing(self):
-        (code, _) = yield self.mock_resource.trigger(
-            "PUT", "/rooms/%s/typing/%s" % (self.room_id, self.user_id),
-            '{"typing": false}'
+        request, channel = self.make_request(
+            "PUT",
+            "/rooms/%s/typing/%s" % (self.room_id, self.user_id),
+            b'{"typing": false}',
         )
-        self.assertEquals(200, code)
+        self.render(request)
+        self.assertEquals(200, channel.code)
 
-    @defer.inlineCallbacks
     def test_typing_timeout(self):
-        (code, _) = yield self.mock_resource.trigger(
-            "PUT", "/rooms/%s/typing/%s" % (self.room_id, self.user_id),
-            '{"typing": true, "timeout": 30000}'
+        request, channel = self.make_request(
+            "PUT",
+            "/rooms/%s/typing/%s" % (self.room_id, self.user_id),
+            b'{"typing": true, "timeout": 30000}',
         )
-        self.assertEquals(200, code)
+        self.render(request)
+        self.assertEquals(200, channel.code)
 
         self.assertEquals(self.event_source.get_current_key(), 1)
 
-        self.clock.advance_time(36)
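+        # advance the fake reactor past the 30-second typing timeout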
+        self.reactor.advance(36)
 
         self.assertEquals(self.event_source.get_current_key(), 2)
 
-        (code, _) = yield self.mock_resource.trigger(
-            "PUT", "/rooms/%s/typing/%s" % (self.room_id, self.user_id),
-            '{"typing": true, "timeout": 30000}'
+        request, channel = self.make_request(
+            "PUT",
+            "/rooms/%s/typing/%s" % (self.room_id, self.user_id),
+            b'{"typing": true, "timeout": 30000}',
         )
-        self.assertEquals(200, code)
+        self.render(request)
+        self.assertEquals(200, channel.code)
 
         self.assertEquals(self.event_source.get_current_key(), 3)
diff --git a/tests/rest/client/v1/utils.py b/tests/rest/client/v1/utils.py
index 3bb1dd003a..530dc8ba6d 100644
--- a/tests/rest/client/v1/utils.py
+++ b/tests/rest/client/v1/utils.py
@@ -13,16 +13,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# twisted imports
-from twisted.internet import defer
+import json
+import time
 
-# trial imports
-from tests import unittest
+import attr
+
+from twisted.internet import defer
 
 from synapse.api.constants import Membership
 
-import json
-import time
+from tests import unittest
+from tests.server import make_request, render
 
 
 class RestTestCase(unittest.TestCase):
@@ -54,25 +55,39 @@ class RestTestCase(unittest.TestCase):
 
     @defer.inlineCallbacks
     def invite(self, room=None, src=None, targ=None, expect_code=200, tok=None):
-        yield self.change_membership(room=room, src=src, targ=targ, tok=tok,
-                                     membership=Membership.INVITE,
-                                     expect_code=expect_code)
+        yield self.change_membership(
+            room=room,
+            src=src,
+            targ=targ,
+            tok=tok,
+            membership=Membership.INVITE,
+            expect_code=expect_code,
+        )
 
     @defer.inlineCallbacks
     def join(self, room=None, user=None, expect_code=200, tok=None):
-        yield self.change_membership(room=room, src=user, targ=user, tok=tok,
-                                     membership=Membership.JOIN,
-                                     expect_code=expect_code)
+        yield self.change_membership(
+            room=room,
+            src=user,
+            targ=user,
+            tok=tok,
+            membership=Membership.JOIN,
+            expect_code=expect_code,
+        )
 
     @defer.inlineCallbacks
     def leave(self, room=None, user=None, expect_code=200, tok=None):
-        yield self.change_membership(room=room, src=user, targ=user, tok=tok,
-                                     membership=Membership.LEAVE,
-                                     expect_code=expect_code)
+        yield self.change_membership(
+            room=room,
+            src=user,
+            targ=user,
+            tok=tok,
+            membership=Membership.LEAVE,
+            expect_code=expect_code,
+        )
 
     @defer.inlineCallbacks
-    def change_membership(self, room, src, targ, membership, tok=None,
-                          expect_code=200):
+    def change_membership(self, room, src, targ, membership, tok=None, expect_code=200):
         temp_id = self.auth_user_id
         self.auth_user_id = src
 
@@ -80,16 +95,15 @@ class RestTestCase(unittest.TestCase):
         if tok:
             path = path + "?access_token=%s" % tok
 
-        data = {
-            "membership": membership
-        }
+        data = {"membership": membership}
 
         (code, response) = yield self.mock_resource.trigger(
             "PUT", path, json.dumps(data)
         )
         self.assertEquals(
-            expect_code, code,
-            msg="Expected: %d, got: %d, resp: %r" % (expect_code, code, response)
+            expect_code,
+            code,
+            msg="Expected: %d, got: %d, resp: %r" % (expect_code, code, response),
         )
 
         self.auth_user_id = temp_id
@@ -99,17 +113,15 @@ class RestTestCase(unittest.TestCase):
         (code, response) = yield self.mock_resource.trigger(
             "POST",
             "/register",
-            json.dumps({
-                "user": user_id,
-                "password": "test",
-                "type": "m.login.password"
-            }))
-        self.assertEquals(200, code)
+            json.dumps(
+                {"user": user_id, "password": "test", "type": "m.login.password"}
+            ),
+        )
+        self.assertEquals(200, code, msg=response)
         defer.returnValue(response)
 
     @defer.inlineCallbacks
-    def send(self, room_id, body=None, txn_id=None, tok=None,
-             expect_code=200):
+    def send(self, room_id, body=None, txn_id=None, tok=None, expect_code=200):
         if txn_id is None:
             txn_id = "m%s" % (str(time.time()))
         if body is None:
@@ -131,5 +143,120 @@ class RestTestCase(unittest.TestCase):
             actual (dict): The test result. Extra keys will not be checked.
         """
         for key in required:
-            self.assertEquals(required[key], actual[key],
-                              msg="%s mismatch. %s" % (key, actual))
+            self.assertEquals(
+                required[key], actual[key], msg="%s mismatch. %s" % (key, actual)
+            )
+
+
+@attr.s
+class RestHelper(object):
+    """Contains extra helper functions to quickly and clearly perform a given
+    REST action, which isn't the focus of the test.
+    """
+
+    hs = attr.ib()
+    resource = attr.ib()
+    auth_user_id = attr.ib()
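+
+    # Illustrative usage from a test (assuming the test case exposes an
+    # instance of this helper as `self.helper`, as the RoomBase tests above do):
+    #
+    #   room_id = self.helper.create_room_as("@alice:test")
+    #   self.helper.join(room_id, user="@bob:test")
+    #   self.helper.send(room_id, body="hello")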
+
+    def create_room_as(self, room_creator, is_public=True, tok=None):
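+        # Act as room_creator for the duration of the request; the original
+        # auth_user_id is restored before returning.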
+        temp_id = self.auth_user_id
+        self.auth_user_id = room_creator
+        path = "/_matrix/client/r0/createRoom"
+        content = {}
+        if not is_public:
+            content["visibility"] = "private"
+        if tok:
+            path = path + "?access_token=%s" % tok
+
+        request, channel = make_request(
+            "POST", path, json.dumps(content).encode('utf8')
+        )
+        render(request, self.resource, self.hs.get_reactor())
+
+        assert channel.result["code"] == b"200", channel.result
+        self.auth_user_id = temp_id
+        return channel.json_body["room_id"]
+
+    def invite(self, room=None, src=None, targ=None, expect_code=200, tok=None):
+        self.change_membership(
+            room=room,
+            src=src,
+            targ=targ,
+            tok=tok,
+            membership=Membership.INVITE,
+            expect_code=expect_code,
+        )
+
+    def join(self, room=None, user=None, expect_code=200, tok=None):
+        self.change_membership(
+            room=room,
+            src=user,
+            targ=user,
+            tok=tok,
+            membership=Membership.JOIN,
+            expect_code=expect_code,
+        )
+
+    def leave(self, room=None, user=None, expect_code=200, tok=None):
+        self.change_membership(
+            room=room,
+            src=user,
+            targ=user,
+            tok=tok,
+            membership=Membership.LEAVE,
+            expect_code=expect_code,
+        )
+
+    def change_membership(self, room, src, targ, membership, tok=None, expect_code=200):
+        temp_id = self.auth_user_id
+        self.auth_user_id = src
+
+        path = "/_matrix/client/r0/rooms/%s/state/m.room.member/%s" % (room, targ)
+        if tok:
+            path = path + "?access_token=%s" % tok
+
+        data = {"membership": membership}
+
+        request, channel = make_request("PUT", path, json.dumps(data).encode('utf8'))
+
+        render(request, self.resource, self.hs.get_reactor())
+
+        assert int(channel.result["code"]) == expect_code, (
+            "Expected: %d, got: %d, resp: %r"
+            % (expect_code, int(channel.result["code"]), channel.result["body"])
+        )
+
+        self.auth_user_id = temp_id
+
+    @defer.inlineCallbacks
+    def register(self, user_id):
+        (code, response) = yield self.mock_resource.trigger(
+            "POST",
+            "/_matrix/client/r0/register",
+            json.dumps(
+                {"user": user_id, "password": "test", "type": "m.login.password"}
+            ),
+        )
+        self.assertEquals(200, code)
+        defer.returnValue(response)
+
+    def send(self, room_id, body=None, txn_id=None, tok=None, expect_code=200):
+        if txn_id is None:
+            txn_id = "m%s" % (str(time.time()))
+        if body is None:
+            body = "body_text_here"
+
+        path = "/_matrix/client/r0/rooms/%s/send/m.room.message/%s" % (room_id, txn_id)
+        content = {"msgtype": "m.text", "body": body}
+        if tok:
+            path = path + "?access_token=%s" % tok
+
+        request, channel = make_request("PUT", path, json.dumps(content).encode('utf8'))
+        render(request, self.resource, self.hs.get_reactor())
+
+        assert int(channel.result["code"]) == expect_code, (
+            "Expected: %d, got: %d, resp: %r"
+            % (expect_code, int(channel.result["code"]), channel.result["body"])
+        )
+
+        return channel.json_body
diff --git a/tests/rest/client/v2_alpha/__init__.py b/tests/rest/client/v2_alpha/__init__.py
index 5170217d9e..e69de29bb2 100644
--- a/tests/rest/client/v2_alpha/__init__.py
+++ b/tests/rest/client/v2_alpha/__init__.py
@@ -1,62 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2015, 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from tests import unittest
-
-from mock import Mock
-
-from ....utils import MockHttpResource, setup_test_homeserver
-
-from synapse.types import UserID
-
-from twisted.internet import defer
-
-
-PATH_PREFIX = "/_matrix/client/v2_alpha"
-
-
-class V2AlphaRestTestCase(unittest.TestCase):
-    # Consumer must define
-    #   USER_ID = <some string>
-    #   TO_REGISTER = [<list of REST servlets to register>]
-
-    @defer.inlineCallbacks
-    def setUp(self):
-        self.mock_resource = MockHttpResource(prefix=PATH_PREFIX)
-
-        hs = yield setup_test_homeserver(
-            datastore=self.make_datastore_mock(),
-            http_client=None,
-            resource_for_client=self.mock_resource,
-            resource_for_federation=self.mock_resource,
-        )
-
-        def get_user_by_access_token(token=None, allow_guest=False):
-            return {
-                "user": UserID.from_string(self.USER_ID),
-                "token_id": 1,
-                "is_guest": False,
-            }
-        hs.get_auth().get_user_by_access_token = get_user_by_access_token
-
-        for r in self.TO_REGISTER:
-            r.register_servlets(hs, self.mock_resource)
-
-    def make_datastore_mock(self):
-        store = Mock(spec=[
-            "insert_client_ip",
-        ])
-        store.get_app_service_by_token = Mock(return_value=None)
-        return store
diff --git a/tests/rest/client/v2_alpha/test_filter.py b/tests/rest/client/v2_alpha/test_filter.py
index 76b833e119..6a886ee3b8 100644
--- a/tests/rest/client/v2_alpha/test_filter.py
+++ b/tests/rest/client/v2_alpha/test_filter.py
@@ -13,19 +13,20 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from twisted.internet import defer
-
-from tests import unittest
-
-from synapse.rest.client.v2_alpha import filter
-
-from synapse.api.errors import Codes
-
 import synapse.types
-
+from synapse.api.errors import Codes
+from synapse.http.server import JsonResource
+from synapse.rest.client.v2_alpha import filter
 from synapse.types import UserID
+from synapse.util import Clock
 
-from ....utils import MockHttpResource, setup_test_homeserver
+from tests import unittest
+from tests.server import (
+    ThreadedMemoryReactorClock as MemoryReactorClock,
+    make_request,
+    render,
+    setup_test_homeserver,
+)
 
 PATH_PREFIX = "/_matrix/client/v2_alpha"
 
@@ -34,17 +35,15 @@ class FilterTestCase(unittest.TestCase):
 
     USER_ID = "@apple:test"
     EXAMPLE_FILTER = {"room": {"timeline": {"types": ["m.room.message"]}}}
-    EXAMPLE_FILTER_JSON = '{"room": {"timeline": {"types": ["m.room.message"]}}}'
+    EXAMPLE_FILTER_JSON = b'{"room": {"timeline": {"types": ["m.room.message"]}}}'
     TO_REGISTER = [filter]
 
-    @defer.inlineCallbacks
     def setUp(self):
-        self.mock_resource = MockHttpResource(prefix=PATH_PREFIX)
+        self.clock = MemoryReactorClock()
+        self.hs_clock = Clock(self.clock)
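+        # self.clock is the fake Twisted reactor; hs_clock wraps it in
+        # synapse's Clock interface for the homeserver to use.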
 
-        self.hs = yield setup_test_homeserver(
-            http_client=None,
-            resource_for_client=self.mock_resource,
-            resource_for_federation=self.mock_resource,
+        self.hs = setup_test_homeserver(
+            self.addCleanup, http_client=None, clock=self.hs_clock, reactor=self.clock
         )
 
         self.auth = self.hs.get_auth()
@@ -58,82 +57,96 @@ class FilterTestCase(unittest.TestCase):
 
         def get_user_by_req(request, allow_guest=False, rights="access"):
             return synapse.types.create_requester(
-                UserID.from_string(self.USER_ID), 1, False, None)
+                UserID.from_string(self.USER_ID), 1, False, None
+            )
 
         self.auth.get_user_by_access_token = get_user_by_access_token
         self.auth.get_user_by_req = get_user_by_req
 
         self.store = self.hs.get_datastore()
         self.filtering = self.hs.get_filtering()
+        self.resource = JsonResource(self.hs)
 
         for r in self.TO_REGISTER:
-            r.register_servlets(self.hs, self.mock_resource)
+            r.register_servlets(self.hs, self.resource)
 
-    @defer.inlineCallbacks
     def test_add_filter(self):
-        (code, response) = yield self.mock_resource.trigger(
-            "POST", "/user/%s/filter" % (self.USER_ID), self.EXAMPLE_FILTER_JSON
-        )
-        self.assertEquals(200, code)
-        self.assertEquals({"filter_id": "0"}, response)
-        filter = yield self.store.get_user_filter(
-            user_localpart='apple',
-            filter_id=0,
+        request, channel = make_request(
+            "POST",
+            "/_matrix/client/r0/user/%s/filter" % (self.USER_ID),
+            self.EXAMPLE_FILTER_JSON,
         )
-        self.assertEquals(filter, self.EXAMPLE_FILTER)
+        render(request, self.resource, self.clock)
+
+        self.assertEqual(channel.result["code"], b"200")
+        self.assertEqual(channel.json_body, {"filter_id": "0"})
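+        # get_user_filter returns a Deferred; pump the fake reactor so it
+        # resolves, then read the result straight off it.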
+        filter = self.store.get_user_filter(user_localpart="apple", filter_id=0)
+        self.clock.advance(0)
+        self.assertEquals(filter.result, self.EXAMPLE_FILTER)
 
-    @defer.inlineCallbacks
     def test_add_filter_for_other_user(self):
-        (code, response) = yield self.mock_resource.trigger(
-            "POST", "/user/%s/filter" % ('@watermelon:test'), self.EXAMPLE_FILTER_JSON
+        request, channel = make_request(
+            "POST",
+            "/_matrix/client/r0/user/%s/filter" % ("@watermelon:test"),
+            self.EXAMPLE_FILTER_JSON,
         )
-        self.assertEquals(403, code)
-        self.assertEquals(response['errcode'], Codes.FORBIDDEN)
+        render(request, self.resource, self.clock)
+
+        self.assertEqual(channel.result["code"], b"403")
+        self.assertEquals(channel.json_body["errcode"], Codes.FORBIDDEN)
 
-    @defer.inlineCallbacks
     def test_add_filter_non_local_user(self):
         _is_mine = self.hs.is_mine
         self.hs.is_mine = lambda target_user: False
-        (code, response) = yield self.mock_resource.trigger(
-            "POST", "/user/%s/filter" % (self.USER_ID), self.EXAMPLE_FILTER_JSON
+        request, channel = make_request(
+            "POST",
+            "/_matrix/client/r0/user/%s/filter" % (self.USER_ID),
+            self.EXAMPLE_FILTER_JSON,
         )
+        render(request, self.resource, self.clock)
+
         self.hs.is_mine = _is_mine
-        self.assertEquals(403, code)
-        self.assertEquals(response['errcode'], Codes.FORBIDDEN)
+        self.assertEqual(channel.result["code"], b"403")
+        self.assertEquals(channel.json_body["errcode"], Codes.FORBIDDEN)
 
-    @defer.inlineCallbacks
     def test_get_filter(self):
-        filter_id = yield self.filtering.add_user_filter(
-            user_localpart='apple',
-            user_filter=self.EXAMPLE_FILTER
+        filter_id = self.filtering.add_user_filter(
+            user_localpart="apple", user_filter=self.EXAMPLE_FILTER
         )
-        (code, response) = yield self.mock_resource.trigger_get(
-            "/user/%s/filter/%s" % (self.USER_ID, filter_id)
+        self.clock.advance(1)
+        filter_id = filter_id.result
+        request, channel = make_request(
+            "GET", "/_matrix/client/r0/user/%s/filter/%s" % (self.USER_ID, filter_id)
         )
-        self.assertEquals(200, code)
-        self.assertEquals(self.EXAMPLE_FILTER, response)
+        render(request, self.resource, self.clock)
+
+        self.assertEqual(channel.result["code"], b"200")
+        self.assertEquals(channel.json_body, self.EXAMPLE_FILTER)
 
-    @defer.inlineCallbacks
     def test_get_filter_non_existent(self):
-        (code, response) = yield self.mock_resource.trigger_get(
-            "/user/%s/filter/12382148321" % (self.USER_ID)
+        request, channel = make_request(
+            "GET", "/_matrix/client/r0/user/%s/filter/12382148321" % (self.USER_ID)
         )
-        self.assertEquals(400, code)
-        self.assertEquals(response['errcode'], Codes.NOT_FOUND)
+        render(request, self.resource, self.clock)
+
+        self.assertEqual(channel.result["code"], b"400")
+        self.assertEquals(channel.json_body["errcode"], Codes.NOT_FOUND)
 
     # Currently invalid params do not have an appropriate errcode
     # in errors.py
-    @defer.inlineCallbacks
     def test_get_filter_invalid_id(self):
-        (code, response) = yield self.mock_resource.trigger_get(
-            "/user/%s/filter/foobar" % (self.USER_ID)
+        request, channel = make_request(
+            "GET", "/_matrix/client/r0/user/%s/filter/foobar" % (self.USER_ID)
         )
-        self.assertEquals(400, code)
+        render(request, self.resource, self.clock)
+
+        self.assertEqual(channel.result["code"], b"400")
 
     # No ID also returns an invalid_id error
-    @defer.inlineCallbacks
     def test_get_filter_no_id(self):
-        (code, response) = yield self.mock_resource.trigger_get(
-            "/user/%s/filter/" % (self.USER_ID)
+        request, channel = make_request(
+            "GET", "/_matrix/client/r0/user/%s/filter/" % (self.USER_ID)
         )
-        self.assertEquals(400, code)
+        render(request, self.resource, self.clock)
+
+        self.assertEqual(channel.result["code"], b"400")
diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py
index 8aba456510..1c128e81f5 100644
--- a/tests/rest/client/v2_alpha/test_register.py
+++ b/tests/rest/client/v2_alpha/test_register.py
@@ -1,163 +1,176 @@
-from twisted.python import failure
+import json
 
-from synapse.rest.client.v2_alpha.register import RegisterRestServlet
-from synapse.api.errors import SynapseError, InteractiveAuthIncompleteError
-from twisted.internet import defer
 from mock import Mock
+
+from twisted.python import failure
+from twisted.test.proto_helpers import MemoryReactorClock
+
+from synapse.api.errors import InteractiveAuthIncompleteError
+from synapse.http.server import JsonResource
+from synapse.rest.client.v2_alpha.register import register_servlets
+from synapse.util import Clock
+
 from tests import unittest
-from tests.utils import mock_getRawHeaders
-import json
+from tests.server import make_request, render, setup_test_homeserver
 
 
 class RegisterRestServletTestCase(unittest.TestCase):
-
     def setUp(self):
-        # do the dance to hook up request data to self.request_data
-        self.request_data = ""
-        self.request = Mock(
-            content=Mock(read=Mock(side_effect=lambda: self.request_data)),
-            path='/_matrix/api/v2_alpha/register'
-        )
-        self.request.args = {}
-        self.request.requestHeaders.getRawHeaders = mock_getRawHeaders()
+
+        self.clock = MemoryReactorClock()
+        self.hs_clock = Clock(self.clock)
+        self.url = b"/_matrix/client/r0/register"
 
         self.appservice = None
-        self.auth = Mock(get_appservice_by_req=Mock(
-            side_effect=lambda x: self.appservice)
+        self.auth = Mock(
+            get_appservice_by_req=Mock(side_effect=lambda x: self.appservice)
         )
 
         self.auth_result = failure.Failure(InteractiveAuthIncompleteError(None))
         self.auth_handler = Mock(
             check_auth=Mock(side_effect=lambda x, y, z: self.auth_result),
-            get_session_data=Mock(return_value=None)
+            get_session_data=Mock(return_value=None),
         )
         self.registration_handler = Mock()
         self.identity_handler = Mock()
         self.login_handler = Mock()
         self.device_handler = Mock()
+        self.device_handler.check_device_registered = Mock(return_value="FAKE")
+
+        self.datastore = Mock(return_value=Mock())
+        self.datastore.get_current_state_deltas = Mock(return_value=[])
 
         # do the dance to hook it up to the hs global
         self.handlers = Mock(
             registration_handler=self.registration_handler,
             identity_handler=self.identity_handler,
-            login_handler=self.login_handler
+            login_handler=self.login_handler,
+        )
+        self.hs = setup_test_homeserver(
+            self.addCleanup, http_client=None, clock=self.hs_clock, reactor=self.clock
         )
-        self.hs = Mock()
-        self.hs.hostname = "superbig~testing~thing.com"
         self.hs.get_auth = Mock(return_value=self.auth)
         self.hs.get_handlers = Mock(return_value=self.handlers)
         self.hs.get_auth_handler = Mock(return_value=self.auth_handler)
         self.hs.get_device_handler = Mock(return_value=self.device_handler)
+        self.hs.get_datastore = Mock(return_value=self.datastore)
         self.hs.config.enable_registration = True
         self.hs.config.registrations_require_3pid = []
         self.hs.config.auto_join_rooms = []
 
-        # init the thing we're testing
-        self.servlet = RegisterRestServlet(self.hs)
+        self.resource = JsonResource(self.hs)
+        register_servlets(self.hs, self.resource)
 
-    @defer.inlineCallbacks
     def test_POST_appservice_registration_valid(self):
         user_id = "@kermit:muppet"
         token = "kermits_access_token"
-        self.request.args = {
-            "access_token": "i_am_an_app_service"
-        }
-        self.request_data = json.dumps({
-            "username": "kermit"
-        })
-        self.appservice = {
-            "id": "1234"
-        }
-        self.registration_handler.appservice_register = Mock(
-            return_value=user_id
-        )
-        self.auth_handler.get_access_token_for_user_id = Mock(
-            return_value=token
+        self.appservice = {"id": "1234"}
+        self.registration_handler.appservice_register = Mock(return_value=user_id)
+        self.auth_handler.get_access_token_for_user_id = Mock(return_value=token)
+        request_data = json.dumps({"username": "kermit"})
+
+        request, channel = make_request(
+            b"POST", self.url + b"?access_token=i_am_an_app_service", request_data
         )
+        render(request, self.resource, self.clock)
 
-        (code, result) = yield self.servlet.on_POST(self.request)
-        self.assertEquals(code, 200)
+        self.assertEquals(channel.result["code"], b"200", channel.result)
         det_data = {
             "user_id": user_id,
             "access_token": token,
-            "home_server": self.hs.hostname
+            "home_server": self.hs.hostname,
         }
-        self.assertDictContainsSubset(det_data, result)
+        self.assertDictContainsSubset(det_data, channel.json_body)
 
-    @defer.inlineCallbacks
     def test_POST_appservice_registration_invalid(self):
-        self.request.args = {
-            "access_token": "i_am_an_app_service"
-        }
-
-        self.request_data = json.dumps({
-            "username": "kermit"
-        })
         self.appservice = None  # no application service exists
-        result = yield self.servlet.on_POST(self.request)
-        self.assertEquals(result, (401, None))
+        request_data = json.dumps({"username": "kermit"})
+        request, channel = make_request(
+            b"POST", self.url + b"?access_token=i_am_an_app_service", request_data
+        )
+        render(request, self.resource, self.clock)
+
+        self.assertEquals(channel.result["code"], b"401", channel.result)
 
     def test_POST_bad_password(self):
-        self.request_data = json.dumps({
-            "username": "kermit",
-            "password": 666
-        })
-        d = self.servlet.on_POST(self.request)
-        return self.assertFailure(d, SynapseError)
+        request_data = json.dumps({"username": "kermit", "password": 666})
+        request, channel = make_request(b"POST", self.url, request_data)
+        render(request, self.resource, self.clock)
+
+        self.assertEquals(channel.result["code"], b"400", channel.result)
+        self.assertEquals(channel.json_body["error"], "Invalid password")
 
     def test_POST_bad_username(self):
-        self.request_data = json.dumps({
-            "username": 777,
-            "password": "monkey"
-        })
-        d = self.servlet.on_POST(self.request)
-        return self.assertFailure(d, SynapseError)
-
-    @defer.inlineCallbacks
+        request_data = json.dumps({"username": 777, "password": "monkey"})
+        request, channel = make_request(b"POST", self.url, request_data)
+        render(request, self.resource, self.clock)
+
+        self.assertEquals(channel.result["code"], b"400", channel.result)
+        self.assertEquals(channel.json_body["error"], "Invalid username")
+
     def test_POST_user_valid(self):
         user_id = "@kermit:muppet"
         token = "kermits_access_token"
         device_id = "frogfone"
-        self.request_data = json.dumps({
-            "username": "kermit",
-            "password": "monkey",
-            "device_id": device_id,
-        })
+        request_data = json.dumps(
+            {"username": "kermit", "password": "monkey", "device_id": device_id}
+        )
         self.registration_handler.check_username = Mock(return_value=True)
-        self.auth_result = (None, {
-            "username": "kermit",
-            "password": "monkey"
-        }, None)
+        self.auth_result = (None, {"username": "kermit", "password": "monkey"}, None)
         self.registration_handler.register = Mock(return_value=(user_id, None))
-        self.auth_handler.get_access_token_for_user_id = Mock(
-            return_value=token
-        )
-        self.device_handler.check_device_registered = \
-            Mock(return_value=device_id)
+        self.auth_handler.get_access_token_for_user_id = Mock(return_value=token)
+        self.device_handler.check_device_registered = Mock(return_value=device_id)
+
+        request, channel = make_request(b"POST", self.url, request_data)
+        render(request, self.resource, self.clock)
 
-        (code, result) = yield self.servlet.on_POST(self.request)
-        self.assertEquals(code, 200)
         det_data = {
             "user_id": user_id,
             "access_token": token,
             "home_server": self.hs.hostname,
             "device_id": device_id,
         }
-        self.assertDictContainsSubset(det_data, result)
+        self.assertEquals(channel.result["code"], b"200", channel.result)
+        self.assertDictContainsSubset(det_data, channel.json_body)
         self.auth_handler.get_login_tuple_for_user_id(
-            user_id, device_id=device_id, initial_device_display_name=None)
+            user_id, device_id=device_id, initial_device_display_name=None
+        )
 
     def test_POST_disabled_registration(self):
         self.hs.config.enable_registration = False
-        self.request_data = json.dumps({
-            "username": "kermit",
-            "password": "monkey"
-        })
+        request_data = json.dumps({"username": "kermit", "password": "monkey"})
         self.registration_handler.check_username = Mock(return_value=True)
-        self.auth_result = (None, {
-            "username": "kermit",
-            "password": "monkey"
-        }, None)
+        self.auth_result = (None, {"username": "kermit", "password": "monkey"}, None)
         self.registration_handler.register = Mock(return_value=("@user:id", "t"))
-        d = self.servlet.on_POST(self.request)
-        return self.assertFailure(d, SynapseError)
+
+        request, channel = make_request(b"POST", self.url, request_data)
+        render(request, self.resource, self.clock)
+
+        self.assertEquals(channel.result["code"], b"403", channel.result)
+        self.assertEquals(channel.json_body["error"], "Registration has been disabled")
+
+    def test_POST_guest_registration(self):
+        user_id = "a@b"
+        self.hs.config.macaroon_secret_key = "test"
+        self.hs.config.allow_guest_access = True
+        self.registration_handler.register = Mock(return_value=(user_id, None))
+
+        request, channel = make_request(b"POST", self.url + b"?kind=guest", b"{}")
+        render(request, self.resource, self.clock)
+
+        det_data = {
+            "user_id": user_id,
+            "home_server": self.hs.hostname,
+            "device_id": "guest_device",
+        }
+        self.assertEquals(channel.result["code"], b"200", channel.result)
+        self.assertDictContainsSubset(det_data, channel.json_body)
+
+    def test_POST_disabled_guest_registration(self):
+        self.hs.config.allow_guest_access = False
+
+        request, channel = make_request(b"POST", self.url + b"?kind=guest", b"{}")
+        render(request, self.resource, self.clock)
+
+        self.assertEquals(channel.result["code"], b"403", channel.result)
+        self.assertEquals(channel.json_body["error"], "Guest access is disabled")
diff --git a/tests/rest/client/v2_alpha/test_sync.py b/tests/rest/client/v2_alpha/test_sync.py
new file mode 100644
index 0000000000..560b1fba96
--- /dev/null
+++ b/tests/rest/client/v2_alpha/test_sync.py
@@ -0,0 +1,73 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from mock import Mock
+
+from synapse.rest.client.v2_alpha import sync
+
+from tests import unittest
+
+
+class FilterTestCase(unittest.HomeserverTestCase):
+
+    user_id = "@apple:test"
+    servlets = [sync.register_servlets]
+
+    def make_homeserver(self, reactor, clock):
+
+        hs = self.setup_test_homeserver(
+            "red", http_client=None, federation_client=Mock()
+        )
+        return hs
+
+    def test_sync_argless(self):
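+        """
+        A bare /sync, with no query parameters, returns 200 and the
+        expected top-level keys.
+        """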
+        request, channel = self.make_request("GET", "/sync")
+        self.render(request)
+
+        self.assertEqual(channel.code, 200)
+        self.assertTrue(
+            set(
+                [
+                    "next_batch",
+                    "rooms",
+                    "presence",
+                    "account_data",
+                    "to_device",
+                    "device_lists",
+                ]
+            ).issubset(set(channel.json_body.keys()))
+        )
+
+    def test_sync_presence_disabled(self):
+        """
+        When presence is disabled, the key does not appear in /sync.
+        """
+        self.hs.config.use_presence = False
+
+        request, channel = self.make_request("GET", "/sync")
+        self.render(request)
+
+        self.assertEqual(channel.code, 200)
+        self.assertTrue(
+            set(
+                [
+                    "next_batch",
+                    "rooms",
+                    "account_data",
+                    "to_device",
+                    "device_lists",
+                ]
+            ).issubset(set(channel.json_body.keys()))
+        )
diff --git a/tests/rest/media/v1/test_media_storage.py b/tests/rest/media/v1/test_media_storage.py
index eef38b6781..a86901c2d8 100644
--- a/tests/rest/media/v1/test_media_storage.py
+++ b/tests/rest/media/v1/test_media_storage.py
@@ -14,21 +14,21 @@
 # limitations under the License.
 
 
-from twisted.internet import defer
+import os
+import shutil
+import tempfile
+
+from mock import Mock
+
+from twisted.internet import defer, reactor
 
 from synapse.rest.media.v1._base import FileInfo
-from synapse.rest.media.v1.media_storage import MediaStorage
 from synapse.rest.media.v1.filepath import MediaFilePaths
+from synapse.rest.media.v1.media_storage import MediaStorage
 from synapse.rest.media.v1.storage_provider import FileStorageProviderBackend
 
-from mock import Mock
-
 from tests import unittest
 
-import os
-import shutil
-import tempfile
-
 
 class MediaStorageTests(unittest.TestCase):
     def setUp(self):
@@ -38,15 +38,14 @@ class MediaStorageTests(unittest.TestCase):
         self.secondary_base_path = os.path.join(self.test_dir, "secondary")
 
         hs = Mock()
+        hs.get_reactor = Mock(return_value=reactor)
         hs.config.media_store_path = self.primary_base_path
 
-        storage_providers = [FileStorageProviderBackend(
-            hs, self.secondary_base_path
-        )]
+        storage_providers = [FileStorageProviderBackend(hs, self.secondary_base_path)]
 
         self.filepaths = MediaFilePaths(self.primary_base_path)
         self.media_storage = MediaStorage(
-            self.primary_base_path, self.filepaths, storage_providers,
+            hs, self.primary_base_path, self.filepaths, storage_providers
         )
 
     def tearDown(self):
diff --git a/tests/server.py b/tests/server.py
new file mode 100644
index 0000000000..7dbdb7f8ea
--- /dev/null
+++ b/tests/server.py
@@ -0,0 +1,241 @@
+import json
+from io import BytesIO
+
+from six import text_type
+
+import attr
+
+from twisted.internet import address, threads
+from twisted.internet.defer import Deferred
+from twisted.python.failure import Failure
+from twisted.test.proto_helpers import MemoryReactorClock
+
+from synapse.http.site import SynapseRequest
+from synapse.util import Clock
+
+from tests.utils import setup_test_homeserver as _sth
+
+
+@attr.s
+class FakeChannel(object):
+    """
+    A fake Twisted Web Channel (the part that interfaces with the
+    wire).
+    """
+
+    result = attr.ib(default=attr.Factory(dict))
+    _producer = None
+
+    @property
+    def json_body(self):
+        if not self.result:
+            raise Exception("No result yet.")
+        return json.loads(self.result["body"].decode('utf8'))
+
+    @property
+    def code(self):
+        if not self.result:
+            raise Exception("No result yet.")
+        return int(self.result["code"])
+
+    def writeHeaders(self, version, code, reason, headers):
+        self.result["version"] = version
+        self.result["code"] = code
+        self.result["reason"] = reason
+        self.result["headers"] = headers
+
+    def write(self, content):
+        if "body" not in self.result:
+            self.result["body"] = b""
+
+        self.result["body"] += content
+
+    def registerProducer(self, producer, streaming):
+        self._producer = producer
+
+    def unregisterProducer(self):
+        if self._producer is None:
+            return
+
+        self._producer = None
+
+    def requestDone(self, _self):
+        self.result["done"] = True
+
+    def getPeer(self):
+        # Give a non-null address so that getClientIP() returns something,
+        # causing the MAU-tracking code to record the user.
+        return address.IPv4Address("TCP", "127.0.0.1", 3423)
+
+    def getHost(self):
+        return None
+
+    @property
+    def transport(self):
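+        # The channel doubles as its own transport; that is enough for the
+        # parts of twisted.web that these tests exercise.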
+        return self
+
+
+class FakeSite:
+    """
+    A fake Twisted Web Site, with mocks of the extra things that
+    Synapse adds.
+    """
+
+    server_version_string = b"1"
+    site_tag = "test"
+
+    @property
+    def access_logger(self):
+        class FakeLogger:
+            def info(self, *args, **kwargs):
+                pass
+
+        return FakeLogger()
+
+
+def make_request(method, path, content=b"", access_token=None):
+    """
+    Make a web request using the given method and path, feed it the
+    content, and return the Request and the Channel underneath.
+    """
+    if not isinstance(method, bytes):
+        method = method.encode('ascii')
+
+    if not isinstance(path, bytes):
+        path = path.encode('ascii')
+
+    # Expand a bare path into a full /_matrix/client/r0 path if needed
+    if not path.startswith(b"/_matrix"):
+        path = b"/_matrix/client/r0/" + path
+        path = path.replace(b"//", b"/")
+
+    if isinstance(content, text_type):
+        content = content.encode('utf8')
+
+    site = FakeSite()
+    channel = FakeChannel()
+
+    req = SynapseRequest(site, channel)
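+    # Stub out the default processing so that render() below can drive the
+    # resource by hand.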
+    req.process = lambda: b""
+    req.content = BytesIO(content)
+
+    if access_token:
+        req.requestHeaders.addRawHeader(b"Authorization", b"Bearer " + access_token)
+
+    req.requestHeaders.addRawHeader(b"X-Forwarded-For", b"127.0.0.1")
+    req.requestReceived(method, path, b"1.1")
+
+    return req, channel
+
+
+def wait_until_result(clock, request, timeout=100):
+    """
+    Wait until the request is finished.
+    """
+    clock.run()
+    x = 0
+
+    while not request.finished:
+
+        # If there's a producer, tell it to resume producing so we get content
+        if request._channel._producer:
+            request._channel._producer.resumeProducing()
+
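+        # Each iteration advances the fake clock by 100ms, so the default
+        # timeout of 100 iterations corresponds to ten seconds of fake time.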
+        x += 1
+
+        if x > timeout:
+            raise Exception("Timed out waiting for request to finish.")
+
+        clock.advance(0.1)
+
+
+def render(request, resource, clock):
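+    """
+    Render the request against the resource and pump the memory reactor
+    until the response has been completely written.
+    """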
+    request.render(resource)
+    wait_until_result(clock, request)
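+
+# A minimal usage sketch: pair make_request() with render() to drive a
+# request through a JsonResource and inspect the response, e.g.
+#
+#     request, channel = make_request(b"GET", b"/_matrix/client/r0/sync")
+#     render(request, resource, reactor)  # reactor is a MemoryReactorClock
+#     assert channel.code == 200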
+
+
+class ThreadedMemoryReactorClock(MemoryReactorClock):
+    """
+    A MemoryReactorClock that supports callFromThread.
+    """
+
+    def callFromThread(self, callback, *args, **kwargs):
+        """
+        Make the callback fire in the next reactor iteration.
+        """
+        d = Deferred()
+        d.addCallback(lambda x: callback(*args, **kwargs))
+        self.callLater(0, d.callback, True)
+        return d
+
+
+def setup_test_homeserver(cleanup_func, *args, **kwargs):
+    """
+    Set up a synchronous test server, driven by the reactor used by
+    the homeserver.
+    """
+    d = _sth(cleanup_func, *args, **kwargs).result
+
+    if isinstance(d, Failure):
+        d.raiseException()
+
+    # Make the thread pool synchronous.
+    clock = d.get_clock()
+    pool = d.get_db_pool()
+
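+    # Wrap runWithConnection/runInteraction in deferToThreadPool so the
+    # database work goes through the threadless pool installed below and
+    # runs deterministically on the fake reactor.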
+    def runWithConnection(func, *args, **kwargs):
+        return threads.deferToThreadPool(
+            pool._reactor,
+            pool.threadpool,
+            pool._runWithConnection,
+            func,
+            *args,
+            **kwargs
+        )
+
+    def runInteraction(interaction, *args, **kwargs):
+        return threads.deferToThreadPool(
+            pool._reactor,
+            pool.threadpool,
+            pool._runInteraction,
+            interaction,
+            *args,
+            **kwargs
+        )
+
+    pool.runWithConnection = runWithConnection
+    pool.runInteraction = runInteraction
+
+    class ThreadPool:
+        """
+        Threadless thread pool.
+        """
+
+        def start(self):
+            pass
+
+        def stop(self):
+            pass
+
+        def callInThreadWithCallback(self, onResult, function, *args, **kwargs):
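+            # Schedule the function on the fake reactor rather than a real
+            # thread, reporting success or failure through onResult.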
+            def _(res):
+                if isinstance(res, Failure):
+                    onResult(False, res)
+                else:
+                    onResult(True, res)
+
+            d = Deferred()
+            d.addCallback(lambda x: function(*args, **kwargs))
+            d.addBoth(_)
+            clock._reactor.callLater(0, d.callback, True)
+            return d
+
+    clock.threadpool = ThreadPool()
+    pool.threadpool = ThreadPool()
+    return d
+
+
+def get_clock():
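+    """
+    Return a (ThreadedMemoryReactorClock, synapse Clock) pair for tests.
+    """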
+    clock = ThreadedMemoryReactorClock()
+    hs_clock = Clock(clock)
+    return (clock, hs_clock)
diff --git a/tests/server_notices/__init__.py b/tests/server_notices/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/server_notices/__init__.py
diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py
new file mode 100644
index 0000000000..5cc7fff39b
--- /dev/null
+++ b/tests/server_notices/test_resource_limits_server_notices.py
@@ -0,0 +1,213 @@
+from mock import Mock
+
+from twisted.internet import defer
+
+from synapse.api.constants import EventTypes, ServerNoticeMsgType
+from synapse.api.errors import ResourceLimitError
+from synapse.handlers.auth import AuthHandler
+from synapse.server_notices.resource_limits_server_notices import (
+    ResourceLimitsServerNotices,
+)
+
+from tests import unittest
+from tests.utils import setup_test_homeserver
+
+
+class AuthHandlers(object):
+    def __init__(self, hs):
+        self.auth_handler = AuthHandler(hs)
+
+
+class TestResourceLimitsServerNotices(unittest.TestCase):
+    @defer.inlineCallbacks
+    def setUp(self):
+        self.hs = yield setup_test_homeserver(self.addCleanup, handlers=None)
+        self.hs.handlers = AuthHandlers(self.hs)
+        self.auth_handler = self.hs.handlers.auth_handler
+        self.server_notices_sender = self.hs.get_server_notices_sender()
+
+        # Relying on [1] is far from ideal, but this test is the only case
+        # where the ResourceLimitsServerNotices class needs to be isolated;
+        # general code should never have a reason to do this.
+        self._rlsn = self.server_notices_sender._server_notices[1]
+        if not isinstance(self._rlsn, ResourceLimitsServerNotices):
+            raise Exception("Failed to find reference to ResourceLimitsServerNotices")
+
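+        # Pretend the user already has a monthly-active timestamp, i.e. is
+        # part of the MAU cohort.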
+        self._rlsn._store.user_last_seen_monthly_active = Mock(
+            return_value=defer.succeed(1000)
+        )
+        self._rlsn._server_notices_manager.send_notice = Mock()
+        self._rlsn._state.get_current_state = Mock(return_value=defer.succeed(None))
+        self._rlsn._store.get_events = Mock(return_value=defer.succeed({}))
+
+        self._send_notice = self._rlsn._server_notices_manager.send_notice
+
+        self.hs.config.limit_usage_by_mau = True
+        self.user_id = "@user_id:test"
+
+        self._rlsn._server_notices_manager.get_notice_room_for_user = Mock(
+            return_value=""
+        )
+        self._rlsn._store.add_tag_to_room = Mock()
+        self._rlsn._store.get_tags_for_room = Mock(return_value={})
+        self.hs.config.admin_contact = "mailto:user@test.com"
+
+    @defer.inlineCallbacks
+    def test_maybe_send_server_notice_to_user_flag_off(self):
+        """Tests cases where the flags indicate nothing to do"""
+        # test hs disabled case
+        self.hs.config.hs_disabled = True
+
+        yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
+
+        self._send_notice.assert_not_called()
+        # Test when mau limiting disabled
+        self.hs.config.hs_disabled = False
+        self.hs.config.limit_usage_by_mau = False
+        yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
+
+        self._send_notice.assert_not_called()
+
+    @defer.inlineCallbacks
+    def test_maybe_send_server_notice_to_user_remove_blocked_notice(self):
+        """Test when user has blocked notice, but should have it removed"""
+
+        self._rlsn._auth.check_auth_blocking = Mock()
+        mock_event = Mock(
+            type=EventTypes.Message,
+            content={"msgtype": ServerNoticeMsgType},
+        )
+        self._rlsn._store.get_events = Mock(return_value=defer.succeed(
+            {"123": mock_event}
+        ))
+
+        yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
+        # It would be better to check the content, but a single call means
+        # the blocking notice was removed
+        self._send_notice.assert_called_once()
+
+    @defer.inlineCallbacks
+    def test_maybe_send_server_notice_to_user_remove_blocked_notice_noop(self):
+        """Test when user has blocked notice, but notice ought to be there (NOOP)"""
+        self._rlsn._auth.check_auth_blocking = Mock(
+            side_effect=ResourceLimitError(403, 'foo')
+        )
+
+        mock_event = Mock(
+            type=EventTypes.Message,
+            content={"msgtype": ServerNoticeMsgType},
+        )
+        self._rlsn._store.get_events = Mock(return_value=defer.succeed(
+            {"123": mock_event}
+        ))
+        yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
+
+        self._send_notice.assert_not_called()
+
+    @defer.inlineCallbacks
+    def test_maybe_send_server_notice_to_user_add_blocked_notice(self):
+        """Test when user does not have blocked notice, but should have one"""
+
+        self._rlsn._auth.check_auth_blocking = Mock(
+            side_effect=ResourceLimitError(403, 'foo')
+        )
+        yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
+
+        # It would be better to check the contents, but two calls mean the
+        # blocking notice was set
+        self.assertEqual(self._send_notice.call_count, 2)
+
+    @defer.inlineCallbacks
+    def test_maybe_send_server_notice_to_user_add_blocked_notice_noop(self):
+        """Test when user does not have blocked notice, nor should they (NOOP)"""
+
+        self._rlsn._auth.check_auth_blocking = Mock()
+
+        yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
+
+        self._send_notice.assert_not_called()
+
+    @defer.inlineCallbacks
+    def test_maybe_send_server_notice_to_user_not_in_mau_cohort(self):
+        """Test when the user is not part of the MAU cohort; this should
+        never happen in practice, but guard against it anyway.
+        """
+
+        self._rlsn._auth.check_auth_blocking = Mock()
+        self._rlsn._store.user_last_seen_monthly_active = Mock(
+            return_value=defer.succeed(None)
+        )
+        yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
+
+        self._send_notice.assert_not_called()
+
+
+class TestResourceLimitsServerNoticesWithRealRooms(unittest.TestCase):
+    @defer.inlineCallbacks
+    def setUp(self):
+        self.hs = yield setup_test_homeserver(self.addCleanup)
+        self.store = self.hs.get_datastore()
+        self.server_notices_sender = self.hs.get_server_notices_sender()
+        self.server_notices_manager = self.hs.get_server_notices_manager()
+        self.event_source = self.hs.get_event_sources()
+
+        # Relying on [1] is far from ideal, but this test is the only case
+        # where the ResourceLimitsServerNotices class needs to be isolated;
+        # general code should never have a reason to do this.
+        self._rlsn = self.server_notices_sender._server_notices[1]
+        if not isinstance(self._rlsn, ResourceLimitsServerNotices):
+            raise Exception("Failed to find reference to ResourceLimitsServerNotices")
+
+        self.hs.config.limit_usage_by_mau = True
+        self.hs.config.hs_disabled = False
+        self.hs.config.max_mau_value = 5
+        self.hs.config.server_notices_mxid = "@server:test"
+        self.hs.config.server_notices_mxid_display_name = None
+        self.hs.config.server_notices_mxid_avatar_url = None
+        self.hs.config.server_notices_room_name = "Test Server Notice Room"
+
+        self.user_id = "@user_id:test"
+
+        self.hs.config.admin_contact = "mailto:user@test.com"
+
+    @defer.inlineCallbacks
+    def test_server_notice_only_sent_once(self):
+        self.store.get_monthly_active_count = Mock(return_value=1000)
+
+        self.store.user_last_seen_monthly_active = Mock(return_value=1000)
+
+        # Call the function multiple times to ensure we only send the notice once
+        yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
+        yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
+        yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
+
+        # Now fetch the most recent messages in the server notices room and
+        # check that there is only one server notice
+        room_id = yield self.server_notices_manager.get_notice_room_for_user(
+            self.user_id,
+        )
+
+        token = yield self.event_source.get_current_token()
+        events, _ = yield self.store.get_recent_events_for_room(
+            room_id, limit=100, end_token=token.room_key,
+        )
+
+        count = 0
+        for event in events:
+            if event.type != EventTypes.Message:
+                continue
+            if event.content.get("msgtype") != ServerNoticeMsgType:
+                continue
+
+            count += 1
+
+        self.assertEqual(count, 1)
diff --git a/tests/storage/test__base.py b/tests/storage/test__base.py
index 3cfa21c9f8..52eb05bfbf 100644
--- a/tests/storage/test__base.py
+++ b/tests/storage/test__base.py
@@ -14,18 +14,17 @@
 # limitations under the License.
 
 
-from tests import unittest
-from twisted.internet import defer
-
 from mock import Mock
 
-from synapse.util.async import ObservableDeferred
+from twisted.internet import defer
 
+from synapse.util.async_helpers import ObservableDeferred
 from synapse.util.caches.descriptors import Cache, cached
 
+from tests import unittest
 
-class CacheTestCase(unittest.TestCase):
 
+class CacheTestCase(unittest.TestCase):
     def setUp(self):
         self.cache = Cache("test")
 
@@ -97,7 +96,6 @@ class CacheTestCase(unittest.TestCase):
 
 
 class CacheDecoratorTestCase(unittest.TestCase):
-
     @defer.inlineCallbacks
     def test_passthrough(self):
         class A(object):
@@ -180,8 +178,7 @@ class CacheDecoratorTestCase(unittest.TestCase):
             yield a.func(k)
 
         self.assertTrue(
-            callcount[0] >= 14,
-            msg="Expected callcount >= 14, got %d" % (callcount[0])
+            callcount[0] >= 14, msg="Expected callcount >= 14, got %d" % (callcount[0])
         )
 
     def test_prefill(self):
diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py
index 00825498b1..c893990454 100644
--- a/tests/storage/test_appservice.py
+++ b/tests/storage/test_appservice.py
@@ -12,25 +12,28 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import json
+import os
 import tempfile
-from synapse.config._base import ConfigError
-from tests import unittest
+
+from mock import Mock
+
+import yaml
+
 from twisted.internet import defer
 
-from tests.utils import setup_test_homeserver
 from synapse.appservice import ApplicationService, ApplicationServiceState
+from synapse.config._base import ConfigError
 from synapse.storage.appservice import (
-    ApplicationServiceStore, ApplicationServiceTransactionStore
+    ApplicationServiceStore,
+    ApplicationServiceTransactionStore,
 )
 
-import json
-import os
-import yaml
-from mock import Mock
+from tests import unittest
+from tests.utils import setup_test_homeserver
 
 
 class ApplicationServiceStoreTestCase(unittest.TestCase):
-
     @defer.inlineCallbacks
     def setUp(self):
         self.as_yaml_files = []
@@ -40,6 +43,7 @@ class ApplicationServiceStoreTestCase(unittest.TestCase):
             password_providers=[],
         )
         hs = yield setup_test_homeserver(
+            self.addCleanup,
             config=config,
             federation_sender=Mock(),
             federation_client=Mock(),
@@ -49,11 +53,7 @@ class ApplicationServiceStoreTestCase(unittest.TestCase):
         self.as_url = "some_url"
         self.as_id = "as1"
         self._add_appservice(
-            self.as_token,
-            self.as_id,
-            self.as_url,
-            "some_hs_token",
-            "bob"
+            self.as_token, self.as_id, self.as_url, "some_hs_token", "bob"
         )
         self._add_appservice("token2", "as2", "some_url", "some_hs_token", "bob")
         self._add_appservice("token3", "as3", "some_url", "some_hs_token", "bob")
@@ -69,8 +69,14 @@ class ApplicationServiceStoreTestCase(unittest.TestCase):
                 pass
 
     def _add_appservice(self, as_token, id, url, hs_token, sender):
-        as_yaml = dict(url=url, as_token=as_token, hs_token=hs_token,
-                       id=id, sender_localpart=sender, namespaces={})
+        as_yaml = dict(
+            url=url,
+            as_token=as_token,
+            hs_token=hs_token,
+            id=id,
+            sender_localpart=sender,
+            namespaces={},
+        )
         # use the token as the filename
         with open(as_token, 'w') as outfile:
             outfile.write(yaml.dump(as_yaml))
@@ -81,24 +87,13 @@ class ApplicationServiceStoreTestCase(unittest.TestCase):
         self.assertEquals(service, None)
 
     def test_retrieval_of_service(self):
-        stored_service = self.store.get_app_service_by_token(
-            self.as_token
-        )
+        stored_service = self.store.get_app_service_by_token(self.as_token)
         self.assertEquals(stored_service.token, self.as_token)
         self.assertEquals(stored_service.id, self.as_id)
         self.assertEquals(stored_service.url, self.as_url)
-        self.assertEquals(
-            stored_service.namespaces[ApplicationService.NS_ALIASES],
-            []
-        )
-        self.assertEquals(
-            stored_service.namespaces[ApplicationService.NS_ROOMS],
-            []
-        )
-        self.assertEquals(
-            stored_service.namespaces[ApplicationService.NS_USERS],
-            []
-        )
+        self.assertEquals(stored_service.namespaces[ApplicationService.NS_ALIASES], [])
+        self.assertEquals(stored_service.namespaces[ApplicationService.NS_ROOMS], [])
+        self.assertEquals(stored_service.namespaces[ApplicationService.NS_USERS], [])
 
     def test_retrieval_of_all_services(self):
         services = self.store.get_app_services()
@@ -106,7 +101,6 @@ class ApplicationServiceStoreTestCase(unittest.TestCase):
 
 
 class ApplicationServiceTransactionStoreTestCase(unittest.TestCase):
-
     @defer.inlineCallbacks
     def setUp(self):
         self.as_yaml_files = []
@@ -117,6 +111,7 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase):
             password_providers=[],
         )
         hs = yield setup_test_homeserver(
+            self.addCleanup,
             config=config,
             federation_sender=Mock(),
             federation_client=Mock(),
@@ -124,26 +119,10 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase):
         self.db_pool = hs.get_db_pool()
 
         self.as_list = [
-            {
-                "token": "token1",
-                "url": "https://matrix-as.org",
-                "id": "id_1"
-            },
-            {
-                "token": "alpha_tok",
-                "url": "https://alpha.com",
-                "id": "id_alpha"
-            },
-            {
-                "token": "beta_tok",
-                "url": "https://beta.com",
-                "id": "id_beta"
-            },
-            {
-                "token": "gamma_tok",
-                "url": "https://gamma.com",
-                "id": "id_gamma"
-            },
+            {"token": "token1", "url": "https://matrix-as.org", "id": "id_1"},
+            {"token": "alpha_tok", "url": "https://alpha.com", "id": "id_alpha"},
+            {"token": "beta_tok", "url": "https://beta.com", "id": "id_beta"},
+            {"token": "gamma_tok", "url": "https://gamma.com", "id": "id_gamma"},
         ]
         for s in self.as_list:
             yield self._add_service(s["url"], s["token"], s["id"])
@@ -153,8 +132,14 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase):
         self.store = TestTransactionStore(None, hs)
 
     def _add_service(self, url, as_token, id):
-        as_yaml = dict(url=url, as_token=as_token, hs_token="something",
-                       id=id, sender_localpart="a_sender", namespaces={})
+        as_yaml = dict(
+            url=url,
+            as_token=as_token,
+            hs_token="something",
+            id=id,
+            sender_localpart="a_sender",
+            namespaces={},
+        )
         # use the token as the filename
         with open(as_token, 'w') as outfile:
             outfile.write(yaml.dump(as_yaml))
@@ -164,21 +149,21 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase):
         return self.db_pool.runQuery(
             "INSERT INTO application_services_state(as_id, state, last_txn) "
             "VALUES(?,?,?)",
-            (id, state, txn)
+            (id, state, txn),
         )
 
     def _insert_txn(self, as_id, txn_id, events):
         return self.db_pool.runQuery(
             "INSERT INTO application_services_txns(as_id, txn_id, event_ids) "
             "VALUES(?,?,?)",
-            (as_id, txn_id, json.dumps([e.event_id for e in events]))
+            (as_id, txn_id, json.dumps([e.event_id for e in events])),
         )
 
     def _set_last_txn(self, as_id, txn_id):
         return self.db_pool.runQuery(
             "INSERT INTO application_services_state(as_id, last_txn, state) "
             "VALUES(?,?,?)",
-            (as_id, txn_id, ApplicationServiceState.UP)
+            (as_id, txn_id, ApplicationServiceState.UP),
         )
 
     @defer.inlineCallbacks
@@ -189,24 +174,16 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase):
 
     @defer.inlineCallbacks
     def test_get_appservice_state_up(self):
-        yield self._set_state(
-            self.as_list[0]["id"], ApplicationServiceState.UP
-        )
+        yield self._set_state(self.as_list[0]["id"], ApplicationServiceState.UP)
         service = Mock(id=self.as_list[0]["id"])
         state = yield self.store.get_appservice_state(service)
         self.assertEquals(ApplicationServiceState.UP, state)
 
     @defer.inlineCallbacks
     def test_get_appservice_state_down(self):
-        yield self._set_state(
-            self.as_list[0]["id"], ApplicationServiceState.UP
-        )
-        yield self._set_state(
-            self.as_list[1]["id"], ApplicationServiceState.DOWN
-        )
-        yield self._set_state(
-            self.as_list[2]["id"], ApplicationServiceState.DOWN
-        )
+        yield self._set_state(self.as_list[0]["id"], ApplicationServiceState.UP)
+        yield self._set_state(self.as_list[1]["id"], ApplicationServiceState.DOWN)
+        yield self._set_state(self.as_list[2]["id"], ApplicationServiceState.DOWN)
         service = Mock(id=self.as_list[1]["id"])
         state = yield self.store.get_appservice_state(service)
         self.assertEquals(ApplicationServiceState.DOWN, state)
@@ -221,34 +198,22 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase):
     @defer.inlineCallbacks
     def test_set_appservices_state_down(self):
         service = Mock(id=self.as_list[1]["id"])
-        yield self.store.set_appservice_state(
-            service,
-            ApplicationServiceState.DOWN
-        )
+        yield self.store.set_appservice_state(service, ApplicationServiceState.DOWN)
         rows = yield self.db_pool.runQuery(
             "SELECT as_id FROM application_services_state WHERE state=?",
-            (ApplicationServiceState.DOWN,)
+            (ApplicationServiceState.DOWN,),
         )
         self.assertEquals(service.id, rows[0][0])
 
     @defer.inlineCallbacks
     def test_set_appservices_state_multiple_up(self):
         service = Mock(id=self.as_list[1]["id"])
-        yield self.store.set_appservice_state(
-            service,
-            ApplicationServiceState.UP
-        )
-        yield self.store.set_appservice_state(
-            service,
-            ApplicationServiceState.DOWN
-        )
-        yield self.store.set_appservice_state(
-            service,
-            ApplicationServiceState.UP
-        )
+        yield self.store.set_appservice_state(service, ApplicationServiceState.UP)
+        yield self.store.set_appservice_state(service, ApplicationServiceState.DOWN)
+        yield self.store.set_appservice_state(service, ApplicationServiceState.UP)
         rows = yield self.db_pool.runQuery(
             "SELECT as_id FROM application_services_state WHERE state=?",
-            (ApplicationServiceState.UP,)
+            (ApplicationServiceState.UP,),
         )
         self.assertEquals(service.id, rows[0][0])
 
@@ -315,14 +280,13 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase):
 
         res = yield self.db_pool.runQuery(
             "SELECT last_txn FROM application_services_state WHERE as_id=?",
-            (service.id,)
+            (service.id,),
         )
         self.assertEquals(1, len(res))
         self.assertEquals(txn_id, res[0][0])
 
         res = yield self.db_pool.runQuery(
-            "SELECT * FROM application_services_txns WHERE txn_id=?",
-            (txn_id,)
+            "SELECT * FROM application_services_txns WHERE txn_id=?", (txn_id,)
         )
         self.assertEquals(0, len(res))
 
@@ -336,17 +300,15 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase):
         yield self.store.complete_appservice_txn(txn_id=txn_id, service=service)
 
         res = yield self.db_pool.runQuery(
-            "SELECT last_txn, state FROM application_services_state WHERE "
-            "as_id=?",
-            (service.id,)
+            "SELECT last_txn, state FROM application_services_state WHERE " "as_id=?",
+            (service.id,),
         )
         self.assertEquals(1, len(res))
         self.assertEquals(txn_id, res[0][0])
         self.assertEquals(ApplicationServiceState.UP, res[0][1])
 
         res = yield self.db_pool.runQuery(
-            "SELECT * FROM application_services_txns WHERE txn_id=?",
-            (txn_id,)
+            "SELECT * FROM application_services_txns WHERE txn_id=?", (txn_id,)
         )
         self.assertEquals(0, len(res))
 
@@ -378,12 +340,8 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase):
 
     @defer.inlineCallbacks
     def test_get_appservices_by_state_single(self):
-        yield self._set_state(
-            self.as_list[0]["id"], ApplicationServiceState.DOWN
-        )
-        yield self._set_state(
-            self.as_list[1]["id"], ApplicationServiceState.UP
-        )
+        yield self._set_state(self.as_list[0]["id"], ApplicationServiceState.DOWN)
+        yield self._set_state(self.as_list[1]["id"], ApplicationServiceState.UP)
 
         services = yield self.store.get_appservices_by_state(
             ApplicationServiceState.DOWN
@@ -393,18 +351,10 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase):
 
     @defer.inlineCallbacks
     def test_get_appservices_by_state_multiple(self):
-        yield self._set_state(
-            self.as_list[0]["id"], ApplicationServiceState.DOWN
-        )
-        yield self._set_state(
-            self.as_list[1]["id"], ApplicationServiceState.UP
-        )
-        yield self._set_state(
-            self.as_list[2]["id"], ApplicationServiceState.DOWN
-        )
-        yield self._set_state(
-            self.as_list[3]["id"], ApplicationServiceState.UP
-        )
+        yield self._set_state(self.as_list[0]["id"], ApplicationServiceState.DOWN)
+        yield self._set_state(self.as_list[1]["id"], ApplicationServiceState.UP)
+        yield self._set_state(self.as_list[2]["id"], ApplicationServiceState.DOWN)
+        yield self._set_state(self.as_list[3]["id"], ApplicationServiceState.UP)
 
         services = yield self.store.get_appservices_by_state(
             ApplicationServiceState.DOWN
@@ -412,20 +362,17 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase):
         self.assertEquals(2, len(services))
         self.assertEquals(
             set([self.as_list[2]["id"], self.as_list[0]["id"]]),
-            set([services[0].id, services[1].id])
+            set([services[0].id, services[1].id]),
         )
 
 
 # required for ApplicationServiceTransactionStoreTestCase tests
-class TestTransactionStore(ApplicationServiceTransactionStore,
-                           ApplicationServiceStore):
-
+class TestTransactionStore(ApplicationServiceTransactionStore, ApplicationServiceStore):
     def __init__(self, db_conn, hs):
         super(TestTransactionStore, self).__init__(db_conn, hs)
 
 
 class ApplicationServiceStoreConfigTestCase(unittest.TestCase):
-
     def _write_config(self, suffix, **kwargs):
         vals = {
             "id": "id" + suffix,
@@ -448,10 +395,10 @@ class ApplicationServiceStoreConfigTestCase(unittest.TestCase):
         f2 = self._write_config(suffix="2")
 
         config = Mock(
-            app_service_config_files=[f1, f2], event_cache_size=1,
-            password_providers=[]
+            app_service_config_files=[f1, f2], event_cache_size=1, password_providers=[]
         )
         hs = yield setup_test_homeserver(
+            self.addCleanup,
             config=config,
             datastore=Mock(),
             federation_sender=Mock(),
@@ -466,10 +413,10 @@ class ApplicationServiceStoreConfigTestCase(unittest.TestCase):
         f2 = self._write_config(id="id", suffix="2")
 
         config = Mock(
-            app_service_config_files=[f1, f2], event_cache_size=1,
-            password_providers=[]
+            app_service_config_files=[f1, f2], event_cache_size=1, password_providers=[]
         )
         hs = yield setup_test_homeserver(
+            self.addCleanup,
             config=config,
             datastore=Mock(),
             federation_sender=Mock(),
@@ -490,10 +437,10 @@ class ApplicationServiceStoreConfigTestCase(unittest.TestCase):
         f2 = self._write_config(as_token="as_token", suffix="2")
 
         config = Mock(
-            app_service_config_files=[f1, f2], event_cache_size=1,
-            password_providers=[]
+            app_service_config_files=[f1, f2], event_cache_size=1, password_providers=[]
         )
         hs = yield setup_test_homeserver(
+            self.addCleanup,
             config=config,
             datastore=Mock(),
             federation_sender=Mock(),
diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py
index 1286b4ce2d..81403727c5 100644
--- a/tests/storage/test_background_update.py
+++ b/tests/storage/test_background_update.py
@@ -1,16 +1,17 @@
-from tests import unittest
+from mock import Mock
+
 from twisted.internet import defer
 
+from tests import unittest
 from tests.utils import setup_test_homeserver
 
-from mock import Mock
-
 
 class BackgroundUpdateTestCase(unittest.TestCase):
-
     @defer.inlineCallbacks
     def setUp(self):
-        hs = yield setup_test_homeserver()  # type: synapse.server.HomeServer
+        hs = yield setup_test_homeserver(
+            self.addCleanup
+        )  # type: synapse.server.HomeServer
         self.store = hs.get_datastore()
         self.clock = hs.get_clock()
 
@@ -51,9 +52,7 @@ class BackgroundUpdateTestCase(unittest.TestCase):
         yield self.store.start_background_update("test_update", {"my_key": 1})
 
         self.update_handler.reset_mock()
-        result = yield self.store.do_next_background_update(
-            duration_ms * desired_count
-        )
+        result = yield self.store.do_next_background_update(duration_ms * desired_count)
         self.assertIsNotNone(result)
         self.update_handler.assert_called_once_with(
             {"my_key": 1}, self.store.DEFAULT_BACKGROUND_BATCH_SIZE
@@ -67,18 +66,12 @@ class BackgroundUpdateTestCase(unittest.TestCase):
 
         self.update_handler.side_effect = update
         self.update_handler.reset_mock()
-        result = yield self.store.do_next_background_update(
-            duration_ms * desired_count
-        )
+        result = yield self.store.do_next_background_update(duration_ms * desired_count)
         self.assertIsNotNone(result)
-        self.update_handler.assert_called_once_with(
-            {"my_key": 2}, desired_count
-        )
+        self.update_handler.assert_called_once_with({"my_key": 2}, desired_count)
 
         # third step: we don't expect to be called any more
         self.update_handler.reset_mock()
-        result = yield self.store.do_next_background_update(
-            duration_ms * desired_count
-        )
+        result = yield self.store.do_next_background_update(duration_ms * desired_count)
         self.assertIsNone(result)
         self.assertFalse(self.update_handler.called)
diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py
index 0ac910e76f..829f47d2e8 100644
--- a/tests/storage/test_base.py
+++ b/tests/storage/test_base.py
@@ -14,18 +14,18 @@
 # limitations under the License.
 
 
-from tests import unittest
-from twisted.internet import defer
+from collections import OrderedDict
 
 from mock import Mock
 
-from collections import OrderedDict
-
-from synapse.server import HomeServer
+from twisted.internet import defer
 
 from synapse.storage._base import SQLBaseStore
 from synapse.storage.engines import create_engine
 
+from tests import unittest
+from tests.utils import TestHomeServer
+
 
 class SQLBaseStoreTestCase(unittest.TestCase):
     """ Test the "simple" SQL generating methods in SQLBaseStore. """
@@ -40,16 +40,18 @@ class SQLBaseStoreTestCase(unittest.TestCase):
 
         def runInteraction(func, *args, **kwargs):
             return defer.succeed(func(self.mock_txn, *args, **kwargs))
+
         self.db_pool.runInteraction = runInteraction
 
         def runWithConnection(func, *args, **kwargs):
             return defer.succeed(func(self.mock_conn, *args, **kwargs))
+
         self.db_pool.runWithConnection = runWithConnection
 
         config = Mock()
         config.event_cache_size = 1
         config.database_config = {"name": "sqlite3"}
-        hs = HomeServer(
+        hs = TestHomeServer(
             "test",
             db_pool=self.db_pool,
             config=config,
@@ -63,8 +65,7 @@ class SQLBaseStoreTestCase(unittest.TestCase):
         self.mock_txn.rowcount = 1
 
         yield self.datastore._simple_insert(
-            table="tablename",
-            values={"columname": "Value"}
+            table="tablename", values={"columname": "Value"}
         )
 
         self.mock_txn.execute.assert_called_with(
@@ -78,12 +79,11 @@ class SQLBaseStoreTestCase(unittest.TestCase):
         yield self.datastore._simple_insert(
             table="tablename",
             # Use OrderedDict() so we can assert on the SQL generated
-            values=OrderedDict([("colA", 1), ("colB", 2), ("colC", 3)])
+            values=OrderedDict([("colA", 1), ("colB", 2), ("colC", 3)]),
         )
 
         self.mock_txn.execute.assert_called_with(
-            "INSERT INTO tablename (colA, colB, colC) VALUES(?, ?, ?)",
-            (1, 2, 3,)
+            "INSERT INTO tablename (colA, colB, colC) VALUES(?, ?, ?)", (1, 2, 3)
         )
 
     @defer.inlineCallbacks
@@ -92,9 +92,7 @@ class SQLBaseStoreTestCase(unittest.TestCase):
         self.mock_txn.__iter__ = Mock(return_value=iter([("Value",)]))
 
         value = yield self.datastore._simple_select_one_onecol(
-            table="tablename",
-            keyvalues={"keycol": "TheKey"},
-            retcol="retcol"
+            table="tablename", keyvalues={"keycol": "TheKey"}, retcol="retcol"
         )
 
         self.assertEquals("Value", value)
@@ -110,13 +108,12 @@ class SQLBaseStoreTestCase(unittest.TestCase):
         ret = yield self.datastore._simple_select_one(
             table="tablename",
             keyvalues={"keycol": "TheKey"},
-            retcols=["colA", "colB", "colC"]
+            retcols=["colA", "colB", "colC"],
         )
 
         self.assertEquals({"colA": 1, "colB": 2, "colC": 3}, ret)
         self.mock_txn.execute.assert_called_with(
-            "SELECT colA, colB, colC FROM tablename WHERE keycol = ?",
-            ["TheKey"]
+            "SELECT colA, colB, colC FROM tablename WHERE keycol = ?", ["TheKey"]
         )
 
     @defer.inlineCallbacks
@@ -128,7 +125,7 @@ class SQLBaseStoreTestCase(unittest.TestCase):
             table="tablename",
             keyvalues={"keycol": "Not here"},
             retcols=["colA"],
-            allow_none=True
+            allow_none=True,
         )
 
         self.assertFalse(ret)
@@ -137,20 +134,15 @@ class SQLBaseStoreTestCase(unittest.TestCase):
     def test_select_list(self):
         self.mock_txn.rowcount = 3
         self.mock_txn.__iter__ = Mock(return_value=iter([(1,), (2,), (3,)]))
-        self.mock_txn.description = (
-            ("colA", None, None, None, None, None, None),
-        )
+        self.mock_txn.description = (("colA", None, None, None, None, None, None),)
 
         ret = yield self.datastore._simple_select_list(
-            table="tablename",
-            keyvalues={"keycol": "A set"},
-            retcols=["colA"],
+            table="tablename", keyvalues={"keycol": "A set"}, retcols=["colA"]
         )
 
         self.assertEquals([{"colA": 1}, {"colA": 2}, {"colA": 3}], ret)
         self.mock_txn.execute.assert_called_with(
-            "SELECT colA FROM tablename WHERE keycol = ?",
-            ["A set"]
+            "SELECT colA FROM tablename WHERE keycol = ?", ["A set"]
         )
 
     @defer.inlineCallbacks
@@ -160,12 +152,12 @@ class SQLBaseStoreTestCase(unittest.TestCase):
         yield self.datastore._simple_update_one(
             table="tablename",
             keyvalues={"keycol": "TheKey"},
-            updatevalues={"columnname": "New Value"}
+            updatevalues={"columnname": "New Value"},
         )
 
         self.mock_txn.execute.assert_called_with(
             "UPDATE tablename SET columnname = ? WHERE keycol = ?",
-            ["New Value", "TheKey"]
+            ["New Value", "TheKey"],
         )
 
     @defer.inlineCallbacks
@@ -175,13 +167,12 @@ class SQLBaseStoreTestCase(unittest.TestCase):
         yield self.datastore._simple_update_one(
             table="tablename",
             keyvalues=OrderedDict([("colA", 1), ("colB", 2)]),
-            updatevalues=OrderedDict([("colC", 3), ("colD", 4)])
+            updatevalues=OrderedDict([("colC", 3), ("colD", 4)]),
         )
 
         self.mock_txn.execute.assert_called_with(
-            "UPDATE tablename SET colC = ?, colD = ? WHERE"
-            " colA = ? AND colB = ?",
-            [3, 4, 1, 2]
+            "UPDATE tablename SET colC = ?, colD = ? WHERE" " colA = ? AND colB = ?",
+            [3, 4, 1, 2],
         )
 
     @defer.inlineCallbacks
@@ -189,8 +180,7 @@ class SQLBaseStoreTestCase(unittest.TestCase):
         self.mock_txn.rowcount = 1
 
         yield self.datastore._simple_delete_one(
-            table="tablename",
-            keyvalues={"keycol": "Go away"},
+            table="tablename", keyvalues={"keycol": "Go away"}
         )
 
         self.mock_txn.execute.assert_called_with(
diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py
index bd6fda6cb1..c2e88bdbaf 100644
--- a/tests/storage/test_client_ips.py
+++ b/tests/storage/test_client_ips.py
@@ -12,6 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from mock import Mock
 
 from twisted.internet import defer
 
@@ -27,17 +28,16 @@ class ClientIpStoreTestCase(tests.unittest.TestCase):
 
     @defer.inlineCallbacks
     def setUp(self):
-        hs = yield tests.utils.setup_test_homeserver()
-        self.store = hs.get_datastore()
-        self.clock = hs.get_clock()
+        self.hs = yield tests.utils.setup_test_homeserver(self.addCleanup)
+        self.store = self.hs.get_datastore()
+        self.clock = self.hs.get_clock()
 
     @defer.inlineCallbacks
     def test_insert_new_client_ip(self):
         self.clock.now = 12345678
         user_id = "@user:id"
         yield self.store.insert_client_ip(
-            user_id,
-            "access_token", "ip", "user_agent", "device_id",
+            user_id, "access_token", "ip", "user_agent", "device_id"
         )
 
         result = yield self.store.get_last_client_ip_by_device(user_id, "device_id")
@@ -52,5 +52,64 @@ class ClientIpStoreTestCase(tests.unittest.TestCase):
                 "user_agent": "user_agent",
                 "last_seen": 12345678000,
             },
-            r
+            r,
         )
+
+    @defer.inlineCallbacks
+    def test_disabled_monthly_active_user(self):
+        self.hs.config.limit_usage_by_mau = False
+        self.hs.config.max_mau_value = 50
+        user_id = "@user:server"
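+        # With MAU limiting disabled, recording a client IP should not mark
+        # the user as monthly active.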
+        yield self.store.insert_client_ip(
+            user_id, "access_token", "ip", "user_agent", "device_id"
+        )
+        active = yield self.store.user_last_seen_monthly_active(user_id)
+        self.assertFalse(active)
+
+    @defer.inlineCallbacks
+    def test_adding_monthly_active_user_when_full(self):
+        self.hs.config.limit_usage_by_mau = True
+        self.hs.config.max_mau_value = 50
+        lots_of_users = 100
+        user_id = "@user:server"
+
+        self.store.get_monthly_active_count = Mock(
+            return_value=defer.succeed(lots_of_users)
+        )
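+        # With the count mocked as over-capacity, the insert must not make
+        # the user monthly active.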
+        yield self.store.insert_client_ip(
+            user_id, "access_token", "ip", "user_agent", "device_id"
+        )
+        active = yield self.store.user_last_seen_monthly_active(user_id)
+        self.assertFalse(active)
+
+    @defer.inlineCallbacks
+    def test_adding_monthly_active_user_when_space(self):
+        self.hs.config.limit_usage_by_mau = True
+        self.hs.config.max_mau_value = 50
+        user_id = "@user:server"
+        active = yield self.store.user_last_seen_monthly_active(user_id)
+        self.assertFalse(active)
+
+        yield self.store.insert_client_ip(
+            user_id, "access_token", "ip", "user_agent", "device_id"
+        )
+        active = yield self.store.user_last_seen_monthly_active(user_id)
+        self.assertTrue(active)
+
+    @defer.inlineCallbacks
+    def test_updating_monthly_active_user_when_space(self):
+        self.hs.config.limit_usage_by_mau = True
+        self.hs.config.max_mau_value = 50
+        user_id = "@user:server"
+
+        active = yield self.store.user_last_seen_monthly_active(user_id)
+        self.assertFalse(active)
+
+        yield self.store.insert_client_ip(
+            user_id, "access_token", "ip", "user_agent", "device_id"
+        )
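+        # A second insert for the same user/device should just refresh the
+        # existing monthly-active entry rather than duplicate it.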
+        yield self.store.insert_client_ip(
+            user_id, "access_token", "ip", "user_agent", "device_id"
+        )
+        active = yield self.store.user_last_seen_monthly_active(user_id)
+        self.assertTrue(active)
diff --git a/tests/storage/test_devices.py b/tests/storage/test_devices.py
index f8725acea0..aef4dfaf57 100644
--- a/tests/storage/test_devices.py
+++ b/tests/storage/test_devices.py
@@ -16,6 +16,7 @@
 from twisted.internet import defer
 
 import synapse.api.errors
+
 import tests.unittest
 import tests.utils
 
@@ -27,68 +28,64 @@ class DeviceStoreTestCase(tests.unittest.TestCase):
 
     @defer.inlineCallbacks
     def setUp(self):
-        hs = yield tests.utils.setup_test_homeserver()
+        hs = yield tests.utils.setup_test_homeserver(self.addCleanup)
 
         self.store = hs.get_datastore()
 
     @defer.inlineCallbacks
     def test_store_new_device(self):
-        yield self.store.store_device(
-            "user_id", "device_id", "display_name"
-        )
+        yield self.store.store_device("user_id", "device_id", "display_name")
 
         res = yield self.store.get_device("user_id", "device_id")
-        self.assertDictContainsSubset({
-            "user_id": "user_id",
-            "device_id": "device_id",
-            "display_name": "display_name",
-        }, res)
+        self.assertDictContainsSubset(
+            {
+                "user_id": "user_id",
+                "device_id": "device_id",
+                "display_name": "display_name",
+            },
+            res,
+        )
 
     @defer.inlineCallbacks
     def test_get_devices_by_user(self):
-        yield self.store.store_device(
-            "user_id", "device1", "display_name 1"
-        )
-        yield self.store.store_device(
-            "user_id", "device2", "display_name 2"
-        )
-        yield self.store.store_device(
-            "user_id2", "device3", "display_name 3"
-        )
+        yield self.store.store_device("user_id", "device1", "display_name 1")
+        yield self.store.store_device("user_id", "device2", "display_name 2")
+        yield self.store.store_device("user_id2", "device3", "display_name 3")
 
         res = yield self.store.get_devices_by_user("user_id")
         self.assertEqual(2, len(res.keys()))
-        self.assertDictContainsSubset({
-            "user_id": "user_id",
-            "device_id": "device1",
-            "display_name": "display_name 1",
-        }, res["device1"])
-        self.assertDictContainsSubset({
-            "user_id": "user_id",
-            "device_id": "device2",
-            "display_name": "display_name 2",
-        }, res["device2"])
+        self.assertDictContainsSubset(
+            {
+                "user_id": "user_id",
+                "device_id": "device1",
+                "display_name": "display_name 1",
+            },
+            res["device1"],
+        )
+        self.assertDictContainsSubset(
+            {
+                "user_id": "user_id",
+                "device_id": "device2",
+                "display_name": "display_name 2",
+            },
+            res["device2"],
+        )
 
     @defer.inlineCallbacks
     def test_update_device(self):
-        yield self.store.store_device(
-            "user_id", "device_id", "display_name 1"
-        )
+        yield self.store.store_device("user_id", "device_id", "display_name 1")
 
         res = yield self.store.get_device("user_id", "device_id")
         self.assertEqual("display_name 1", res["display_name"])
 
         # do a no-op first
-        yield self.store.update_device(
-            "user_id", "device_id",
-        )
+        yield self.store.update_device("user_id", "device_id")
         res = yield self.store.get_device("user_id", "device_id")
         self.assertEqual("display_name 1", res["display_name"])
 
         # do the update
         yield self.store.update_device(
-            "user_id", "device_id",
-            new_display_name="display_name 2",
+            "user_id", "device_id", new_display_name="display_name 2"
         )
 
         # check it worked
@@ -99,7 +96,6 @@ class DeviceStoreTestCase(tests.unittest.TestCase):
     def test_update_unknown_device(self):
         with self.assertRaises(synapse.api.errors.StoreError) as cm:
             yield self.store.update_device(
-                "user_id", "unknown_device_id",
-                new_display_name="display_name 2",
+                "user_id", "unknown_device_id", new_display_name="display_name 2"
             )
         self.assertEqual(404, cm.exception.code)
diff --git a/tests/storage/test_directory.py b/tests/storage/test_directory.py
index 95709cd50a..b4510c1c8d 100644
--- a/tests/storage/test_directory.py
+++ b/tests/storage/test_directory.py
@@ -14,20 +14,19 @@
 # limitations under the License.
 
 
-from tests import unittest
 from twisted.internet import defer
 
 from synapse.storage.directory import DirectoryStore
-from synapse.types import RoomID, RoomAlias
+from synapse.types import RoomAlias, RoomID
 
+from tests import unittest
 from tests.utils import setup_test_homeserver
 
 
 class DirectoryStoreTestCase(unittest.TestCase):
-
     @defer.inlineCallbacks
     def setUp(self):
-        hs = yield setup_test_homeserver()
+        hs = yield setup_test_homeserver(self.addCleanup)
 
         self.store = DirectoryStore(None, hs)
 
@@ -37,38 +36,29 @@ class DirectoryStoreTestCase(unittest.TestCase):
     @defer.inlineCallbacks
     def test_room_to_alias(self):
         yield self.store.create_room_alias_association(
-            room_alias=self.alias,
-            room_id=self.room.to_string(),
-            servers=["test"],
+            room_alias=self.alias, room_id=self.room.to_string(), servers=["test"]
         )
 
         self.assertEquals(
             ["#my-room:test"],
-            (yield self.store.get_aliases_for_room(self.room.to_string()))
+            (yield self.store.get_aliases_for_room(self.room.to_string())),
         )
 
     @defer.inlineCallbacks
     def test_alias_to_room(self):
         yield self.store.create_room_alias_association(
-            room_alias=self.alias,
-            room_id=self.room.to_string(),
-            servers=["test"],
+            room_alias=self.alias, room_id=self.room.to_string(), servers=["test"]
         )
 
         self.assertObjectHasAttributes(
-            {
-                "room_id": self.room.to_string(),
-                "servers": ["test"],
-            },
-            (yield self.store.get_association_from_room_alias(self.alias))
+            {"room_id": self.room.to_string(), "servers": ["test"]},
+            (yield self.store.get_association_from_room_alias(self.alias)),
         )
 
     @defer.inlineCallbacks
     def test_delete_alias(self):
         yield self.store.create_room_alias_association(
-            room_alias=self.alias,
-            room_id=self.room.to_string(),
-            servers=["test"],
+            room_alias=self.alias, room_id=self.room.to_string(), servers=["test"]
         )
 
         room_id = yield self.store.delete_room_alias(self.alias)
diff --git a/tests/storage/test_end_to_end_keys.py b/tests/storage/test_end_to_end_keys.py
index 84ce492a2c..8f0aaece40 100644
--- a/tests/storage/test_end_to_end_keys.py
+++ b/tests/storage/test_end_to_end_keys.py
@@ -26,8 +26,7 @@ class EndToEndKeyStoreTestCase(tests.unittest.TestCase):
 
     @defer.inlineCallbacks
     def setUp(self):
-        hs = yield tests.utils.setup_test_homeserver()
-
+        hs = yield tests.utils.setup_test_homeserver(self.addCleanup)
         self.store = hs.get_datastore()
 
     @defer.inlineCallbacks
@@ -35,70 +34,49 @@ class EndToEndKeyStoreTestCase(tests.unittest.TestCase):
         now = 1470174257070
         json = {"key": "value"}
 
-        yield self.store.store_device(
-            "user", "device", None
-        )
+        yield self.store.store_device("user", "device", None)
 
-        yield self.store.set_e2e_device_keys(
-            "user", "device", now, json)
+        yield self.store.set_e2e_device_keys("user", "device", now, json)
 
         res = yield self.store.get_e2e_device_keys((("user", "device"),))
         self.assertIn("user", res)
         self.assertIn("device", res["user"])
         dev = res["user"]["device"]
-        self.assertDictContainsSubset({
-            "keys": json,
-            "device_display_name": None,
-        }, dev)
+        self.assertDictContainsSubset({"keys": json, "device_display_name": None}, dev)
 
     @defer.inlineCallbacks
     def test_get_key_with_device_name(self):
         now = 1470174257070
         json = {"key": "value"}
 
-        yield self.store.set_e2e_device_keys(
-            "user", "device", now, json)
-        yield self.store.store_device(
-            "user", "device", "display_name"
-        )
+        yield self.store.set_e2e_device_keys("user", "device", now, json)
+        yield self.store.store_device("user", "device", "display_name")
 
         res = yield self.store.get_e2e_device_keys((("user", "device"),))
         self.assertIn("user", res)
         self.assertIn("device", res["user"])
         dev = res["user"]["device"]
-        self.assertDictContainsSubset({
-            "keys": json,
-            "device_display_name": "display_name",
-        }, dev)
+        self.assertDictContainsSubset(
+            {"keys": json, "device_display_name": "display_name"}, dev
+        )
 
     @defer.inlineCallbacks
     def test_multiple_devices(self):
         now = 1470174257070
 
-        yield self.store.store_device(
-            "user1", "device1", None
-        )
-        yield self.store.store_device(
-            "user1", "device2", None
-        )
-        yield self.store.store_device(
-            "user2", "device1", None
-        )
-        yield self.store.store_device(
-            "user2", "device2", None
-        )
+        yield self.store.store_device("user1", "device1", None)
+        yield self.store.store_device("user1", "device2", None)
+        yield self.store.store_device("user2", "device1", None)
+        yield self.store.store_device("user2", "device2", None)
 
-        yield self.store.set_e2e_device_keys(
-            "user1", "device1", now, 'json11')
-        yield self.store.set_e2e_device_keys(
-            "user1", "device2", now, 'json12')
-        yield self.store.set_e2e_device_keys(
-            "user2", "device1", now, 'json21')
-        yield self.store.set_e2e_device_keys(
-            "user2", "device2", now, 'json22')
-
-        res = yield self.store.get_e2e_device_keys((("user1", "device1"),
-                                                    ("user2", "device2")))
+        yield self.store.set_e2e_device_keys("user1", "device1", now, 'json11')
+        yield self.store.set_e2e_device_keys("user1", "device2", now, 'json12')
+        yield self.store.set_e2e_device_keys("user2", "device1", now, 'json21')
+        yield self.store.set_e2e_device_keys("user2", "device2", now, 'json22')
+
+        res = yield self.store.get_e2e_device_keys(
+            (("user1", "device1"), ("user2", "device2"))
+        )
         self.assertIn("user1", res)
         self.assertIn("device1", res["user1"])
         self.assertNotIn("device2", res["user1"])
diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py
index 30683e7888..2fdf34fdf6 100644
--- a/tests/storage/test_event_federation.py
+++ b/tests/storage/test_event_federation.py
@@ -22,7 +22,7 @@ import tests.utils
 class EventFederationWorkerStoreTestCase(tests.unittest.TestCase):
     @defer.inlineCallbacks
     def setUp(self):
-        hs = yield tests.utils.setup_test_homeserver()
+        hs = yield tests.utils.setup_test_homeserver(self.addCleanup)
         self.store = hs.get_datastore()
 
     @defer.inlineCallbacks
@@ -33,23 +33,32 @@ class EventFederationWorkerStoreTestCase(tests.unittest.TestCase):
         def insert_event(txn, i):
             event_id = '$event_%i:local' % i
 
-            txn.execute((
-                "INSERT INTO events ("
-                "   room_id, event_id, type, depth, topological_ordering,"
-                "   content, processed, outlier) "
-                "VALUES (?, ?, 'm.test', ?, ?, 'test', ?, ?)"
-            ), (room_id, event_id, i, i, True, False))
+            txn.execute(
+                (
+                    "INSERT INTO events ("
+                    "   room_id, event_id, type, depth, topological_ordering,"
+                    "   content, processed, outlier) "
+                    "VALUES (?, ?, 'm.test', ?, ?, 'test', ?, ?)"
+                ),
+                (room_id, event_id, i, i, True, False),
+            )
 
-            txn.execute((
-                'INSERT INTO event_forward_extremities (room_id, event_id) '
-                'VALUES (?, ?)'
-            ), (room_id, event_id))
+            txn.execute(
+                (
+                    'INSERT INTO event_forward_extremities (room_id, event_id) '
+                    'VALUES (?, ?)'
+                ),
+                (room_id, event_id),
+            )
 
-            txn.execute((
-                'INSERT INTO event_reference_hashes '
-                '(event_id, algorithm, hash) '
-                "VALUES (?, 'sha256', ?)"
-            ), (event_id, 'ffff'))
+            txn.execute(
+                (
+                    'INSERT INTO event_reference_hashes '
+                    '(event_id, algorithm, hash) '
+                    "VALUES (?, 'sha256', ?)"
+                ),
+                (event_id, b'ffff'),
+            )
 
         for i in range(0, 11):
             yield self.store.runInteraction("insert", insert_event, i)
diff --git a/tests/storage/test_event_push_actions.py b/tests/storage/test_event_push_actions.py
index 3cbf9a78b1..b114c6fb1d 100644
--- a/tests/storage/test_event_push_actions.py
+++ b/tests/storage/test_event_push_actions.py
@@ -13,25 +13,27 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from mock import Mock
+
 from twisted.internet import defer
 
 import tests.unittest
 import tests.utils
-from mock import Mock
 
 USER_ID = "@user:example.com"
 
 PlAIN_NOTIF = ["notify", {"set_tweak": "highlight", "value": False}]
 HIGHLIGHT = [
-    "notify", {"set_tweak": "sound", "value": "default"}, {"set_tweak": "highlight"}
+    "notify",
+    {"set_tweak": "sound", "value": "default"},
+    {"set_tweak": "highlight"},
 ]
 
 
 class EventPushActionsStoreTestCase(tests.unittest.TestCase):
-
     @defer.inlineCallbacks
     def setUp(self):
-        hs = yield tests.utils.setup_test_homeserver()
+        hs = yield tests.utils.setup_test_homeserver(self.addCleanup)
         self.store = hs.get_datastore()
 
     @defer.inlineCallbacks
@@ -54,12 +56,11 @@ class EventPushActionsStoreTestCase(tests.unittest.TestCase):
         @defer.inlineCallbacks
         def _assert_counts(noitf_count, highlight_count):
             counts = yield self.store.runInteraction(
-                "", self.store._get_unread_counts_by_pos_txn,
-                room_id, user_id, 0
+                "", self.store._get_unread_counts_by_pos_txn, room_id, user_id, 0
             )
             self.assertEquals(
                 counts,
-                {"notify_count": noitf_count, "highlight_count": highlight_count}
+                {"notify_count": noitf_count, "highlight_count": highlight_count},
             )
 
         @defer.inlineCallbacks
@@ -71,11 +72,13 @@ class EventPushActionsStoreTestCase(tests.unittest.TestCase):
             event.depth = stream
 
             yield self.store.add_push_actions_to_staging(
-                event.event_id, {user_id: action},
+                event.event_id, {user_id: action}
             )
             yield self.store.runInteraction(
-                "", self.store._set_push_actions_for_event_and_users_txn,
-                [(event, None)], [(event, None)],
+                "",
+                self.store._set_push_actions_for_event_and_users_txn,
+                [(event, None)],
+                [(event, None)],
             )
 
         def _rotate(stream):
@@ -85,8 +88,11 @@ class EventPushActionsStoreTestCase(tests.unittest.TestCase):
 
         def _mark_read(stream, depth):
             return self.store.runInteraction(
-                "", self.store._remove_old_push_actions_before_txn,
-                room_id, user_id, stream
+                "",
+                self.store._remove_old_push_actions_before_txn,
+                room_id,
+                user_id,
+                stream,
             )
 
         yield _assert_counts(0, 0)
@@ -111,9 +117,7 @@ class EventPushActionsStoreTestCase(tests.unittest.TestCase):
         yield _rotate(7)
 
         yield self.store._simple_delete(
-            table="event_push_actions",
-            keyvalues={"1": 1},
-            desc="",
+            table="event_push_actions", keyvalues={"1": 1}, desc=""
         )
 
         yield _assert_counts(1, 0)
@@ -131,18 +135,21 @@ class EventPushActionsStoreTestCase(tests.unittest.TestCase):
     @defer.inlineCallbacks
     def test_find_first_stream_ordering_after_ts(self):
         def add_event(so, ts):
-            return self.store._simple_insert("events", {
-                "stream_ordering": so,
-                "received_ts": ts,
-                "event_id": "event%i" % so,
-                "type": "",
-                "room_id": "",
-                "content": "",
-                "processed": True,
-                "outlier": False,
-                "topological_ordering": 0,
-                "depth": 0,
-            })
+            return self.store._simple_insert(
+                "events",
+                {
+                    "stream_ordering": so,
+                    "received_ts": ts,
+                    "event_id": "event%i" % so,
+                    "type": "",
+                    "room_id": "",
+                    "content": "",
+                    "processed": True,
+                    "outlier": False,
+                    "topological_ordering": 0,
+                    "depth": 0,
+                },
+            )
 
         # start with the base case where there are no events in the table
         r = yield self.store.find_first_stream_ordering_after_ts(11)
@@ -159,31 +166,27 @@ class EventPushActionsStoreTestCase(tests.unittest.TestCase):
 
         # add a bunch of dummy events to the events table
         for (stream_ordering, ts) in (
-                (3, 110),
-                (4, 120),
-                (5, 120),
-                (10, 130),
-                (20, 140),
+            (3, 110),
+            (4, 120),
+            (5, 120),
+            (10, 130),
+            (20, 140),
         ):
             yield add_event(stream_ordering, ts)
 
         r = yield self.store.find_first_stream_ordering_after_ts(110)
-        self.assertEqual(r, 3,
-                         "First event after 110ms should be 3, was %i" % r)
+        self.assertEqual(r, 3, "First event after 110ms should be 3, was %i" % r)
 
         # 4 and 5 are both after 120: we want 4 rather than 5
         r = yield self.store.find_first_stream_ordering_after_ts(120)
-        self.assertEqual(r, 4,
-                         "First event after 120ms should be 4, was %i" % r)
+        self.assertEqual(r, 4, "First event after 120ms should be 4, was %i" % r)
 
         r = yield self.store.find_first_stream_ordering_after_ts(129)
-        self.assertEqual(r, 10,
-                         "First event after 129ms should be 10, was %i" % r)
+        self.assertEqual(r, 10, "First event after 129ms should be 10, was %i" % r)
 
         # check we can get the last event
         r = yield self.store.find_first_stream_ordering_after_ts(140)
-        self.assertEqual(r, 20,
-                         "First event after 14ms should be 20, was %i" % r)
+        self.assertEqual(r, 20, "First event after 140ms should be 20, was %i" % r)
 
         # off the end
         r = yield self.store.find_first_stream_ordering_after_ts(160)
diff --git a/tests/storage/test_keys.py b/tests/storage/test_keys.py
index 0be790d8f8..47f4a8ceac 100644
--- a/tests/storage/test_keys.py
+++ b/tests/storage/test_keys.py
@@ -14,6 +14,7 @@
 # limitations under the License.
 
 import signedjson.key
+
 from twisted.internet import defer
 
 import tests.unittest
@@ -27,7 +28,7 @@ class KeyStoreTestCase(tests.unittest.TestCase):
 
     @defer.inlineCallbacks
     def setUp(self):
-        hs = yield tests.utils.setup_test_homeserver()
+        hs = yield tests.utils.setup_test_homeserver(self.addCleanup)
         self.store = hs.get_datastore()
 
     @defer.inlineCallbacks
@@ -38,15 +39,12 @@ class KeyStoreTestCase(tests.unittest.TestCase):
         key2 = signedjson.key.decode_verify_key_base64(
             "ed25519", "key2", "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
         )
-        yield self.store.store_server_verify_key(
-            "server1", "from_server", 0, key1
-        )
-        yield self.store.store_server_verify_key(
-            "server1", "from_server", 0, key2
-        )
+        yield self.store.store_server_verify_key("server1", "from_server", 0, key1)
+        yield self.store.store_server_verify_key("server1", "from_server", 0, key2)
 
         res = yield self.store.get_server_verify_keys(
-            "server1", ["ed25519:key1", "ed25519:key2", "ed25519:key3"])
+            "server1", ["ed25519:key1", "ed25519:key2", "ed25519:key3"]
+        )
 
         self.assertEqual(len(res.keys()), 2)
         self.assertEqual(res["ed25519:key1"].version, "key1")
diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py
new file mode 100644
index 0000000000..f2ed866ae7
--- /dev/null
+++ b/tests/storage/test_monthly_active_users.py
@@ -0,0 +1,131 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+import tests.unittest
+import tests.utils
+from tests.utils import setup_test_homeserver
+
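+# 40 days in seconds: comfortably past the 30-day monthly-active window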
+FORTY_DAYS = 40 * 24 * 60 * 60
+
+
+class MonthlyActiveUsersTestCase(tests.unittest.TestCase):
+    def __init__(self, *args, **kwargs):
+        super(MonthlyActiveUsersTestCase, self).__init__(*args, **kwargs)
+
+    @defer.inlineCallbacks
+    def setUp(self):
+        self.hs = yield setup_test_homeserver(self.addCleanup)
+        self.store = self.hs.get_datastore()
+
+    @defer.inlineCallbacks
+    def test_initialise_reserved_users(self):
+        self.hs.config.max_mau_value = 5
+        user1 = "@user1:server"
+        user1_email = "user1@matrix.org"
+        user2 = "@user2:server"
+        user2_email = "user2@matrix.org"
+        threepids = [
+            {'medium': 'email', 'address': user1_email},
+            {'medium': 'email', 'address': user2_email},
+        ]
+        user_num = len(threepids)
+
+        yield self.store.register(user_id=user1, token="123", password_hash=None)
+
+        yield self.store.register(user_id=user2, token="456", password_hash=None)
+
+        now = int(self.hs.get_clock().time_msec())
+        yield self.store.user_add_threepid(user1, "email", user1_email, now, now)
+        yield self.store.user_add_threepid(user2, "email", user2_email, now, now)
+        yield self.store.initialise_reserved_users(threepids)
+
+        active_count = yield self.store.get_monthly_active_count()
+
+        # Test total counts
+        self.assertEquals(active_count, user_num)
+
+        # Test user is marked as active
+        timestamp = yield self.store.user_last_seen_monthly_active(user1)
+        self.assertTrue(timestamp)
+        timestamp = yield self.store.user_last_seen_monthly_active(user2)
+        self.assertTrue(timestamp)
+
+        # Test that users are never removed from the db.
+        self.hs.config.max_mau_value = 0
+
+        self.hs.get_clock().advance_time(FORTY_DAYS)
+
+        yield self.store.reap_monthly_active_users()
+
+        active_count = yield self.store.get_monthly_active_count()
+        self.assertEquals(active_count, user_num)
+
+        # Test that regular users are removed from the db
+        ru_count = 2
+        yield self.store.upsert_monthly_active_user("@ru1:server")
+        yield self.store.upsert_monthly_active_user("@ru2:server")
+        active_count = yield self.store.get_monthly_active_count()
+
+        self.assertEqual(active_count, user_num + ru_count)
+        self.hs.config.max_mau_value = user_num
+        yield self.store.reap_monthly_active_users()
+
+        active_count = yield self.store.get_monthly_active_count()
+        self.assertEquals(active_count, user_num)
+
+    @defer.inlineCallbacks
+    def test_can_insert_and_count_mau(self):
+        count = yield self.store.get_monthly_active_count()
+        self.assertEqual(0, count)
+
+        yield self.store.upsert_monthly_active_user("@user:server")
+        count = yield self.store.get_monthly_active_count()
+
+        self.assertEqual(1, count)
+
+    @defer.inlineCallbacks
+    def test_user_last_seen_monthly_active(self):
+        user_id1 = "@user1:server"
+        user_id2 = "@user2:server"
+        user_id3 = "@user3:server"
+
+        result = yield self.store.user_last_seen_monthly_active(user_id1)
+        self.assertFalse(result == 0)
+        yield self.store.upsert_monthly_active_user(user_id1)
+        yield self.store.upsert_monthly_active_user(user_id2)
+        result = yield self.store.user_last_seen_monthly_active(user_id1)
+        self.assertTrue(result > 0)
+        result = yield self.store.user_last_seen_monthly_active(user_id3)
+        self.assertFalse(result == 0)
+
+    @defer.inlineCallbacks
+    def test_reap_monthly_active_users(self):
+        self.hs.config.max_mau_value = 5
+        initial_users = 10
+        for i in range(initial_users):
+            yield self.store.upsert_monthly_active_user("@user%d:server" % i)
+        count = yield self.store.get_monthly_active_count()
+        self.assertEqual(count, initial_users)
+        yield self.store.reap_monthly_active_users()
+        count = yield self.store.get_monthly_active_count()
+        self.assertEquals(count, initial_users - self.hs.config.max_mau_value)
+
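+        # Advancing the clock past the 30-day window makes every remaining
+        # entry stale, so the next reap should empty the table.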
+        self.hs.get_clock().advance_time(FORTY_DAYS)
+        yield self.store.reap_monthly_active_users()
+        count = yield self.store.get_monthly_active_count()
+        self.assertEquals(count, 0)
diff --git a/tests/storage/test_presence.py b/tests/storage/test_presence.py
index f5fcb611d4..b5b58ff660 100644
--- a/tests/storage/test_presence.py
+++ b/tests/storage/test_presence.py
@@ -14,20 +14,19 @@
 # limitations under the License.
 
 
-from tests import unittest
 from twisted.internet import defer
 
 from synapse.storage.presence import PresenceStore
 from synapse.types import UserID
 
-from tests.utils import setup_test_homeserver, MockClock
+from tests import unittest
+from tests.utils import MockClock, setup_test_homeserver
 
 
 class PresenceStoreTestCase(unittest.TestCase):
-
     @defer.inlineCallbacks
     def setUp(self):
-        hs = yield setup_test_homeserver(clock=MockClock())
+        hs = yield setup_test_homeserver(self.addCleanup, clock=MockClock())
 
         self.store = PresenceStore(None, hs)
 
@@ -38,16 +37,19 @@ class PresenceStoreTestCase(unittest.TestCase):
     def test_presence_list(self):
         self.assertEquals(
             [],
-            (yield self.store.get_presence_list(
-                observer_localpart=self.u_apple.localpart,
-            ))
+            (
+                yield self.store.get_presence_list(
+                    observer_localpart=self.u_apple.localpart
+                )
+            ),
         )
         self.assertEquals(
             [],
-            (yield self.store.get_presence_list(
-                observer_localpart=self.u_apple.localpart,
-                accepted=True,
-            ))
+            (
+                yield self.store.get_presence_list(
+                    observer_localpart=self.u_apple.localpart, accepted=True
+                )
+            ),
         )
 
         yield self.store.add_presence_list_pending(
@@ -57,16 +59,19 @@ class PresenceStoreTestCase(unittest.TestCase):
 
         self.assertEquals(
             [{"observed_user_id": "@banana:test", "accepted": 0}],
-            (yield self.store.get_presence_list(
-                observer_localpart=self.u_apple.localpart,
-            ))
+            (
+                yield self.store.get_presence_list(
+                    observer_localpart=self.u_apple.localpart
+                )
+            ),
         )
         self.assertEquals(
             [],
-            (yield self.store.get_presence_list(
-                observer_localpart=self.u_apple.localpart,
-                accepted=True,
-            ))
+            (
+                yield self.store.get_presence_list(
+                    observer_localpart=self.u_apple.localpart, accepted=True
+                )
+            ),
         )
 
         yield self.store.set_presence_list_accepted(
@@ -76,16 +81,19 @@ class PresenceStoreTestCase(unittest.TestCase):
 
         self.assertEquals(
             [{"observed_user_id": "@banana:test", "accepted": 1}],
-            (yield self.store.get_presence_list(
-                observer_localpart=self.u_apple.localpart,
-            ))
+            (
+                yield self.store.get_presence_list(
+                    observer_localpart=self.u_apple.localpart
+                )
+            ),
         )
         self.assertEquals(
             [{"observed_user_id": "@banana:test", "accepted": 1}],
-            (yield self.store.get_presence_list(
-                observer_localpart=self.u_apple.localpart,
-                accepted=True,
-            ))
+            (
+                yield self.store.get_presence_list(
+                    observer_localpart=self.u_apple.localpart, accepted=True
+                )
+            ),
         )
 
         yield self.store.del_presence_list(
@@ -95,14 +103,17 @@ class PresenceStoreTestCase(unittest.TestCase):
 
         self.assertEquals(
             [],
-            (yield self.store.get_presence_list(
-                observer_localpart=self.u_apple.localpart,
-            ))
+            (
+                yield self.store.get_presence_list(
+                    observer_localpart=self.u_apple.localpart
+                )
+            ),
         )
         self.assertEquals(
             [],
-            (yield self.store.get_presence_list(
-                observer_localpart=self.u_apple.localpart,
-                accepted=True,
-            ))
+            (
+                yield self.store.get_presence_list(
+                    observer_localpart=self.u_apple.localpart, accepted=True
+                )
+            ),
         )
diff --git a/tests/storage/test_profile.py b/tests/storage/test_profile.py
index 423710c9c1..a1f6618bf9 100644
--- a/tests/storage/test_profile.py
+++ b/tests/storage/test_profile.py
@@ -14,20 +14,19 @@
 # limitations under the License.
 
 
-from tests import unittest
 from twisted.internet import defer
 
 from synapse.storage.profile import ProfileStore
 from synapse.types import UserID
 
+from tests import unittest
 from tests.utils import setup_test_homeserver
 
 
 class ProfileStoreTestCase(unittest.TestCase):
-
     @defer.inlineCallbacks
     def setUp(self):
-        hs = yield setup_test_homeserver()
+        hs = yield setup_test_homeserver(self.addCleanup)
 
         self.store = ProfileStore(None, hs)
 
@@ -35,24 +34,17 @@ class ProfileStoreTestCase(unittest.TestCase):
 
     @defer.inlineCallbacks
     def test_displayname(self):
-        yield self.store.create_profile(
-            self.u_frank.localpart
-        )
+        yield self.store.create_profile(self.u_frank.localpart)
 
-        yield self.store.set_profile_displayname(
-            self.u_frank.localpart, "Frank"
-        )
+        yield self.store.set_profile_displayname(self.u_frank.localpart, "Frank")
 
         self.assertEquals(
-            "Frank",
-            (yield self.store.get_profile_displayname(self.u_frank.localpart))
+            "Frank", (yield self.store.get_profile_displayname(self.u_frank.localpart))
         )
 
     @defer.inlineCallbacks
     def test_avatar_url(self):
-        yield self.store.create_profile(
-            self.u_frank.localpart
-        )
+        yield self.store.create_profile(self.u_frank.localpart)
 
         yield self.store.set_profile_avatar_url(
             self.u_frank.localpart, "http://my.site/here"
@@ -60,5 +52,5 @@ class ProfileStoreTestCase(unittest.TestCase):
 
         self.assertEquals(
             "http://my.site/here",
-            (yield self.store.get_profile_avatar_url(self.u_frank.localpart))
+            (yield self.store.get_profile_avatar_url(self.u_frank.localpart)),
         )
diff --git a/tests/storage/test_purge.py b/tests/storage/test_purge.py
new file mode 100644
index 0000000000..f671599cb8
--- /dev/null
+++ b/tests/storage/test_purge.py
@@ -0,0 +1,106 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.rest.client.v1 import room
+
+from tests.unittest import HomeserverTestCase
+
+
+class PurgeTests(HomeserverTestCase):
+
+    user_id = "@red:server"
+    servlets = [room.register_servlets]
+
+    def make_homeserver(self, reactor, clock):
+        hs = self.setup_test_homeserver("server", http_client=None)
+        return hs
+
+    def prepare(self, reactor, clock, hs):
+        self.room_id = self.helper.create_room_as(self.user_id)
+
+    def test_purge(self):
+        """
+        Purging a room will delete everything before the topological point.
+        """
+        # Send four messages to the room
+        first = self.helper.send(self.room_id, body="test1")
+        second = self.helper.send(self.room_id, body="test2")
+        third = self.helper.send(self.room_id, body="test3")
+        last = self.helper.send(self.room_id, body="test4")
+
+        storage = self.hs.get_datastore()
+
+        # Get the topological token
+        event = storage.get_topological_token_for_event(last["event_id"])
+        self.pump()
+        event = self.successResultOf(event)
+
+        # Purge everything before this topological token
+        purge = storage.purge_history(self.room_id, event, True)
+        self.pump()
+        self.assertEqual(self.successResultOf(purge), None)
+
+        # Try and get the events
+        get_first = storage.get_event(first["event_id"])
+        get_second = storage.get_event(second["event_id"])
+        get_third = storage.get_event(third["event_id"])
+        get_last = storage.get_event(last["event_id"])
+        self.pump()
+
+        # 1-3 should fail and last will succeed, meaning that 1-3 are deleted
+        # and last is not.
+        self.failureResultOf(get_first)
+        self.failureResultOf(get_second)
+        self.failureResultOf(get_third)
+        self.successResultOf(get_last)
+
+    def test_purge_wont_delete_extrems(self):
+        """
+        Purging fails, deleting nothing, if the given topological point is
+        past the room's forward extremities.
+        """
+        # Send four messages to the room
+        first = self.helper.send(self.room_id, body="test1")
+        second = self.helper.send(self.room_id, body="test2")
+        third = self.helper.send(self.room_id, body="test3")
+        last = self.helper.send(self.room_id, body="test4")
+
+        storage = self.hs.get_datastore()
+
+        # Set the topological token higher than it should be
+        event = storage.get_topological_token_for_event(last["event_id"])
+        self.pump()
+        event = self.successResultOf(event)
+        event = "t{}-{}".format(
+            *list(map(lambda x: x + 1, map(int, event[1:].split("-"))))
+        )
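+        # (both the topological and stream parts are bumped one past the
+        # forward extremity)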
+
+        # Purge everything before this topological token
+        purge = storage.purge_history(self.room_id, event, True)
+        self.pump()
+        f = self.failureResultOf(purge)
+        self.assertIn("greater than forward", f.value.args[0])
+
+        # Try and get the events
+        get_first = storage.get_event(first["event_id"])
+        get_second = storage.get_event(second["event_id"])
+        get_third = storage.get_event(third["event_id"])
+        get_last = storage.get_event(last["event_id"])
+        self.pump()
+
+        # Nothing is deleted.
+        self.successResultOf(get_first)
+        self.successResultOf(get_second)
+        self.successResultOf(get_third)
+        self.successResultOf(get_last)
diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py
index 888ddfaddd..02bf975fbf 100644
--- a/tests/storage/test_redaction.py
+++ b/tests/storage/test_redaction.py
@@ -14,24 +14,22 @@
 # limitations under the License.
 
 
-from tests import unittest
+from mock import Mock
+
 from twisted.internet import defer
 
 from synapse.api.constants import EventTypes, Membership
-from synapse.types import UserID, RoomID
+from synapse.types import RoomID, UserID
 
-from tests.utils import setup_test_homeserver
-
-from mock import Mock
+from tests import unittest
+from tests.utils import create_room, setup_test_homeserver
 
 
 class RedactionTestCase(unittest.TestCase):
-
     @defer.inlineCallbacks
     def setUp(self):
         hs = yield setup_test_homeserver(
-            resource_for_federation=Mock(),
-            http_client=None,
+            self.addCleanup, resource_for_federation=Mock(), http_client=None
         )
 
         self.store = hs.get_datastore()
@@ -43,20 +41,25 @@ class RedactionTestCase(unittest.TestCase):
 
         self.room1 = RoomID.from_string("!abc123:test")
 
+        yield create_room(hs, self.room1.to_string(), self.u_alice.to_string())
+
         self.depth = 1
 
     @defer.inlineCallbacks
-    def inject_room_member(self, room, user, membership, replaces_state=None,
-                           extra_content={}):
+    def inject_room_member(
+        self, room, user, membership, replaces_state=None, extra_content={}
+    ):
         content = {"membership": membership}
         content.update(extra_content)
-        builder = self.event_builder_factory.new({
-            "type": EventTypes.Member,
-            "sender": user.to_string(),
-            "state_key": user.to_string(),
-            "room_id": room.to_string(),
-            "content": content,
-        })
+        builder = self.event_builder_factory.new(
+            {
+                "type": EventTypes.Member,
+                "sender": user.to_string(),
+                "state_key": user.to_string(),
+                "room_id": room.to_string(),
+                "content": content,
+            }
+        )
 
         event, context = yield self.event_creation_handler.create_new_client_event(
             builder
@@ -70,13 +73,15 @@ class RedactionTestCase(unittest.TestCase):
     def inject_message(self, room, user, body):
         self.depth += 1
 
-        builder = self.event_builder_factory.new({
-            "type": EventTypes.Message,
-            "sender": user.to_string(),
-            "state_key": user.to_string(),
-            "room_id": room.to_string(),
-            "content": {"body": body, "msgtype": u"message"},
-        })
+        builder = self.event_builder_factory.new(
+            {
+                "type": EventTypes.Message,
+                "sender": user.to_string(),
+                "state_key": user.to_string(),
+                "room_id": room.to_string(),
+                "content": {"body": body, "msgtype": u"message"},
+            }
+        )
 
         event, context = yield self.event_creation_handler.create_new_client_event(
             builder
@@ -88,14 +93,16 @@ class RedactionTestCase(unittest.TestCase):
 
     @defer.inlineCallbacks
     def inject_redaction(self, room, event_id, user, reason):
-        builder = self.event_builder_factory.new({
-            "type": EventTypes.Redaction,
-            "sender": user.to_string(),
-            "state_key": user.to_string(),
-            "room_id": room.to_string(),
-            "content": {"reason": reason},
-            "redacts": event_id,
-        })
+        builder = self.event_builder_factory.new(
+            {
+                "type": EventTypes.Redaction,
+                "sender": user.to_string(),
+                "state_key": user.to_string(),
+                "room_id": room.to_string(),
+                "content": {"reason": reason},
+                "redacts": event_id,
+            }
+        )
 
         event, context = yield self.event_creation_handler.create_new_client_event(
             builder
@@ -105,9 +112,7 @@ class RedactionTestCase(unittest.TestCase):
 
     @defer.inlineCallbacks
     def test_redact(self):
-        yield self.inject_room_member(
-            self.room1, self.u_alice, Membership.JOIN
-        )
+        yield self.inject_room_member(self.room1, self.u_alice, Membership.JOIN)
 
         msg_event = yield self.inject_message(self.room1, self.u_alice, u"t")
 
@@ -157,13 +162,10 @@ class RedactionTestCase(unittest.TestCase):
 
     @defer.inlineCallbacks
     def test_redact_join(self):
-        yield self.inject_room_member(
-            self.room1, self.u_alice, Membership.JOIN
-        )
+        yield self.inject_room_member(self.room1, self.u_alice, Membership.JOIN)
 
         msg_event = yield self.inject_room_member(
-            self.room1, self.u_bob, Membership.JOIN,
-            extra_content={"blue": "red"},
+            self.room1, self.u_bob, Membership.JOIN, extra_content={"blue": "red"}
         )
 
         event = yield self.store.get_event(msg_event.event_id)
diff --git a/tests/storage/test_registration.py b/tests/storage/test_registration.py
index f863b75846..3dfb7b903a 100644
--- a/tests/storage/test_registration.py
+++ b/tests/storage/test_registration.py
@@ -14,26 +14,22 @@
 # limitations under the License.
 
 
-from tests import unittest
 from twisted.internet import defer
 
+from tests import unittest
 from tests.utils import setup_test_homeserver
 
 
 class RegistrationStoreTestCase(unittest.TestCase):
-
     @defer.inlineCallbacks
     def setUp(self):
-        hs = yield setup_test_homeserver()
+        hs = yield setup_test_homeserver(self.addCleanup)
         self.db_pool = hs.get_db_pool()
 
         self.store = hs.get_datastore()
 
         self.user_id = "@my-user:test"
-        self.tokens = [
-            "AbCdEfGhIjKlMnOpQrStUvWxYz",
-            "BcDeFgHiJkLmNoPqRsTuVwXyZa"
-        ]
+        self.tokens = ["AbCdEfGhIjKlMnOpQrStUvWxYz", "BcDeFgHiJkLmNoPqRsTuVwXyZa"]
         self.pwhash = "{xx1}123456789"
         self.device_id = "akgjhdjklgshg"
 
@@ -50,35 +46,28 @@ class RegistrationStoreTestCase(unittest.TestCase):
                 "consent_version": None,
                 "consent_server_notice_sent": None,
                 "appservice_id": None,
+                "creation_ts": 1000,
             },
-            (yield self.store.get_user_by_id(self.user_id))
+            (yield self.store.get_user_by_id(self.user_id)),
         )
 
         result = yield self.store.get_user_by_access_token(self.tokens[0])
 
-        self.assertDictContainsSubset(
-            {
-                "name": self.user_id,
-            },
-            result
-        )
+        self.assertDictContainsSubset({"name": self.user_id}, result)
 
         self.assertTrue("token_id" in result)
 
     @defer.inlineCallbacks
     def test_add_tokens(self):
         yield self.store.register(self.user_id, self.tokens[0], self.pwhash)
-        yield self.store.add_access_token_to_user(self.user_id, self.tokens[1],
-                                                  self.device_id)
+        yield self.store.add_access_token_to_user(
+            self.user_id, self.tokens[1], self.device_id
+        )
 
         result = yield self.store.get_user_by_access_token(self.tokens[1])
 
         self.assertDictContainsSubset(
-            {
-                "name": self.user_id,
-                "device_id": self.device_id,
-            },
-            result
+            {"name": self.user_id, "device_id": self.device_id}, result
         )
 
         self.assertTrue("token_id" in result)
@@ -87,12 +76,13 @@ class RegistrationStoreTestCase(unittest.TestCase):
     def test_user_delete_access_tokens(self):
         # add some tokens
         yield self.store.register(self.user_id, self.tokens[0], self.pwhash)
-        yield self.store.add_access_token_to_user(self.user_id, self.tokens[1],
-                                                  self.device_id)
+        yield self.store.add_access_token_to_user(
+            self.user_id, self.tokens[1], self.device_id
+        )
 
         # now delete some
         yield self.store.user_delete_access_tokens(
-            self.user_id, device_id=self.device_id,
+            self.user_id, device_id=self.device_id
         )
 
         # check they were deleted
@@ -107,8 +97,7 @@ class RegistrationStoreTestCase(unittest.TestCase):
         yield self.store.user_delete_access_tokens(self.user_id)
 
         user = yield self.store.get_user_by_access_token(self.tokens[0])
-        self.assertIsNone(user,
-                          "access token was not deleted without device_id")
+        self.assertIsNone(user, "access token was not deleted without device_id")
 
 
 class TokenGenerator:
@@ -117,4 +106,4 @@ class TokenGenerator:
 
     def generate(self, user_id):
         self._last_issued_token += 1
-        return u"%s-%d" % (user_id, self._last_issued_token,)
+        return u"%s-%d" % (user_id, self._last_issued_token)
diff --git a/tests/storage/test_room.py b/tests/storage/test_room.py
index ef8a4d234f..a1ea23b068 100644
--- a/tests/storage/test_room.py
+++ b/tests/storage/test_room.py
@@ -14,20 +14,19 @@
 # limitations under the License.
 
 
-from tests import unittest
 from twisted.internet import defer
 
 from synapse.api.constants import EventTypes
-from synapse.types import UserID, RoomID, RoomAlias
+from synapse.types import RoomAlias, RoomID, UserID
 
+from tests import unittest
 from tests.utils import setup_test_homeserver
 
 
 class RoomStoreTestCase(unittest.TestCase):
-
     @defer.inlineCallbacks
     def setUp(self):
-        hs = yield setup_test_homeserver()
+        hs = yield setup_test_homeserver(self.addCleanup)
 
         # We can't test RoomStore on its own without the DirectoryStore, for
         # management of the 'room_aliases' table
@@ -40,7 +39,7 @@ class RoomStoreTestCase(unittest.TestCase):
         yield self.store.store_room(
             self.room.to_string(),
             room_creator_user_id=self.u_creator.to_string(),
-            is_public=True
+            is_public=True,
         )
 
     @defer.inlineCallbacks
@@ -49,17 +48,16 @@ class RoomStoreTestCase(unittest.TestCase):
             {
                 "room_id": self.room.to_string(),
                 "creator": self.u_creator.to_string(),
-                "is_public": True
+                "is_public": True,
             },
-            (yield self.store.get_room(self.room.to_string()))
+            (yield self.store.get_room(self.room.to_string())),
         )
 
 
 class RoomEventsStoreTestCase(unittest.TestCase):
-
     @defer.inlineCallbacks
     def setUp(self):
-        hs = setup_test_homeserver()
+        hs = yield setup_test_homeserver(self.addCleanup)
 
         # Room events need the full datastore, for persist_event() and
         # get_room_state()
@@ -69,18 +67,13 @@ class RoomEventsStoreTestCase(unittest.TestCase):
         self.room = RoomID.from_string("!abcde:test")
 
         yield self.store.store_room(
-            self.room.to_string(),
-            room_creator_user_id="@creator:text",
-            is_public=True
+            self.room.to_string(), room_creator_user_id="@creator:text", is_public=True
         )
 
     @defer.inlineCallbacks
     def inject_room_event(self, **kwargs):
         yield self.store.persist_event(
-            self.event_factory.create_event(
-                room_id=self.room.to_string(),
-                **kwargs
-            )
+            self.event_factory.create_event(room_id=self.room.to_string(), **kwargs)
         )
 
     @defer.inlineCallbacks
@@ -88,22 +81,15 @@ class RoomEventsStoreTestCase(unittest.TestCase):
         name = u"A-Room-Name"
 
         yield self.inject_room_event(
-            etype=EventTypes.Name,
-            name=name,
-            content={"name": name},
-            depth=1,
+            etype=EventTypes.Name, name=name, content={"name": name}, depth=1
         )
 
-        state = yield self.store.get_current_state(
-            room_id=self.room.to_string()
-        )
+        state = yield self.store.get_current_state(room_id=self.room.to_string())
 
         self.assertEquals(1, len(state))
         self.assertObjectHasAttributes(
-            {"type": "m.room.name",
-             "room_id": self.room.to_string(),
-             "name": name},
-            state[0]
+            {"type": "m.room.name", "room_id": self.room.to_string(), "name": name},
+            state[0],
         )
 
     @defer.inlineCallbacks
@@ -111,22 +97,15 @@ class RoomEventsStoreTestCase(unittest.TestCase):
         topic = u"A place for things"
 
         yield self.inject_room_event(
-            etype=EventTypes.Topic,
-            topic=topic,
-            content={"topic": topic},
-            depth=1,
+            etype=EventTypes.Topic, topic=topic, content={"topic": topic}, depth=1
         )
 
-        state = yield self.store.get_current_state(
-            room_id=self.room.to_string()
-        )
+        state = yield self.store.get_current_state(room_id=self.room.to_string())
 
         self.assertEquals(1, len(state))
         self.assertObjectHasAttributes(
-            {"type": "m.room.topic",
-             "room_id": self.room.to_string(),
-             "topic": topic},
-            state[0]
+            {"type": "m.room.topic", "room_id": self.room.to_string(), "topic": topic},
+            state[0],
         )
 
     # Not testing the various 'level' methods for now because there's lots
diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py
index 657b279e5d..978c66133d 100644
--- a/tests/storage/test_roommember.py
+++ b/tests/storage/test_roommember.py
@@ -14,24 +14,22 @@
 # limitations under the License.
 
 
-from tests import unittest
+from mock import Mock
+
 from twisted.internet import defer
 
 from synapse.api.constants import EventTypes, Membership
-from synapse.types import UserID, RoomID
+from synapse.types import RoomID, UserID
 
-from tests.utils import setup_test_homeserver
-
-from mock import Mock
+from tests import unittest
+from tests.utils import create_room, setup_test_homeserver
 
 
 class RoomMemberStoreTestCase(unittest.TestCase):
-
     @defer.inlineCallbacks
     def setUp(self):
         hs = yield setup_test_homeserver(
-            resource_for_federation=Mock(),
-            http_client=None,
+            self.addCleanup, resource_for_federation=Mock(), http_client=None
         )
         # We can't test the RoomMemberStore on its own without the other event
         # storage logic
@@ -47,15 +45,19 @@ class RoomMemberStoreTestCase(unittest.TestCase):
 
         self.room = RoomID.from_string("!abc123:test")
 
+        yield create_room(hs, self.room.to_string(), self.u_alice.to_string())
+
     @defer.inlineCallbacks
     def inject_room_member(self, room, user, membership, replaces_state=None):
-        builder = self.event_builder_factory.new({
-            "type": EventTypes.Member,
-            "sender": user.to_string(),
-            "state_key": user.to_string(),
-            "room_id": room.to_string(),
-            "content": {"membership": membership},
-        })
+        builder = self.event_builder_factory.new(
+            {
+                "type": EventTypes.Member,
+                "sender": user.to_string(),
+                "state_key": user.to_string(),
+                "room_id": room.to_string(),
+                "content": {"membership": membership},
+            }
+        )
 
         event, context = yield self.event_creation_handler.create_new_client_event(
             builder
@@ -71,9 +73,12 @@ class RoomMemberStoreTestCase(unittest.TestCase):
 
         self.assertEquals(
             [self.room.to_string()],
-            [m.room_id for m in (
-                yield self.store.get_rooms_for_user_where_membership_is(
-                    self.u_alice.to_string(), [Membership.JOIN]
+            [
+                m.room_id
+                for m in (
+                    yield self.store.get_rooms_for_user_where_membership_is(
+                        self.u_alice.to_string(), [Membership.JOIN]
+                    )
                 )
-            )]
+            ],
         )
diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py
new file mode 100644
index 0000000000..d717b9f94e
--- /dev/null
+++ b/tests/storage/test_state.py
@@ -0,0 +1,435 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from twisted.internet import defer
+
+from synapse.api.constants import EventTypes, Membership
+from synapse.types import RoomID, UserID
+
+import tests.unittest
+import tests.utils
+
+logger = logging.getLogger(__name__)
+
+
+class StateStoreTestCase(tests.unittest.TestCase):
+    def __init__(self, *args, **kwargs):
+        super(StateStoreTestCase, self).__init__(*args, **kwargs)
+        self.store = None  # type: synapse.storage.DataStore
+
+    @defer.inlineCallbacks
+    def setUp(self):
+        hs = yield tests.utils.setup_test_homeserver(self.addCleanup)
+
+        self.store = hs.get_datastore()
+        self.event_builder_factory = hs.get_event_builder_factory()
+        self.event_creation_handler = hs.get_event_creation_handler()
+
+        self.u_alice = UserID.from_string("@alice:test")
+        self.u_bob = UserID.from_string("@bob:test")
+
+        self.room = RoomID.from_string("!abc123:test")
+
+        yield self.store.store_room(
+            self.room.to_string(), room_creator_user_id="@creator:test", is_public=True
+        )
+
+    @defer.inlineCallbacks
+    def inject_state_event(self, room, sender, typ, state_key, content):
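+        # Build a state event for the given room and sender, persist it, and
+        # return the persisted event.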
+        builder = self.event_builder_factory.new(
+            {
+                "type": typ,
+                "sender": sender.to_string(),
+                "state_key": state_key,
+                "room_id": room.to_string(),
+                "content": content,
+            }
+        )
+
+        event, context = yield self.event_creation_handler.create_new_client_event(
+            builder
+        )
+
+        yield self.store.persist_event(event, context)
+
+        defer.returnValue(event)
+
+    def assertStateMapEqual(self, s1, s2):
+        for t in s1:
+            # just compare event IDs for simplicity
+            self.assertEqual(s1[t].event_id, s2[t].event_id)
+        self.assertEqual(len(s1), len(s2))
+
+    @defer.inlineCallbacks
+    def test_get_state_for_event(self):
+
+        # this builds a linear DAG, since each new injection uses whatever
+        # forward extremities are currently in the DB for this room as its
+        # prev_events.
+        e1 = yield self.inject_state_event(
+            self.room, self.u_alice, EventTypes.Create, '', {}
+        )
+        e2 = yield self.inject_state_event(
+            self.room, self.u_alice, EventTypes.Name, '', {"name": "test room"}
+        )
+        e3 = yield self.inject_state_event(
+            self.room,
+            self.u_alice,
+            EventTypes.Member,
+            self.u_alice.to_string(),
+            {"membership": Membership.JOIN},
+        )
+        e4 = yield self.inject_state_event(
+            self.room,
+            self.u_bob,
+            EventTypes.Member,
+            self.u_bob.to_string(),
+            {"membership": Membership.JOIN},
+        )
+        e5 = yield self.inject_state_event(
+            self.room,
+            self.u_bob,
+            EventTypes.Member,
+            self.u_bob.to_string(),
+            {"membership": Membership.LEAVE},
+        )
+
+        # check we get the full state as of the final event
+        state = yield self.store.get_state_for_event(
+            e5.event_id, None, filtered_types=None
+        )
+
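+        # e4 is superseded by e5 below; this just checks it was created.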
+        self.assertIsNotNone(e4)
+
+        self.assertStateMapEqual(
+            {
+                (e1.type, e1.state_key): e1,
+                (e2.type, e2.state_key): e2,
+                (e3.type, e3.state_key): e3,
+                # e4 is overwritten by e5
+                (e5.type, e5.state_key): e5,
+            },
+            state,
+        )
+
+        # check we can filter to the m.room.name event (with a '' state key)
+        state = yield self.store.get_state_for_event(
+            e5.event_id, [(EventTypes.Name, '')], filtered_types=None
+        )
+
+        self.assertStateMapEqual({(e2.type, e2.state_key): e2}, state)
+
+        # check we can filter to the m.room.name event (with a wildcard None state key)
+        state = yield self.store.get_state_for_event(
+            e5.event_id, [(EventTypes.Name, None)], filtered_types=None
+        )
+
+        self.assertStateMapEqual({(e2.type, e2.state_key): e2}, state)
+
+        # check we can grab the m.room.member events (with a wildcard None state key)
+        state = yield self.store.get_state_for_event(
+            e5.event_id, [(EventTypes.Member, None)], filtered_types=None
+        )
+
+        self.assertStateMapEqual(
+            {(e3.type, e3.state_key): e3, (e5.type, e5.state_key): e5}, state
+        )
+
+        # check we can use filtered_types to grab a specific room member
+        # without filtering out the other event types
+        state = yield self.store.get_state_for_event(
+            e5.event_id,
+            [(EventTypes.Member, self.u_alice.to_string())],
+            filtered_types=[EventTypes.Member],
+        )
+
+        self.assertStateMapEqual(
+            {
+                (e1.type, e1.state_key): e1,
+                (e2.type, e2.state_key): e2,
+                (e3.type, e3.state_key): e3,
+            },
+            state,
+        )
+
+        # check that types=[], filtered_types=[EventTypes.Member]
+        # doesn't return all members
+        state = yield self.store.get_state_for_event(
+            e5.event_id, [], filtered_types=[EventTypes.Member]
+        )
+
+        self.assertStateMapEqual(
+            {(e1.type, e1.state_key): e1, (e2.type, e2.state_key): e2}, state
+        )
+
+        #######################################################
+        # _get_some_state_from_cache tests against a full cache
+        #######################################################
+
+        room_id = self.room.to_string()
+        group_ids = yield self.store.get_state_groups_ids(room_id, [e5.event_id])
+        group = list(group_ids.keys())[0]
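+        # we asked about a single event, so there is exactly one state group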
+
+        # test _get_some_state_from_cache correctly filters out members with types=[]
+        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+            self.store._state_group_cache,
+            group, [], filtered_types=[EventTypes.Member]
+        )
+
+        self.assertEqual(is_all, True)
+        self.assertDictEqual(
+            {
+                (e1.type, e1.state_key): e1.event_id,
+                (e2.type, e2.state_key): e2.event_id,
+            },
+            state_dict,
+        )
+
+        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+            self.store._state_group_members_cache,
+            group, [], filtered_types=[EventTypes.Member]
+        )
+
+        self.assertEqual(is_all, True)
+        self.assertDictEqual(
+            {},
+            state_dict,
+        )
+
+        # test _get_some_state_from_cache correctly filters in members with wildcard types
+        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+            self.store._state_group_cache,
+            group, [(EventTypes.Member, None)], filtered_types=[EventTypes.Member]
+        )
+
+        self.assertEqual(is_all, True)
+        self.assertDictEqual(
+            {
+                (e1.type, e1.state_key): e1.event_id,
+                (e2.type, e2.state_key): e2.event_id,
+            },
+            state_dict,
+        )
+
+        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+            self.store._state_group_members_cache,
+            group, [(EventTypes.Member, None)], filtered_types=[EventTypes.Member]
+        )
+
+        self.assertEqual(is_all, True)
+        self.assertDictEqual(
+            {
+                (e3.type, e3.state_key): e3.event_id,
+                # e4 is overwritten by e5
+                (e5.type, e5.state_key): e5.event_id,
+            },
+            state_dict,
+        )
+
+        # test _get_some_state_from_cache correctly filters in members with specific types
+        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+            self.store._state_group_cache,
+            group,
+            [(EventTypes.Member, e5.state_key)],
+            filtered_types=[EventTypes.Member],
+        )
+
+        self.assertEqual(is_all, True)
+        self.assertDictEqual(
+            {
+                (e1.type, e1.state_key): e1.event_id,
+                (e2.type, e2.state_key): e2.event_id,
+            },
+            state_dict,
+        )
+
+        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+            self.store._state_group_members_cache,
+            group,
+            [(EventTypes.Member, e5.state_key)],
+            filtered_types=[EventTypes.Member],
+        )
+
+        self.assertEqual(is_all, True)
+        self.assertDictEqual(
+            {
+                (e5.type, e5.state_key): e5.event_id,
+            },
+            state_dict,
+        )
+
+        # test _get_some_state_from_cache correctly filters in members with specific types
+        # and no filtered_types
+        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+            self.store._state_group_members_cache,
+            group, [(EventTypes.Member, e5.state_key)], filtered_types=None
+        )
+
+        self.assertEqual(is_all, True)
+        self.assertDictEqual({(e5.type, e5.state_key): e5.event_id}, state_dict)
+
+        #######################################################
+        # deliberately remove e2 (room name) from the _state_group_cache
+
+        (is_all, known_absent, state_dict_ids) = self.store._state_group_cache.get(
+            group
+        )
+
+        self.assertEqual(is_all, True)
+        self.assertEqual(known_absent, set())
+        self.assertDictEqual(
+            state_dict_ids,
+            {
+                (e1.type, e1.state_key): e1.event_id,
+                (e2.type, e2.state_key): e2.event_id,
+            },
+        )
+
+        state_dict_ids.pop((e2.type, e2.state_key))
+        self.store._state_group_cache.invalidate(group)
+        self.store._state_group_cache.update(
+            sequence=self.store._state_group_cache.sequence,
+            key=group,
+            value=state_dict_ids,
+            # list fetched keys so it knows it's partial
+            fetched_keys=(
+                (e1.type, e1.state_key),
+            ),
+        )
+
+        (is_all, known_absent, state_dict_ids) = self.store._state_group_cache.get(
+            group
+        )
+
+        self.assertEqual(is_all, False)
+        self.assertEqual(known_absent, {(e1.type, e1.state_key)})
+        self.assertDictEqual(
+            state_dict_ids,
+            {
+                (e1.type, e1.state_key): e1.event_id,
+            },
+        )
+
+        ############################################
+        # test that things work with a partial cache
+
+        # test _get_some_state_from_cache correctly filters out members with types=[]
+        room_id = self.room.to_string()
+        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+            self.store._state_group_cache,
+            group, [], filtered_types=[EventTypes.Member]
+        )
+
+        self.assertEqual(is_all, False)
+        self.assertDictEqual({(e1.type, e1.state_key): e1.event_id}, state_dict)
+
+        room_id = self.room.to_string()
+        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+            self.store._state_group_members_cache,
+            group, [], filtered_types=[EventTypes.Member]
+        )
+
+        self.assertEqual(is_all, True)
+        self.assertDictEqual({}, state_dict)
+
+        # test _get_some_state_from_cache correctly filters in members with wildcard types
+        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+            self.store._state_group_cache,
+            group, [(EventTypes.Member, None)], filtered_types=[EventTypes.Member]
+        )
+
+        self.assertEqual(is_all, False)
+        self.assertDictEqual(
+            {
+                (e1.type, e1.state_key): e1.event_id,
+            },
+            state_dict,
+        )
+
+        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+            self.store._state_group_members_cache,
+            group, [(EventTypes.Member, None)], filtered_types=[EventTypes.Member]
+        )
+
+        self.assertEqual(is_all, True)
+        self.assertDictEqual(
+            {
+                (e3.type, e3.state_key): e3.event_id,
+                (e5.type, e5.state_key): e5.event_id,
+            },
+            state_dict,
+        )
+
+        # test _get_some_state_from_cache correctly filters in members with specific types
+        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+            self.store._state_group_cache,
+            group,
+            [(EventTypes.Member, e5.state_key)],
+            filtered_types=[EventTypes.Member],
+        )
+
+        self.assertEqual(is_all, False)
+        self.assertDictEqual(
+            {
+                (e1.type, e1.state_key): e1.event_id,
+            },
+            state_dict,
+        )
+
+        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+            self.store._state_group_members_cache,
+            group,
+            [(EventTypes.Member, e5.state_key)],
+            filtered_types=[EventTypes.Member],
+        )
+
+        self.assertEqual(is_all, True)
+        self.assertDictEqual(
+            {
+                (e5.type, e5.state_key): e5.event_id,
+            },
+            state_dict,
+        )
+
+        # test _get_some_state_from_cache correctly filters in members with specific types
+        # and no filtered_types
+        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+            self.store._state_group_cache,
+            group, [(EventTypes.Member, e5.state_key)], filtered_types=None
+        )
+
+        self.assertEqual(is_all, False)
+        self.assertDictEqual({}, state_dict)
+
+        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+            self.store._state_group_members_cache,
+            group, [(EventTypes.Member, e5.state_key)], filtered_types=None
+        )
+
+        self.assertEqual(is_all, True)
+        self.assertDictEqual(
+            {
+                (e5.type, e5.state_key): e5.event_id,
+            },
+            state_dict,
+        )
diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py
index 0891308f25..b46e0ea7e2 100644
--- a/tests/storage/test_user_directory.py
+++ b/tests/storage/test_user_directory.py
@@ -17,6 +17,7 @@ from twisted.internet import defer
 
 from synapse.storage import UserDirectoryStore
 from synapse.storage.roommember import ProfileInfo
+
 from tests import unittest
 from tests.utils import setup_test_homeserver
 
@@ -28,7 +29,7 @@ BOBBY = "@bobby:a"
 class UserDirectoryStoreTestCase(unittest.TestCase):
     @defer.inlineCallbacks
     def setUp(self):
-        self.hs = yield setup_test_homeserver()
+        self.hs = yield setup_test_homeserver(self.addCleanup)
         self.store = UserDirectoryStore(None, self.hs)
 
         # alice and bob are both in !room_id. bobby is not but shares
@@ -38,20 +39,12 @@ class UserDirectoryStoreTestCase(unittest.TestCase):
             {
                 ALICE: ProfileInfo(None, "alice"),
                 BOB: ProfileInfo(None, "bob"),
-                BOBBY: ProfileInfo(None, "bobby")
+                BOBBY: ProfileInfo(None, "bobby"),
             },
         )
-        yield self.store.add_users_to_public_room(
-            "!room:id",
-            [ALICE, BOB],
-        )
+        yield self.store.add_users_to_public_room("!room:id", [ALICE, BOB])
         yield self.store.add_users_who_share_room(
-            "!room:id",
-            False,
-            (
-                (ALICE, BOB),
-                (BOB, ALICE),
-            ),
+            "!room:id", False, ((ALICE, BOB), (BOB, ALICE))
         )
 
     @defer.inlineCallbacks
@@ -61,11 +54,9 @@ class UserDirectoryStoreTestCase(unittest.TestCase):
         r = yield self.store.search_user_dir(ALICE, "bob", 10)
         self.assertFalse(r["limited"])
         self.assertEqual(1, len(r["results"]))
-        self.assertDictEqual(r["results"][0], {
-            "user_id": BOB,
-            "display_name": "bob",
-            "avatar_url": None,
-        })
+        self.assertDictEqual(
+            r["results"][0], {"user_id": BOB, "display_name": "bob", "avatar_url": None}
+        )
 
     @defer.inlineCallbacks
     def test_search_user_dir_all_users(self):
@@ -74,15 +65,13 @@ class UserDirectoryStoreTestCase(unittest.TestCase):
             r = yield self.store.search_user_dir(ALICE, "bob", 10)
             self.assertFalse(r["limited"])
             self.assertEqual(2, len(r["results"]))
-            self.assertDictEqual(r["results"][0], {
-                "user_id": BOB,
-                "display_name": "bob",
-                "avatar_url": None,
-            })
-            self.assertDictEqual(r["results"][1], {
-                "user_id": BOBBY,
-                "display_name": "bobby",
-                "avatar_url": None,
-            })
+            self.assertDictEqual(
+                r["results"][0],
+                {"user_id": BOB, "display_name": "bob", "avatar_url": None},
+            )
+            self.assertDictEqual(
+                r["results"][1],
+                {"user_id": BOBBY, "display_name": "bobby", "avatar_url": None},
+            )
         finally:
             self.hs.config.user_directory_search_all_users = False
diff --git a/tests/test_distributor.py b/tests/test_distributor.py
index 010aeaee7e..b57f36e6ac 100644
--- a/tests/test_distributor.py
+++ b/tests/test_distributor.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,52 +14,26 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from . import unittest
-from twisted.internet import defer
-
 from mock import Mock, patch
 
 from synapse.util.distributor import Distributor
-from synapse.util.async import run_on_reactor
 
+from . import unittest
 
-class DistributorTestCase(unittest.TestCase):
 
+class DistributorTestCase(unittest.TestCase):
     def setUp(self):
         self.dist = Distributor()
 
-    @defer.inlineCallbacks
     def test_signal_dispatch(self):
         self.dist.declare("alert")
 
         observer = Mock()
         self.dist.observe("alert", observer)
 
-        d = self.dist.fire("alert", 1, 2, 3)
-        yield d
-        self.assertTrue(d.called)
+        self.dist.fire("alert", 1, 2, 3)
         observer.assert_called_with(1, 2, 3)
 
-    @defer.inlineCallbacks
-    def test_signal_dispatch_deferred(self):
-        self.dist.declare("whine")
-
-        d_inner = defer.Deferred()
-
-        def observer():
-            return d_inner
-
-        self.dist.observe("whine", observer)
-
-        d_outer = self.dist.fire("whine")
-
-        self.assertFalse(d_outer.called)
-
-        d_inner.callback(None)
-        yield d_outer
-        self.assertTrue(d_outer.called)
-
-    @defer.inlineCallbacks
     def test_signal_catch(self):
         self.dist.declare("alarm")
 
@@ -68,54 +43,26 @@ class DistributorTestCase(unittest.TestCase):
 
         observers[0].side_effect = Exception("Awoogah!")
 
-        with patch(
-            "synapse.util.distributor.logger", spec=["warning"]
-        ) as mock_logger:
-            d = self.dist.fire("alarm", "Go")
-            yield d
-            self.assertTrue(d.called)
+        with patch("synapse.util.distributor.logger", spec=["warning"]) as mock_logger:
+            self.dist.fire("alarm", "Go")
 
             observers[0].assert_called_once_with("Go")
             observers[1].assert_called_once_with("Go")
 
             self.assertEquals(mock_logger.warning.call_count, 1)
-            self.assertIsInstance(
-                mock_logger.warning.call_args[0][0], str
-            )
-
-    @defer.inlineCallbacks
-    def test_signal_catch_no_suppress(self):
-        # Gut-wrenching
-        self.dist.suppress_failures = False
-
-        self.dist.declare("whail")
-
-        class MyException(Exception):
-            pass
+            self.assertIsInstance(mock_logger.warning.call_args[0][0], str)
 
-        @defer.inlineCallbacks
-        def observer():
-            yield run_on_reactor()
-            raise MyException("Oopsie")
-
-        self.dist.observe("whail", observer)
-
-        d = self.dist.fire("whail")
-
-        yield self.assertFailure(d, MyException)
-        self.dist.suppress_failures = True
-
-    @defer.inlineCallbacks
     def test_signal_prereg(self):
         observer = Mock()
         self.dist.observe("flare", observer)
 
         self.dist.declare("flare")
-        yield self.dist.fire("flare", 4, 5)
+        self.dist.fire("flare", 4, 5)
 
         observer.assert_called_with(4, 5)
 
     def test_signal_undeclared(self):
         def code():
             self.dist.fire("notification")
+
         self.assertRaises(KeyError, code)
diff --git a/tests/test_dns.py b/tests/test_dns.py
index 3b360a0fc7..90bd34be34 100644
--- a/tests/test_dns.py
+++ b/tests/test_dns.py
@@ -13,20 +13,20 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from . import unittest
+from mock import Mock
+
 from twisted.internet import defer
 from twisted.names import dns, error
 
-from mock import Mock
-
 from synapse.http.endpoint import resolve_service
 
 from tests.utils import MockClock
 
+from . import unittest
+
 
 @unittest.DEBUG
 class DnsTestCase(unittest.TestCase):
-
     @defer.inlineCallbacks
     def test_resolve(self):
         dns_client_mock = Mock()
@@ -35,14 +35,11 @@ class DnsTestCase(unittest.TestCase):
         host_name = "example.com"
 
         answer_srv = dns.RRHeader(
-            type=dns.SRV,
-            payload=dns.Record_SRV(
-                target=host_name,
-            )
+            type=dns.SRV, payload=dns.Record_SRV(target=host_name)
         )
 
         dns_client_mock.lookupService.return_value = defer.succeed(
-            ([answer_srv], None, None),
+            ([answer_srv], None, None)
         )
 
         cache = {}
@@ -67,9 +64,7 @@ class DnsTestCase(unittest.TestCase):
         entry = Mock(spec_set=["expires"])
         entry.expires = 0
 
-        cache = {
-            service_name: [entry]
-        }
+        cache = {service_name: [entry]}
 
         servers = yield resolve_service(
             service_name, dns_client=dns_client_mock, cache=cache
@@ -92,12 +87,10 @@ class DnsTestCase(unittest.TestCase):
         entry = Mock(spec_set=["expires"])
         entry.expires = 999999999
 
-        cache = {
-            service_name: [entry]
-        }
+        cache = {service_name: [entry]}
 
         servers = yield resolve_service(
-            service_name, dns_client=dns_client_mock, cache=cache, clock=clock,
+            service_name, dns_client=dns_client_mock, cache=cache, clock=clock
         )
 
         self.assertFalse(dns_client_mock.lookupService.called)
@@ -116,9 +109,7 @@ class DnsTestCase(unittest.TestCase):
         cache = {}
 
         with self.assertRaises(error.DNSServerError):
-            yield resolve_service(
-                service_name, dns_client=dns_client_mock, cache=cache
-            )
+            yield resolve_service(service_name, dns_client=dns_client_mock, cache=cache)
 
     @defer.inlineCallbacks
     def test_name_error(self):
diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py
new file mode 100644
index 0000000000..411b4a9f86
--- /dev/null
+++ b/tests/test_event_auth.py
@@ -0,0 +1,144 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+from synapse import event_auth
+from synapse.api.errors import AuthError
+from synapse.events import FrozenEvent
+
+
+class EventAuthTestCase(unittest.TestCase):
+    def test_random_users_cannot_send_state_before_first_pl(self):
+        """
+        Check that, before the first PL lands, the creator is the only user
+        that can send a state event.
+        """
+        creator = "@creator:example.com"
+        joiner = "@joiner:example.com"
+        auth_events = {
+            ("m.room.create", ""): _create_event(creator),
+            ("m.room.member", creator): _join_event(creator),
+            ("m.room.member", joiner): _join_event(joiner),
+        }
+
+        # creator should be able to send state
+        event_auth.check(_random_state_event(creator), auth_events, do_sig_check=False)
+
+        # joiner should not be able to send state
+        self.assertRaises(
+            AuthError,
+            event_auth.check,
+            _random_state_event(joiner),
+            auth_events,
+            do_sig_check=False,
+        )
+
+    def test_state_default_level(self):
+        """
+        Check that users above the state_default level can send state and
+        those below cannot
+        """
+        creator = "@creator:example.com"
+        pleb = "@joiner:example.com"
+        king = "@joiner2:example.com"
+
+        auth_events = {
+            ("m.room.create", ""): _create_event(creator),
+            ("m.room.member", creator): _join_event(creator),
+            ("m.room.power_levels", ""): _power_levels_event(
+                creator, {"state_default": "30", "users": {pleb: "29", king: "30"}}
+            ),
+            ("m.room.member", pleb): _join_event(pleb),
+            ("m.room.member", king): _join_event(king),
+        }
+
+        # pleb should not be able to send state
+        self.assertRaises(
+            AuthError,
+            event_auth.check,
+            _random_state_event(pleb),
+            auth_events,
+            do_sig_check=False,
+        )
+
+        # king should be able to send state
+        event_auth.check(_random_state_event(king), auth_events, do_sig_check=False)
+
+
+# helpers for making events
+
+TEST_ROOM_ID = "!test:room"
+
+
+def _create_event(user_id):
+    return FrozenEvent(
+        {
+            "room_id": TEST_ROOM_ID,
+            "event_id": _get_event_id(),
+            "type": "m.room.create",
+            "sender": user_id,
+            "content": {"creator": user_id},
+        }
+    )
+
+
+def _join_event(user_id):
+    return FrozenEvent(
+        {
+            "room_id": TEST_ROOM_ID,
+            "event_id": _get_event_id(),
+            "type": "m.room.member",
+            "sender": user_id,
+            "state_key": user_id,
+            "content": {"membership": "join"},
+        }
+    )
+
+
+def _power_levels_event(sender, content):
+    return FrozenEvent(
+        {
+            "room_id": TEST_ROOM_ID,
+            "event_id": _get_event_id(),
+            "type": "m.room.power_levels",
+            "sender": sender,
+            "state_key": "",
+            "content": content,
+        }
+    )
+
+
+def _random_state_event(sender):
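+    # an arbitrary (non-spec) state event type, used to probe who may send state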
+    return FrozenEvent(
+        {
+            "room_id": TEST_ROOM_ID,
+            "event_id": _get_event_id(),
+            "type": "test.state",
+            "sender": sender,
+            "state_key": "",
+            "content": {"membership": "join"},
+        }
+    )
+
+
+event_count = 0
+
+
+def _get_event_id():
+    global event_count
+    c = event_count
+    event_count += 1
+    return "!%i:example.com" % (c,)
diff --git a/tests/test_federation.py b/tests/test_federation.py
new file mode 100644
index 0000000000..2540604fcc
--- /dev/null
+++ b/tests/test_federation.py
@@ -0,0 +1,245 @@
+
+from mock import Mock
+
+from twisted.internet.defer import maybeDeferred, succeed
+
+from synapse.events import FrozenEvent
+from synapse.types import Requester, UserID
+from synapse.util import Clock
+
+from tests import unittest
+from tests.server import ThreadedMemoryReactorClock, setup_test_homeserver
+
+
+class MessageAcceptTests(unittest.TestCase):
+    def setUp(self):
+
+        self.http_client = Mock()
+        self.reactor = ThreadedMemoryReactorClock()
+        self.hs_clock = Clock(self.reactor)
+        self.homeserver = setup_test_homeserver(
+            self.addCleanup,
+            http_client=self.http_client,
+            clock=self.hs_clock,
+            reactor=self.reactor,
+        )
+
+        user_id = UserID("us", "test")
+        our_user = Requester(user_id, None, False, None, None)
+        room_creator = self.homeserver.get_room_creation_handler()
+        room = room_creator.create_room(
+            our_user, room_creator.PRESETS_DICT["public_chat"], ratelimit=False
+        )
+        self.reactor.advance(0.1)
+        self.room_id = self.successResultOf(room)["room_id"]
+
+        # Figure out what the most recent event is
+        most_recent = self.successResultOf(
+            maybeDeferred(
+                self.homeserver.datastore.get_latest_event_ids_in_room, self.room_id
+            )
+        )[0]
+
+        join_event = FrozenEvent(
+            {
+                "room_id": self.room_id,
+                "sender": "@baduser:test.serv",
+                "state_key": "@baduser:test.serv",
+                "event_id": "$join:test.serv",
+                "depth": 1000,
+                "origin_server_ts": 1,
+                "type": "m.room.member",
+                "origin": "test.servx",
+                "content": {"membership": "join"},
+                "auth_events": [],
+                "prev_state": [(most_recent, {})],
+                "prev_events": [(most_recent, {})],
+            }
+        )
+
+        self.handler = self.homeserver.get_handlers().federation_handler
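+        # stub out event auth and signature checks so we can inject arbitrary PDUs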
+        self.handler.do_auth = lambda *a, **b: succeed(True)
+        self.client = self.homeserver.get_federation_client()
+        self.client._check_sigs_and_hash_and_fetch = lambda dest, pdus, **k: succeed(
+            pdus
+        )
+
+        # Send the join; it should return None (which is not an error)
+        d = self.handler.on_receive_pdu(
+            "test.serv", join_event, sent_to_us_directly=True
+        )
+        self.reactor.advance(1)
+        self.assertEqual(self.successResultOf(d), None)
+
+        # Make sure we actually joined the room
+        self.assertEqual(
+            self.successResultOf(
+                maybeDeferred(
+                    self.homeserver.datastore.get_latest_event_ids_in_room, self.room_id
+                )
+            )[0],
+            "$join:test.serv",
+        )
+
+    def test_cant_hide_direct_ancestors(self):
+        """
+        If you send a message, you must be able to provide the direct
+        prev_events that said event references.
+        """
+
+        def post_json(destination, path, data, headers=None, timeout=0):
+            # If it asks us for new missing events, give them NOTHING
+            if path.startswith("/_matrix/federation/v1/get_missing_events/"):
+                return {"events": []}
+
+        self.http_client.post_json = post_json
+
+        # Figure out what the most recent event is
+        most_recent = self.successResultOf(
+            maybeDeferred(
+                self.homeserver.datastore.get_latest_event_ids_in_room, self.room_id
+            )
+        )[0]
+
+        # Now lie about an event
+        lying_event = FrozenEvent(
+            {
+                "room_id": self.room_id,
+                "sender": "@baduser:test.serv",
+                "event_id": "one:test.serv",
+                "depth": 1000,
+                "origin_server_ts": 1,
+                "type": "m.room.message",
+                "origin": "test.serv",
+                "content": "hewwo?",
+                "auth_events": [],
+                "prev_events": [("two:test.serv", {}), (most_recent, {})],
+            }
+        )
+
+        d = self.handler.on_receive_pdu(
+            "test.serv", lying_event, sent_to_us_directly=True
+        )
+
+        # Step the reactor, so the database fetches come back
+        self.reactor.advance(1)
+
+        # on_receive_pdu should throw an error
+        failure = self.failureResultOf(d)
+        self.assertEqual(
+            failure.value.args[0],
+            (
+                "ERROR 403: Your server isn't divulging details about prev_events "
+                "referenced in this event."
+            ),
+        )
+
+        # Make sure the invalid event isn't there
+        extrem = maybeDeferred(
+            self.homeserver.datastore.get_latest_event_ids_in_room, self.room_id
+        )
+        self.assertEqual(self.successResultOf(extrem)[0], "$join:test.serv")
+
+    def test_cant_hide_past_history(self):
+        """
+        If an event references deeper history that the remote server cannot
+        (or will not) divulge, we fall back to fetching the state at that
+        point in the DAG rather than rejecting the event outright.
+        """
+
+        def post_json(destination, path, data, headers=None, timeout=0):
+            if path.startswith("/_matrix/federation/v1/get_missing_events/"):
+                return {
+                    "events": [
+                        {
+                            "room_id": self.room_id,
+                            "sender": "@baduser:test.serv",
+                            "event_id": "three:test.serv",
+                            "depth": 1000,
+                            "origin_server_ts": 1,
+                            "type": "m.room.message",
+                            "origin": "test.serv",
+                            "content": "hewwo?",
+                            "auth_events": [],
+                            "prev_events": [("four:test.serv", {})],
+                        }
+                    ]
+                }
+
+        self.http_client.post_json = post_json
+
+        def get_json(destination, path, args, headers=None):
+            if path.startswith("/_matrix/federation/v1/state_ids/"):
+                d = self.successResultOf(
+                    self.homeserver.datastore.get_state_ids_for_event("one:test.serv")
+                )
+
+                return succeed(
+                    {
+                        "pdu_ids": [
+                            y
+                            for x, y in d.items()
+                            if x == ("m.room.member", "@us:test")
+                        ],
+                        "auth_chain_ids": list(d.values()),
+                    }
+                )
+
+        self.http_client.get_json = get_json
+
+        # Figure out what the most recent event is
+        most_recent = self.successResultOf(
+            maybeDeferred(
+                self.homeserver.datastore.get_latest_event_ids_in_room, self.room_id
+            )
+        )[0]
+
+        # Make a good event
+        good_event = FrozenEvent(
+            {
+                "room_id": self.room_id,
+                "sender": "@baduser:test.serv",
+                "event_id": "one:test.serv",
+                "depth": 1000,
+                "origin_server_ts": 1,
+                "type": "m.room.message",
+                "origin": "test.serv",
+                "content": "hewwo?",
+                "auth_events": [],
+                "prev_events": [(most_recent, {})],
+            }
+        )
+
+        d = self.handler.on_receive_pdu(
+            "test.serv", good_event, sent_to_us_directly=True
+        )
+        self.reactor.advance(1)
+        self.assertEqual(self.successResultOf(d), None)
+
+        bad_event = FrozenEvent(
+            {
+                "room_id": self.room_id,
+                "sender": "@baduser:test.serv",
+                "event_id": "two:test.serv",
+                "depth": 1000,
+                "origin_server_ts": 1,
+                "type": "m.room.message",
+                "origin": "test.serv",
+                "content": "hewwo?",
+                "auth_events": [],
+                "prev_events": [("one:test.serv", {}), ("three:test.serv", {})],
+            }
+        )
+
+        d = self.handler.on_receive_pdu(
+            "test.serv", bad_event, sent_to_us_directly=True
+        )
+        self.reactor.advance(1)
+
+        extrem = maybeDeferred(
+            self.homeserver.datastore.get_latest_event_ids_in_room, self.room_id
+        )
+        self.assertEqual(self.successResultOf(extrem)[0], "two:test.serv")
+
+        state = self.homeserver.get_state_handler().get_current_state_ids(self.room_id)
+        self.reactor.advance(1)
+        self.assertIn(("m.room.member", "@us:test"), self.successResultOf(state).keys())
diff --git a/tests/test_mau.py b/tests/test_mau.py
new file mode 100644
index 0000000000..0732615447
--- /dev/null
+++ b/tests/test_mau.py
@@ -0,0 +1,217 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests REST events for /rooms paths."""
+
+import json
+
+from mock import Mock, NonCallableMock
+
+from synapse.api.constants import LoginType
+from synapse.api.errors import Codes, HttpResponseException, SynapseError
+from synapse.http.server import JsonResource
+from synapse.rest.client.v2_alpha import register, sync
+from synapse.util import Clock
+
+from tests import unittest
+from tests.server import (
+    ThreadedMemoryReactorClock,
+    make_request,
+    render,
+    setup_test_homeserver,
+)
+
+
+class TestMauLimit(unittest.TestCase):
+    def setUp(self):
+        self.reactor = ThreadedMemoryReactorClock()
+        self.clock = Clock(self.reactor)
+
+        self.hs = setup_test_homeserver(
+            self.addCleanup,
+            "red",
+            http_client=None,
+            clock=self.clock,
+            reactor=self.reactor,
+            federation_client=Mock(),
+            ratelimiter=NonCallableMock(spec_set=["send_message"]),
+        )
+
+        self.store = self.hs.get_datastore()
+
+        self.hs.config.registrations_require_3pid = []
+        self.hs.config.enable_registration_captcha = False
+        self.hs.config.recaptcha_public_key = []
+
+        self.hs.config.limit_usage_by_mau = True
+        self.hs.config.hs_disabled = False
+        self.hs.config.max_mau_value = 2
+        self.hs.config.mau_trial_days = 0
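+        # no trial period by default; individual tests override this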
+        self.hs.config.server_notices_mxid = "@server:red"
+        self.hs.config.server_notices_mxid_display_name = None
+        self.hs.config.server_notices_mxid_avatar_url = None
+        self.hs.config.server_notices_room_name = "Test Server Notice Room"
+
+        self.resource = JsonResource(self.hs)
+        register.register_servlets(self.hs, self.resource)
+        sync.register_servlets(self.hs, self.resource)
+
+    def test_simple_deny_mau(self):
+        # Create and sync so that the MAU counts get updated
+        token1 = self.create_user("kermit1")
+        self.do_sync_for_user(token1)
+        token2 = self.create_user("kermit2")
+        self.do_sync_for_user(token2)
+
+        # We've created and activated two users, so we shouldn't be able
+        # to register any more
+        with self.assertRaises(SynapseError) as cm:
+            self.create_user("kermit3")
+
+        e = cm.exception
+        self.assertEqual(e.code, 403)
+        self.assertEqual(e.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)
+
+    def test_allowed_after_a_month_mau(self):
+        # Create and sync so that the MAU counts get updated
+        token1 = self.create_user("kermit1")
+        self.do_sync_for_user(token1)
+        token2 = self.create_user("kermit2")
+        self.do_sync_for_user(token2)
+
+        # Advance time by 31 days
+        self.reactor.advance(31 * 24 * 60 * 60)
+
+        self.store.reap_monthly_active_users()
+
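+        # pump the reactor so the reaping deferred can complete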
+        self.reactor.advance(0)
+
+        # We should be able to register more users
+        token3 = self.create_user("kermit3")
+        self.do_sync_for_user(token3)
+
+    def test_trial_delay(self):
+        self.hs.config.mau_trial_days = 1
+
+        # We should be able to register more than the limit initially
+        token1 = self.create_user("kermit1")
+        self.do_sync_for_user(token1)
+        token2 = self.create_user("kermit2")
+        self.do_sync_for_user(token2)
+        token3 = self.create_user("kermit3")
+        self.do_sync_for_user(token3)
+
+        # Advance time by 2 days
+        self.reactor.advance(2 * 24 * 60 * 60)
+
+        # Two users should be able to sync
+        self.do_sync_for_user(token1)
+        self.do_sync_for_user(token2)
+
+        # But the third should fail
+        with self.assertRaises(SynapseError) as cm:
+            self.do_sync_for_user(token3)
+
+        e = cm.exception
+        self.assertEqual(e.code, 403)
+        self.assertEqual(e.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)
+
+        # And new registrations are now denied too
+        with self.assertRaises(SynapseError) as cm:
+            self.create_user("kermit4")
+
+        e = cm.exception
+        self.assertEqual(e.code, 403)
+        self.assertEqual(e.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)
+
+    def test_trial_users_cant_come_back(self):
+        self.hs.config.mau_trial_days = 1
+
+        # We should be able to register more than the limit initially
+        token1 = self.create_user("kermit1")
+        self.do_sync_for_user(token1)
+        token2 = self.create_user("kermit2")
+        self.do_sync_for_user(token2)
+        token3 = self.create_user("kermit3")
+        self.do_sync_for_user(token3)
+
+        # Advance time by 2 days
+        self.reactor.advance(2 * 24 * 60 * 60)
+
+        # Two users should be able to sync
+        self.do_sync_for_user(token1)
+        self.do_sync_for_user(token2)
+
+        # Advance by 2 months so everyone falls out of MAU
+        self.reactor.advance(60 * 24 * 60 * 60)
+        self.store.reap_monthly_active_users()
+        self.reactor.advance(0)
+
+        # We can create as many new users as we want
+        token4 = self.create_user("kermit4")
+        self.do_sync_for_user(token4)
+        token5 = self.create_user("kermit5")
+        self.do_sync_for_user(token5)
+        token6 = self.create_user("kermit6")
+        self.do_sync_for_user(token6)
+
+        # users 2 and 3 can return, bringing us back up to the MAU limit
+        self.do_sync_for_user(token2)
+        self.do_sync_for_user(token3)
+
+        # New trial users can still sync
+        self.do_sync_for_user(token4)
+        self.do_sync_for_user(token5)
+        self.do_sync_for_user(token6)
+
+        # But the old user can't
+        with self.assertRaises(SynapseError) as cm:
+            self.do_sync_for_user(token1)
+
+        e = cm.exception
+        self.assertEqual(e.code, 403)
+        self.assertEqual(e.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)
+
+    def create_user(self, localpart):
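+        # Register a user via /register and return their access token.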
+        request_data = json.dumps({
+            "username": localpart,
+            "password": "monkey",
+            "auth": {"type": LoginType.DUMMY},
+        })
+
+        request, channel = make_request(b"POST", b"/register", request_data)
+        render(request, self.resource, self.reactor)
+
+        if channel.result["code"] != b"200":
+            raise HttpResponseException(
+                int(channel.result["code"]),
+                channel.result["reason"],
+                channel.result["body"],
+            ).to_synapse_error()
+
+        access_token = channel.json_body["access_token"]
+
+        return access_token
+
+    def do_sync_for_user(self, token):
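+        # Hit /sync as the given user, marking them monthly-active.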
+        request, channel = make_request(b"GET", b"/sync", access_token=token)
+        render(request, self.resource, self.reactor)
+
+        if channel.result["code"] != b"200":
+            raise HttpResponseException(
+                int(channel.result["code"]),
+                channel.result["reason"],
+                channel.result["body"],
+            ).to_synapse_error()
diff --git a/tests/test_preview.py b/tests/test_preview.py
index 5bd36c74aa..84ef5e5ba4 100644
--- a/tests/test_preview.py
+++ b/tests/test_preview.py
@@ -13,15 +13,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from . import unittest
-
 from synapse.rest.media.v1.preview_url_resource import (
-    summarize_paragraphs, decode_and_calc_og
+    decode_and_calc_og,
+    summarize_paragraphs,
 )
 
+from . import unittest
 
-class PreviewTestCase(unittest.TestCase):
 
+class PreviewTestCase(unittest.TestCase):
     def test_long_summarize(self):
         example_paras = [
             u"""Tromsø (Norwegian pronunciation: [ˈtrʊmsœ] ( listen); Northern Sami:
@@ -31,7 +31,6 @@ class PreviewTestCase(unittest.TestCase):
             alternative spellings of the city.Tromsø is considered the northernmost
             city in the world with a population above 50,000. The most populous town
             north of it is Alta, Norway, with a population of 14,272 (2013).""",
-
             u"""Tromsø lies in Northern Norway. The municipality has a population of
             (2015) 72,066, but with an annual influx of students it has over 75,000
             most of the year. It is the largest urban area in Northern Norway and the
@@ -45,7 +44,6 @@ class PreviewTestCase(unittest.TestCase):
             Sandnessund Bridge. Tromsø Airport connects the city to many destinations
             in Europe. The city is warmer than most other places located on the same
             latitude, due to the warming effect of the Gulf Stream.""",
-
             u"""The city centre of Tromsø contains the highest number of old wooden
             houses in Northern Norway, the oldest house dating from 1789. The Arctic
             Cathedral, a modern church from 1965, is probably the most famous landmark
@@ -66,7 +64,7 @@ class PreviewTestCase(unittest.TestCase):
             u" the city of Tromsø. Outside of Norway, Tromso and Tromsö are"
             u" alternative spellings of the city.Tromsø is considered the northernmost"
             u" city in the world with a population above 50,000. The most populous town"
-            u" north of it is Alta, Norway, with a population of 14,272 (2013)."
+            u" north of it is Alta, Norway, with a population of 14,272 (2013).",
         )
 
         desc = summarize_paragraphs(example_paras[1:], min_size=200, max_size=500)
@@ -79,7 +77,7 @@ class PreviewTestCase(unittest.TestCase):
             u" third largest north of the Arctic Circle (following Murmansk and Norilsk)."
             u" Most of Tromsø, including the city centre, is located on the island of"
             u" Tromsøya, 350 kilometres (217 mi) north of the Arctic Circle. In 2012,"
-            u" Tromsøya had a population of 36,088. Substantial parts of the urban…"
+            u" Tromsøya had a population of 36,088. Substantial parts of the urban…",
         )
 
     def test_short_summarize(self):
@@ -87,11 +85,9 @@ class PreviewTestCase(unittest.TestCase):
             u"Tromsø (Norwegian pronunciation: [ˈtrʊmsœ] ( listen); Northern Sami:"
             u" Romsa; Finnish: Tromssa[2] Kven: Tromssa) is a city and municipality in"
             u" Troms county, Norway.",
-
             u"Tromsø lies in Northern Norway. The municipality has a population of"
             u" (2015) 72,066, but with an annual influx of students it has over 75,000"
             u" most of the year.",
-
             u"The city centre of Tromsø contains the highest number of old wooden"
             u" houses in Northern Norway, the oldest house dating from 1789. The Arctic"
             u" Cathedral, a modern church from 1965, is probably the most famous landmark"
@@ -108,7 +104,7 @@ class PreviewTestCase(unittest.TestCase):
             u"\n"
             u"Tromsø lies in Northern Norway. The municipality has a population of"
             u" (2015) 72,066, but with an annual influx of students it has over 75,000"
-            u" most of the year."
+            u" most of the year.",
         )
 
     def test_small_then_large_summarize(self):
@@ -116,7 +112,6 @@ class PreviewTestCase(unittest.TestCase):
             u"Tromsø (Norwegian pronunciation: [ˈtrʊmsœ] ( listen); Northern Sami:"
             u" Romsa; Finnish: Tromssa[2] Kven: Tromssa) is a city and municipality in"
             u" Troms county, Norway.",
-
             u"Tromsø lies in Northern Norway. The municipality has a population of"
             u" (2015) 72,066, but with an annual influx of students it has over 75,000"
             u" most of the year."
@@ -137,7 +132,7 @@ class PreviewTestCase(unittest.TestCase):
             u" (2015) 72,066, but with an annual influx of students it has over 75,000"
             u" most of the year. The city centre of Tromsø contains the highest number"
             u" of old wooden houses in Northern Norway, the oldest house dating from"
-            u" 1789. The Arctic Cathedral, a modern church from…"
+            u" 1789. The Arctic Cathedral, a modern church from…",
         )
 
 
@@ -154,10 +149,7 @@ class PreviewUrlTestCase(unittest.TestCase):
 
         og = decode_and_calc_og(html, "http://example.com/test.html")
 
-        self.assertEquals(og, {
-            u"og:title": u"Foo",
-            u"og:description": u"Some text."
-        })
+        self.assertEquals(og, {u"og:title": u"Foo", u"og:description": u"Some text."})
 
     def test_comment(self):
         html = u"""
@@ -172,10 +164,7 @@ class PreviewUrlTestCase(unittest.TestCase):
 
         og = decode_and_calc_og(html, "http://example.com/test.html")
 
-        self.assertEquals(og, {
-            u"og:title": u"Foo",
-            u"og:description": u"Some text."
-        })
+        self.assertEquals(og, {u"og:title": u"Foo", u"og:description": u"Some text."})
 
     def test_comment2(self):
         html = u"""
@@ -193,10 +182,13 @@ class PreviewUrlTestCase(unittest.TestCase):
 
         og = decode_and_calc_og(html, "http://example.com/test.html")
 
-        self.assertEquals(og, {
-            u"og:title": u"Foo",
-            u"og:description": u"Some text.\n\nSome more text.\n\nText\n\nMore text"
-        })
+        self.assertEquals(
+            og,
+            {
+                u"og:title": u"Foo",
+                u"og:description": u"Some text.\n\nSome more text.\n\nText\n\nMore text",
+            },
+        )
 
     def test_script(self):
         html = u"""
@@ -211,10 +203,7 @@ class PreviewUrlTestCase(unittest.TestCase):
 
         og = decode_and_calc_og(html, "http://example.com/test.html")
 
-        self.assertEquals(og, {
-            u"og:title": u"Foo",
-            u"og:description": u"Some text."
-        })
+        self.assertEquals(og, {u"og:title": u"Foo", u"og:description": u"Some text."})
 
     def test_missing_title(self):
         html = u"""
@@ -227,10 +216,7 @@ class PreviewUrlTestCase(unittest.TestCase):
 
         og = decode_and_calc_og(html, "http://example.com/test.html")
 
-        self.assertEquals(og, {
-            u"og:title": None,
-            u"og:description": u"Some text."
-        })
+        self.assertEquals(og, {u"og:title": None, u"og:description": u"Some text."})
 
     def test_h1_as_title(self):
         html = u"""
@@ -244,10 +230,7 @@ class PreviewUrlTestCase(unittest.TestCase):
 
         og = decode_and_calc_og(html, "http://example.com/test.html")
 
-        self.assertEquals(og, {
-            u"og:title": u"Title",
-            u"og:description": u"Some text."
-        })
+        self.assertEquals(og, {u"og:title": u"Title", u"og:description": u"Some text."})
 
     def test_missing_title_and_broken_h1(self):
         html = u"""
@@ -261,7 +244,4 @@ class PreviewUrlTestCase(unittest.TestCase):
 
         og = decode_and_calc_og(html, "http://example.com/test.html")
 
-        self.assertEquals(og, {
-            u"og:title": None,
-            u"og:description": u"Some text."
-        })
+        self.assertEquals(og, {u"og:title": None, u"og:description": u"Some text."})
diff --git a/tests/test_server.py b/tests/test_server.py
new file mode 100644
index 0000000000..ef74544e93
--- /dev/null
+++ b/tests/test_server.py
@@ -0,0 +1,123 @@
+import re
+
+from twisted.internet.defer import Deferred
+from twisted.test.proto_helpers import MemoryReactorClock
+
+from synapse.api.errors import Codes, SynapseError
+from synapse.http.server import JsonResource
+from synapse.util import Clock
+
+from tests import unittest
+from tests.server import make_request, render, setup_test_homeserver
+
+
+class JsonResourceTests(unittest.TestCase):
+    def setUp(self):
+        self.reactor = MemoryReactorClock()
+        self.hs_clock = Clock(self.reactor)
+        self.homeserver = setup_test_homeserver(
+            self.addCleanup, http_client=None, clock=self.hs_clock, reactor=self.reactor
+        )
+
+    def test_handler_for_request(self):
+        """
+        JsonResource.handler_for_request gives correctly decoded URL args to
+        the callback, while Twisted will give the raw bytes of URL query
+        arguments.
+        """
+        got_kwargs = {}
+
+        def _callback(request, **kwargs):
+            got_kwargs.update(kwargs)
+            return (200, kwargs)
+
+        res = JsonResource(self.homeserver)
+        res.register_paths(
+            "GET", [re.compile("^/_matrix/foo/(?P<room_id>[^/]*)$")], _callback
+        )
+
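+        # %E2%98%83 is the UTF-8 percent-encoding of U+2603 SNOWMAN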
+        request, channel = make_request(b"GET", b"/_matrix/foo/%E2%98%83?a=%E2%98%83")
+        render(request, res, self.reactor)
+
+        self.assertEqual(request.args, {b'a': [u"\N{SNOWMAN}".encode('utf8')]})
+        self.assertEqual(got_kwargs, {u"room_id": u"\N{SNOWMAN}"})
+
+    def test_callback_direct_exception(self):
+        """
+        If the web callback raises an uncaught exception, it will be translated
+        into a 500.
+        """
+
+        def _callback(request, **kwargs):
+            raise Exception("boo")
+
+        res = JsonResource(self.homeserver)
+        res.register_paths("GET", [re.compile("^/_matrix/foo$")], _callback)
+
+        request, channel = make_request(b"GET", b"/_matrix/foo")
+        render(request, res, self.reactor)
+
+        self.assertEqual(channel.result["code"], b'500')
+
+    def test_callback_indirect_exception(self):
+        """
+        If the web callback raises an uncaught exception in a Deferred, it will
+        be translated into a 500.
+        """
+
+        def _throw(*args):
+            raise Exception("boo")
+
+        def _callback(request, **kwargs):
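+            # return a Deferred that fires later and fails when it runs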
+            d = Deferred()
+            d.addCallback(_throw)
+            self.reactor.callLater(1, d.callback, True)
+            return d
+
+        res = JsonResource(self.homeserver)
+        res.register_paths("GET", [re.compile("^/_matrix/foo$")], _callback)
+
+        request, channel = make_request(b"GET", b"/_matrix/foo")
+        render(request, res, self.reactor)
+
+        self.assertEqual(channel.result["code"], b'500')
+
+    def test_callback_synapseerror(self):
+        """
+        If the web callback raises a SynapseError, it returns the appropriate
+        status code and message set in it.
+        """
+
+        def _callback(request, **kwargs):
+            raise SynapseError(403, "Forbidden!!one!", Codes.FORBIDDEN)
+
+        res = JsonResource(self.homeserver)
+        res.register_paths("GET", [re.compile("^/_matrix/foo$")], _callback)
+
+        request, channel = make_request(b"GET", b"/_matrix/foo")
+        render(request, res, self.reactor)
+
+        self.assertEqual(channel.result["code"], b'403')
+        self.assertEqual(channel.json_body["error"], "Forbidden!!one!")
+        self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
+
+    def test_no_handler(self):
+        """
+        If there is no handler to process the request, Synapse will return 400.
+        """
+
+        def _callback(request, **kwargs):
+            """
+            Not ever actually called!
+            """
+            self.fail("shouldn't ever get here")
+
+        res = JsonResource(self.homeserver)
+        res.register_paths("GET", [re.compile("^/_matrix/foo$")], _callback)
+
+        request, channel = make_request(b"GET", b"/_matrix/foobar")
+        render(request, res, self.reactor)
+
+        self.assertEqual(channel.result["code"], b'400')
+        self.assertEqual(channel.json_body["error"], "Unrecognized request")
+        self.assertEqual(channel.json_body["errcode"], "M_UNRECOGNIZED")
diff --git a/tests/test_state.py b/tests/test_state.py
index a5c5e55951..452a123c3a 100644
--- a/tests/test_state.py
+++ b/tests/test_state.py
@@ -13,24 +13,31 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from tests import unittest
+from mock import Mock
+
 from twisted.internet import defer
 
-from synapse.events import FrozenEvent
 from synapse.api.auth import Auth
-from synapse.api.constants import EventTypes, Membership
+from synapse.api.constants import EventTypes, Membership, RoomVersions
+from synapse.events import FrozenEvent
 from synapse.state import StateHandler, StateResolutionHandler
 
-from .utils import MockClock
-
-from mock import Mock
+from tests import unittest
 
+from .utils import MockClock
 
 _next_event_id = 1000
 
 
-def create_event(name=None, type=None, state_key=None, depth=2, event_id=None,
-                 prev_events=[], **kwargs):
+def create_event(
+    name=None,
+    type=None,
+    state_key=None,
+    depth=2,
+    event_id=None,
+    prev_events=[],
+    **kwargs
+):
     global _next_event_id
 
     if not event_id:
@@ -39,9 +46,9 @@ def create_event(name=None, type=None, state_key=None, depth=2, event_id=None,
 
     if not name:
         if state_key is not None:
-            name = "<%s-%s, %s>" % (type, state_key, event_id,)
+            name = "<%s-%s, %s>" % (type, state_key, event_id)
         else:
-            name = "<%s, %s>" % (type, event_id,)
+            name = "<%s, %s>" % (type, event_id)
 
     d = {
         "event_id": event_id,
@@ -80,8 +87,9 @@ class StateGroupStore(object):
 
         return defer.succeed(groups)
 
-    def store_state_group(self, event_id, room_id, prev_group, delta_ids,
-                          current_state_ids):
+    def store_state_group(
+        self, event_id, room_id, prev_group, delta_ids, current_state_ids
+    ):
         state_group = self._next_group
         self._next_group += 1
 
@@ -91,7 +99,8 @@ class StateGroupStore(object):
 
     def get_events(self, event_ids, **kwargs):
         return {
-            e_id: self._event_id_to_event[e_id] for e_id in event_ids
+            e_id: self._event_id_to_event[e_id]
+            for e_id in event_ids
             if e_id in self._event_id_to_event
         }
 
@@ -108,6 +117,9 @@ class StateGroupStore(object):
     def register_event_id_state_group(self, event_id, state_group):
         self._event_to_state_group[event_id] = state_group
 
+    def get_room_version(self, room_id):
+        return RoomVersions.V1
+
 
 class DictObj(dict):
     def __init__(self, **kwargs):
@@ -129,9 +141,7 @@ class Graph(object):
                 prev_events = []
 
             events[event_id] = create_event(
-                event_id=event_id,
-                prev_events=prev_events,
-                **fields
+                event_id=event_id, prev_events=prev_events, **fields
             )
 
         self._leaves = clobbered
@@ -147,10 +157,15 @@ class Graph(object):
 class StateTestCase(unittest.TestCase):
     def setUp(self):
         self.store = StateGroupStore()
-        hs = Mock(spec_set=[
-            "get_datastore", "get_auth", "get_state_handler", "get_clock",
-            "get_state_resolution_handler",
-        ])
+        hs = Mock(
+            spec_set=[
+                "get_datastore",
+                "get_auth",
+                "get_state_handler",
+                "get_clock",
+                "get_state_resolution_handler",
+            ]
+        )
         hs.get_datastore.return_value = self.store
         hs.get_state_handler.return_value = None
         hs.get_clock.return_value = MockClock()
@@ -165,34 +180,14 @@ class StateTestCase(unittest.TestCase):
         graph = Graph(
             nodes={
                 "START": DictObj(
-                    type=EventTypes.Create,
-                    state_key="",
-                    depth=1,
-                ),
-                "A": DictObj(
-                    type=EventTypes.Message,
-                    depth=2,
-                ),
-                "B": DictObj(
-                    type=EventTypes.Message,
-                    depth=3,
-                ),
-                "C": DictObj(
-                    type=EventTypes.Name,
-                    state_key="",
-                    depth=3,
-                ),
-                "D": DictObj(
-                    type=EventTypes.Message,
-                    depth=4,
+                    type=EventTypes.Create, state_key="", content={}, depth=1,
                 ),
+                "A": DictObj(type=EventTypes.Message, depth=2),
+                "B": DictObj(type=EventTypes.Message, depth=3),
+                "C": DictObj(type=EventTypes.Name, state_key="", depth=3),
+                "D": DictObj(type=EventTypes.Message, depth=4),
             },
-            edges={
-                "A": ["START"],
-                "B": ["A"],
-                "C": ["A"],
-                "D": ["B", "C"]
-            }
+            edges={"A": ["START"], "B": ["A"], "C": ["A"], "D": ["B", "C"]},
         )
 
         self.store.register_events(graph.walk())
@@ -204,7 +199,8 @@ class StateTestCase(unittest.TestCase):
             self.store.register_event_context(event, context)
             context_store[event.event_id] = context
 
-        self.assertEqual(2, len(context_store["D"].prev_state_ids))
+        prev_state_ids = yield context_store["D"].get_prev_state_ids(self.store)
+        self.assertEqual(2, len(prev_state_ids))
 
     @defer.inlineCallbacks
     def test_branch_basic_conflict(self):
@@ -223,27 +219,11 @@ class StateTestCase(unittest.TestCase):
                     membership=Membership.JOIN,
                     depth=2,
                 ),
-                "B": DictObj(
-                    type=EventTypes.Name,
-                    state_key="",
-                    depth=3,
-                ),
-                "C": DictObj(
-                    type=EventTypes.Name,
-                    state_key="",
-                    depth=4,
-                ),
-                "D": DictObj(
-                    type=EventTypes.Message,
-                    depth=5,
-                ),
+                "B": DictObj(type=EventTypes.Name, state_key="", depth=3),
+                "C": DictObj(type=EventTypes.Name, state_key="", depth=4),
+                "D": DictObj(type=EventTypes.Message, depth=5),
             },
-            edges={
-                "A": ["START"],
-                "B": ["A"],
-                "C": ["A"],
-                "D": ["B", "C"]
-            }
+            edges={"A": ["START"], "B": ["A"], "C": ["A"], "D": ["B", "C"]},
         )
 
         self.store.register_events(graph.walk())
@@ -255,9 +235,10 @@ class StateTestCase(unittest.TestCase):
             self.store.register_event_context(event, context)
             context_store[event.event_id] = context
 
+        prev_state_ids = yield context_store["D"].get_prev_state_ids(self.store)
+
         self.assertSetEqual(
-            {"START", "A", "C"},
-            {e_id for e_id in context_store["D"].prev_state_ids.values()}
+            {"START", "A", "C"}, {e_id for e_id in prev_state_ids.values()}
         )
 
     @defer.inlineCallbacks
@@ -277,11 +258,7 @@ class StateTestCase(unittest.TestCase):
                     membership=Membership.JOIN,
                     depth=2,
                 ),
-                "B": DictObj(
-                    type=EventTypes.Name,
-                    state_key="",
-                    depth=3,
-                ),
+                "B": DictObj(type=EventTypes.Name, state_key="", depth=3),
                 "C": DictObj(
                     type=EventTypes.Member,
                     state_key="@user_id_2:example.com",
@@ -295,18 +272,9 @@ class StateTestCase(unittest.TestCase):
                     depth=4,
                     sender="@user_id_2:example.com",
                 ),
-                "E": DictObj(
-                    type=EventTypes.Message,
-                    depth=5,
-                ),
+                "E": DictObj(type=EventTypes.Message, depth=5),
             },
-            edges={
-                "A": ["START"],
-                "B": ["A"],
-                "C": ["B"],
-                "D": ["B"],
-                "E": ["C", "D"]
-            }
+            edges={"A": ["START"], "B": ["A"], "C": ["B"], "D": ["B"], "E": ["C", "D"]},
         )
 
         self.store.register_events(graph.walk())
@@ -318,9 +286,10 @@ class StateTestCase(unittest.TestCase):
             self.store.register_event_context(event, context)
             context_store[event.event_id] = context
 
+        prev_state_ids = yield context_store["E"].get_prev_state_ids(self.store)
+
         self.assertSetEqual(
-            {"START", "A", "B", "C"},
-            {e for e in context_store["E"].prev_state_ids.values()}
+            {"START", "A", "B", "C"}, {e for e in prev_state_ids.values()}
         )
 
     @defer.inlineCallbacks
@@ -352,30 +321,17 @@ class StateTestCase(unittest.TestCase):
                 state_key="",
                 content={
                     "events": {"m.room.name": 50},
-                    "users": {userid1: 100,
-                              userid2: 60},
+                    "users": {userid1: 100, userid2: 60},
                 },
             ),
-            "A5": DictObj(
-                type=EventTypes.Name,
-                state_key="",
-            ),
+            "A5": DictObj(type=EventTypes.Name, state_key=""),
             "B": DictObj(
                 type=EventTypes.PowerLevels,
                 state_key="",
-                content={
-                    "events": {"m.room.name": 50},
-                    "users": {userid2: 30},
-                },
-            ),
-            "C": DictObj(
-                type=EventTypes.Name,
-                state_key="",
-                sender=userid2,
-            ),
-            "D": DictObj(
-                type=EventTypes.Message,
+                content={"events": {"m.room.name": 50}, "users": {userid2: 30}},
             ),
+            "C": DictObj(type=EventTypes.Name, state_key="", sender=userid2),
+            "D": DictObj(type=EventTypes.Message),
         }
         edges = {
             "A2": ["A1"],
@@ -384,7 +340,7 @@ class StateTestCase(unittest.TestCase):
             "A5": ["A4"],
             "B": ["A5"],
             "C": ["A5"],
-            "D": ["B", "C"]
+            "D": ["B", "C"],
         }
         self._add_depths(nodes, edges)
         graph = Graph(nodes, edges)
@@ -398,9 +354,10 @@ class StateTestCase(unittest.TestCase):
             self.store.register_event_context(event, context)
             context_store[event.event_id] = context
 
+        prev_state_ids = yield context_store["D"].get_prev_state_ids(self.store)
+
         self.assertSetEqual(
-            {"A1", "A2", "A3", "A5", "B"},
-            {e for e in context_store["D"].prev_state_ids.values()}
+            {"A1", "A2", "A3", "A5", "B"}, {e for e in prev_state_ids.values()}
         )
 
     def _add_depths(self, nodes, edges):
@@ -425,12 +382,12 @@ class StateTestCase(unittest.TestCase):
             create_event(type="test2", state_key=""),
         ]
 
-        context = yield self.state.compute_event_context(
-            event, old_state=old_state
-        )
+        context = yield self.state.compute_event_context(event, old_state=old_state)
+
+        current_state_ids = yield context.get_current_state_ids(self.store)
 
         self.assertEqual(
-            set(e.event_id for e in old_state), set(context.current_state_ids.values())
+            set(e.event_id for e in old_state), set(current_state_ids.values())
         )
 
         self.assertIsNotNone(context.state_group)
@@ -445,20 +402,19 @@ class StateTestCase(unittest.TestCase):
             create_event(type="test2", state_key=""),
         ]
 
-        context = yield self.state.compute_event_context(
-            event, old_state=old_state
-        )
+        context = yield self.state.compute_event_context(event, old_state=old_state)
+
+        prev_state_ids = yield context.get_prev_state_ids(self.store)
 
         self.assertEqual(
-            set(e.event_id for e in old_state), set(context.prev_state_ids.values())
+            set(e.event_id for e in old_state), set(prev_state_ids.values())
         )
 
     @defer.inlineCallbacks
     def test_trivial_annotate_message(self):
         prev_event_id = "prev_event_id"
         event = create_event(
-            type="test_message", name="event2",
-            prev_events=[(prev_event_id, {})],
+            type="test_message", name="event2", prev_events=[(prev_event_id, {})]
         )
 
         old_state = [
@@ -468,16 +424,20 @@ class StateTestCase(unittest.TestCase):
         ]
 
         group_name = self.store.store_state_group(
-            prev_event_id, event.room_id, None, None,
+            prev_event_id,
+            event.room_id,
+            None,
+            None,
             {(e.type, e.state_key): e.event_id for e in old_state},
         )
         self.store.register_event_id_state_group(prev_event_id, group_name)
 
         context = yield self.state.compute_event_context(event)
 
+        current_state_ids = yield context.get_current_state_ids(self.store)
+
         self.assertEqual(
-            set([e.event_id for e in old_state]),
-            set(context.current_state_ids.values())
+            set([e.event_id for e in old_state]), set(current_state_ids.values())
         )
 
         self.assertEqual(group_name, context.state_group)
@@ -486,8 +446,7 @@ class StateTestCase(unittest.TestCase):
     def test_trivial_annotate_state(self):
         prev_event_id = "prev_event_id"
         event = create_event(
-            type="state", state_key="", name="event2",
-            prev_events=[(prev_event_id, {})],
+            type="state", state_key="", name="event2", prev_events=[(prev_event_id, {})]
         )
 
         old_state = [
@@ -497,16 +456,20 @@ class StateTestCase(unittest.TestCase):
         ]
 
         group_name = self.store.store_state_group(
-            prev_event_id, event.room_id, None, None,
+            prev_event_id,
+            event.room_id,
+            None,
+            None,
             {(e.type, e.state_key): e.event_id for e in old_state},
         )
         self.store.register_event_id_state_group(prev_event_id, group_name)
 
         context = yield self.state.compute_event_context(event)
 
+        prev_state_ids = yield context.get_prev_state_ids(self.store)
+
         self.assertEqual(
-            set([e.event_id for e in old_state]),
-            set(context.prev_state_ids.values())
+            set([e.event_id for e in old_state]), set(prev_state_ids.values())
         )
 
         self.assertIsNotNone(context.state_group)
@@ -516,13 +479,12 @@ class StateTestCase(unittest.TestCase):
         prev_event_id1 = "event_id1"
         prev_event_id2 = "event_id2"
         event = create_event(
-            type="test_message", name="event3",
+            type="test_message",
+            name="event3",
             prev_events=[(prev_event_id1, {}), (prev_event_id2, {})],
         )
 
-        creation = create_event(
-            type=EventTypes.Create, state_key=""
-        )
+        creation = create_event(type=EventTypes.Create, state_key="")
 
         old_state_1 = [
             creation,
@@ -542,10 +504,12 @@ class StateTestCase(unittest.TestCase):
         self.store.register_events(old_state_2)
 
         context = yield self._get_context(
-            event, prev_event_id1, old_state_1, prev_event_id2, old_state_2,
+            event, prev_event_id1, old_state_1, prev_event_id2, old_state_2
         )
 
-        self.assertEqual(len(context.current_state_ids), 6)
+        current_state_ids = yield context.get_current_state_ids(self.store)
+
+        self.assertEqual(len(current_state_ids), 6)
 
         self.assertIsNotNone(context.state_group)
 
@@ -554,13 +518,13 @@ class StateTestCase(unittest.TestCase):
         prev_event_id1 = "event_id1"
         prev_event_id2 = "event_id2"
         event = create_event(
-            type="test4", state_key="", name="event",
+            type="test4",
+            state_key="",
+            name="event",
             prev_events=[(prev_event_id1, {}), (prev_event_id2, {})],
         )
 
-        creation = create_event(
-            type=EventTypes.Create, state_key=""
-        )
+        creation = create_event(type=EventTypes.Create, state_key="")
 
         old_state_1 = [
             creation,
@@ -582,10 +546,12 @@ class StateTestCase(unittest.TestCase):
         self.store.get_events = store.get_events
 
         context = yield self._get_context(
-            event, prev_event_id1, old_state_1, prev_event_id2, old_state_2,
+            event, prev_event_id1, old_state_1, prev_event_id2, old_state_2
         )
 
-        self.assertEqual(len(context.current_state_ids), 6)
+        current_state_ids = yield context.get_current_state_ids(self.store)
+
+        self.assertEqual(len(current_state_ids), 6)
 
         self.assertIsNotNone(context.state_group)
 
@@ -594,31 +560,37 @@ class StateTestCase(unittest.TestCase):
         prev_event_id1 = "event_id1"
         prev_event_id2 = "event_id2"
         event = create_event(
-            type="test4", name="event",
+            type="test4",
+            name="event",
             prev_events=[(prev_event_id1, {}), (prev_event_id2, {})],
         )
 
         member_event = create_event(
             type=EventTypes.Member,
             state_key="@user_id:example.com",
-            content={
-                "membership": Membership.JOIN,
-            }
+            content={"membership": Membership.JOIN},
+        )
+
+        power_levels = create_event(
+            type=EventTypes.PowerLevels,
+            state_key="",
+            content={"users": {"@foo:bar": "100", "@user_id:example.com": "100"}},
         )
 
         creation = create_event(
-            type=EventTypes.Create, state_key="",
-            content={"creator": "@foo:bar"}
+            type=EventTypes.Create, state_key="", content={"creator": "@foo:bar"}
         )
 
         old_state_1 = [
             creation,
+            power_levels,
             member_event,
             create_event(type="test1", state_key="1", depth=1),
         ]
 
         old_state_2 = [
             creation,
+            power_levels,
             member_event,
             create_event(type="test1", state_key="1", depth=2),
         ]
@@ -629,24 +601,26 @@ class StateTestCase(unittest.TestCase):
         self.store.get_events = store.get_events
 
         context = yield self._get_context(
-            event, prev_event_id1, old_state_1, prev_event_id2, old_state_2,
+            event, prev_event_id1, old_state_1, prev_event_id2, old_state_2
         )
 
-        self.assertEqual(
-            old_state_2[2].event_id, context.current_state_ids[("test1", "1")]
-        )
+        current_state_ids = yield context.get_current_state_ids(self.store)
+
+        self.assertEqual(old_state_2[3].event_id, current_state_ids[("test1", "1")])
 
         # Reverse the depth to make sure we are actually using the depths
         # during state resolution.
 
         old_state_1 = [
             creation,
+            power_levels,
             member_event,
             create_event(type="test1", state_key="1", depth=2),
         ]
 
         old_state_2 = [
             creation,
+            power_levels,
             member_event,
             create_event(type="test1", state_key="1", depth=1),
         ]
@@ -655,23 +629,30 @@ class StateTestCase(unittest.TestCase):
         store.register_events(old_state_2)
 
         context = yield self._get_context(
-            event, prev_event_id1, old_state_1, prev_event_id2, old_state_2,
+            event, prev_event_id1, old_state_1, prev_event_id2, old_state_2
         )
 
-        self.assertEqual(
-            old_state_1[2].event_id, context.current_state_ids[("test1", "1")]
-        )
+        current_state_ids = yield context.get_current_state_ids(self.store)
+
+        self.assertEqual(old_state_1[3].event_id, current_state_ids[("test1", "1")])
 
-    def _get_context(self, event, prev_event_id_1, old_state_1, prev_event_id_2,
-                     old_state_2):
+    def _get_context(
+        self, event, prev_event_id_1, old_state_1, prev_event_id_2, old_state_2
+    ):
         sg1 = self.store.store_state_group(
-            prev_event_id_1, event.room_id, None, None,
+            prev_event_id_1,
+            event.room_id,
+            None,
+            None,
             {(e.type, e.state_key): e.event_id for e in old_state_1},
         )
         self.store.register_event_id_state_group(prev_event_id_1, sg1)
 
         sg2 = self.store.store_state_group(
-            prev_event_id_2, event.room_id, None, None,
+            prev_event_id_2,
+            event.room_id,
+            None,
+            None,
             {(e.type, e.state_key): e.event_id for e in old_state_2},
         )
         self.store.register_event_id_state_group(prev_event_id_2, sg2)
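A change running through this file: prev_state_ids and current_state_ids are no longer plain attributes on the event context, so the tests now call yield context.get_prev_state_ids(store) and get_current_state_ids(store). A sketch of the deferred-accessor shape that implies, with illustrative names (store.get_state_ids_for_group below is an assumption, not Synapse's actual method):

from twisted.internet import defer


class LazyStateContext(object):
    """Illustrative stand-in for an event context that fetches its state
    ids from the store on demand rather than holding them as attributes."""

    def __init__(self, state_group):
        self.state_group = state_group
        self._prev_state_ids = None

    @defer.inlineCallbacks
    def get_prev_state_ids(self, store):
        if self._prev_state_ids is None:
            # store.get_state_ids_for_group is assumed to return a Deferred
            # of a {(type, state_key): event_id} map.
            self._prev_state_ids = yield store.get_state_ids_for_group(
                self.state_group
            )
        defer.returnValue(self._prev_state_ids)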
diff --git a/tests/test_test_utils.py b/tests/test_test_utils.py
index d28bb726bb..b921ac52c0 100644
--- a/tests/test_test_utils.py
+++ b/tests/test_test_utils.py
@@ -14,12 +14,10 @@
 # limitations under the License.
 
 from tests import unittest
-
 from tests.utils import MockClock
 
 
 class MockClockTestCase(unittest.TestCase):
-
     def setUp(self):
         self.clock = MockClock()
 
@@ -35,10 +33,12 @@ class MockClockTestCase(unittest.TestCase):
 
         def _cb0():
             invoked[0] = 1
+
         self.clock.call_later(10, _cb0)
 
         def _cb1():
             invoked[1] = 1
+
         self.clock.call_later(20, _cb1)
 
         self.assertFalse(invoked[0])
@@ -57,10 +57,12 @@ class MockClockTestCase(unittest.TestCase):
 
         def _cb0():
             invoked[0] = 1
+
         t0 = self.clock.call_later(10, _cb0)
 
         def _cb1():
             invoked[1] = 1
+
         self.clock.call_later(20, _cb1)
 
         self.clock.cancel_call_later(t0)
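These tests exercise exactly three behaviours of tests.utils.MockClock: call_later schedules a callback after a delay, cancel_call_later cancels it, and advance_time fires whatever has come due. A self-contained sketch with those semantics (the real MockClock may differ in detail):

class SketchMockClock(object):
    """Minimal deterministic clock: callbacks fire when advance_time
    passes their deadline. Illustrative only."""

    def __init__(self):
        self.now = 0
        self._pending = []  # list of [deadline, callback, cancelled]

    def call_later(self, delay, callback):
        entry = [self.now + delay, callback, False]
        self._pending.append(entry)
        return entry

    def cancel_call_later(self, entry):
        entry[2] = True

    def advance_time(self, secs):
        self.now += secs
        still_pending = []
        for entry in self._pending:
            deadline, callback, cancelled = entry
            if cancelled:
                continue
            if deadline <= self.now:
                callback()
            else:
                still_pending.append(entry)
        self._pending = still_pending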
diff --git a/tests/test_types.py b/tests/test_types.py
index 115def2287..0f5c8bfaf9 100644
--- a/tests/test_types.py
+++ b/tests/test_types.py
@@ -13,13 +13,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from tests import unittest
-
 from synapse.api.errors import SynapseError
-from synapse.server import HomeServer
-from synapse.types import UserID, RoomAlias, GroupID
+from synapse.types import GroupID, RoomAlias, UserID
+
+from tests import unittest
+from tests.utils import TestHomeServer
 
-mock_homeserver = HomeServer(hostname="my.domain")
+mock_homeserver = TestHomeServer(hostname="my.domain")
 
 
 class UserIDTestCase(unittest.TestCase):
@@ -69,10 +69,7 @@ class GroupIDTestCase(unittest.TestCase):
         self.assertEqual("my.domain", group_id.domain)
 
     def test_validate(self):
-        bad_ids = [
-            "$badsigil:domain",
-            "+:empty",
-        ] + [
+        bad_ids = ["$badsigil:domain", "+:empty"] + [
             "+group" + c + ":domain" for c in "A%?æ£"
         ]
         for id_string in bad_ids:
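test_validate requires that group ids start with the "+" sigil, contain a ":" separator, have a non-empty localpart, and reject characters such as "A%?æ£". A hedged sketch of that rule; the character class below is inferred from the rejected examples, and the real GroupID.from_string in synapse.types may be stricter:

import re

# Assumed localpart alphabet: lowercase ASCII, digits and a little
# punctuation, derived from the rejections tested above.
_LOCALPART_RE = re.compile(r"^[a-z0-9._=\-/]+$")


def validate_group_id(id_string):
    """Return (localpart, domain) or raise ValueError. Illustrative only."""
    if not id_string.startswith("+"):
        raise ValueError("group ids must start with '+'")
    if ":" not in id_string:
        raise ValueError("group ids must contain a ':' separator")
    localpart, domain = id_string[1:].split(":", 1)
    if not localpart or not _LOCALPART_RE.match(localpart):
        raise ValueError("invalid group localpart %r" % (localpart,))
    return localpart, domain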
diff --git a/tests/test_visibility.py b/tests/test_visibility.py
new file mode 100644
index 0000000000..8d8ce0cab9
--- /dev/null
+++ b/tests/test_visibility.py
@@ -0,0 +1,329 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+
+from twisted.internet import defer
+from twisted.internet.defer import succeed
+
+from synapse.events import FrozenEvent
+from synapse.visibility import filter_events_for_server
+
+import tests.unittest
+from tests.utils import create_room, setup_test_homeserver
+
+logger = logging.getLogger(__name__)
+
+TEST_ROOM_ID = "!TEST:ROOM"
+
+
+class FilterEventsForServerTestCase(tests.unittest.TestCase):
+    @defer.inlineCallbacks
+    def setUp(self):
+        self.hs = yield setup_test_homeserver(self.addCleanup)
+        self.event_creation_handler = self.hs.get_event_creation_handler()
+        self.event_builder_factory = self.hs.get_event_builder_factory()
+        self.store = self.hs.get_datastore()
+
+        yield create_room(self.hs, TEST_ROOM_ID, "@someone:ROOM")
+
+    @defer.inlineCallbacks
+    def test_filtering(self):
+        #
+        # The events to be filtered consist of 10 membership events (it doesn't
+        # really matter if they are joins or leaves, so let's make them joins).
+        # One of those membership events is going to be for a user on the
+        # server we are filtering for (so we can check the filtering is doing
+        # the right thing).
+        #
+
+        # before we do that, we persist some other events to act as state.
+        self.inject_visibility("@admin:hs", "joined")
+        for i in range(0, 10):
+            yield self.inject_room_member("@resident%i:hs" % i)
+
+        events_to_filter = []
+
+        for i in range(0, 10):
+            user = "@user%i:%s" % (i, "test_server" if i == 5 else "other_server")
+            evt = yield self.inject_room_member(user, extra_content={"a": "b"})
+            events_to_filter.append(evt)
+
+        filtered = yield filter_events_for_server(
+            self.store, "test_server", events_to_filter
+        )
+
+        # the result should be 5 redacted events, and 5 unredacted events.
+        for i in range(0, 5):
+            self.assertEqual(events_to_filter[i].event_id, filtered[i].event_id)
+            self.assertNotIn("a", filtered[i].content)
+
+        for i in range(5, 10):
+            self.assertEqual(events_to_filter[i].event_id, filtered[i].event_id)
+            self.assertEqual(filtered[i].content["a"], "b")
+
+    @tests.unittest.DEBUG
+    @defer.inlineCallbacks
+    def test_erased_user(self):
+        # 4 message events, from erased and unerased users, with a membership
+        # change in the middle of them.
+        events_to_filter = []
+
+        evt = yield self.inject_message("@unerased:local_hs")
+        events_to_filter.append(evt)
+
+        evt = yield self.inject_message("@erased:local_hs")
+        events_to_filter.append(evt)
+
+        evt = yield self.inject_room_member("@joiner:remote_hs")
+        events_to_filter.append(evt)
+
+        evt = yield self.inject_message("@unerased:local_hs")
+        events_to_filter.append(evt)
+
+        evt = yield self.inject_message("@erased:local_hs")
+        events_to_filter.append(evt)
+
+        # the user to be erased is now marked as erased
+        yield self.hs.get_datastore().mark_user_erased("@erased:local_hs")
+
+        # ... and the filtering happens.
+        filtered = yield filter_events_for_server(
+            self.store, "test_server", events_to_filter
+        )
+
+        for i in range(0, len(events_to_filter)):
+            self.assertEqual(
+                events_to_filter[i].event_id,
+                filtered[i].event_id,
+                "Unexpected event at result position %i" % (i,),
+            )
+
+        for i in (0, 3):
+            self.assertEqual(
+                events_to_filter[i].content["body"],
+                filtered[i].content["body"],
+                "Unexpected event content at result position %i" % (i,),
+            )
+
+        for i in (1, 4):
+            self.assertNotIn("body", filtered[i].content)
+
+    @defer.inlineCallbacks
+    def inject_visibility(self, user_id, visibility):
+        content = {"history_visibility": visibility}
+        builder = self.event_builder_factory.new(
+            {
+                "type": "m.room.history_visibility",
+                "sender": user_id,
+                "state_key": "",
+                "room_id": TEST_ROOM_ID,
+                "content": content,
+            }
+        )
+
+        event, context = yield self.event_creation_handler.create_new_client_event(
+            builder
+        )
+        yield self.hs.get_datastore().persist_event(event, context)
+        defer.returnValue(event)
+
+    @defer.inlineCallbacks
+    def inject_room_member(self, user_id, membership="join", extra_content={}):
+        content = {"membership": membership}
+        content.update(extra_content)
+        builder = self.event_builder_factory.new(
+            {
+                "type": "m.room.member",
+                "sender": user_id,
+                "state_key": user_id,
+                "room_id": TEST_ROOM_ID,
+                "content": content,
+            }
+        )
+
+        event, context = yield self.event_creation_handler.create_new_client_event(
+            builder
+        )
+
+        yield self.hs.get_datastore().persist_event(event, context)
+        defer.returnValue(event)
+
+    @defer.inlineCallbacks
+    def inject_message(self, user_id, content=None):
+        if content is None:
+            content = {"body": "testytest"}
+        builder = self.event_builder_factory.new(
+            {
+                "type": "m.room.message",
+                "sender": user_id,
+                "room_id": TEST_ROOM_ID,
+                "content": content,
+            }
+        )
+
+        event, context = yield self.event_creation_handler.create_new_client_event(
+            builder
+        )
+
+        yield self.hs.get_datastore().persist_event(event, context)
+        defer.returnValue(event)
+
+    @defer.inlineCallbacks
+    def test_large_room(self):
+        # see what happens when we have a large room with a hundred thousand
+        # membership events
+
+        # As above, the events to be filtered consist of 10 membership events,
+        # where one of them is for a user on the server we are filtering for.
+
+        import cProfile
+        import pstats
+        import time
+
+        # we stub out the store, because building up all that state the normal
+        # way is very slow.
+        test_store = _TestStore()
+
+        # our initial state is 100000 membership events and one
+        # history_visibility event.
+        room_state = []
+
+        history_visibility_evt = FrozenEvent(
+            {
+                "event_id": "$history_vis",
+                "type": "m.room.history_visibility",
+                "sender": "@resident_user_0:test.com",
+                "state_key": "",
+                "room_id": TEST_ROOM_ID,
+                "content": {"history_visibility": "joined"},
+            }
+        )
+        room_state.append(history_visibility_evt)
+        test_store.add_event(history_visibility_evt)
+
+        for i in range(0, 100000):
+            user = "@resident_user_%i:test.com" % (i,)
+            evt = FrozenEvent(
+                {
+                    "event_id": "$res_event_%i" % (i,),
+                    "type": "m.room.member",
+                    "state_key": user,
+                    "sender": user,
+                    "room_id": TEST_ROOM_ID,
+                    "content": {"membership": "join", "extra": "zzz,"},
+                }
+            )
+            room_state.append(evt)
+            test_store.add_event(evt)
+
+        events_to_filter = []
+        for i in range(0, 10):
+            user = "@user%i:%s" % (i, "test_server" if i == 5 else "other_server")
+            evt = FrozenEvent(
+                {
+                    "event_id": "$evt%i" % (i,),
+                    "type": "m.room.member",
+                    "state_key": user,
+                    "sender": user,
+                    "room_id": TEST_ROOM_ID,
+                    "content": {"membership": "join", "extra": "zzz"},
+                }
+            )
+            events_to_filter.append(evt)
+            room_state.append(evt)
+
+            test_store.add_event(evt)
+            test_store.set_state_ids_for_event(
+                evt, {(e.type, e.state_key): e.event_id for e in room_state}
+            )
+
+        pr = cProfile.Profile()
+        pr.enable()
+
+        logger.info("Starting filtering")
+        start = time.time()
+        filtered = yield filter_events_for_server(
+            test_store, "test_server", events_to_filter
+        )
+        logger.info("Filtering took %f seconds", time.time() - start)
+
+        pr.disable()
+        with open("filter_events_for_server.profile", "w+") as f:
+            ps = pstats.Stats(pr, stream=f).sort_stats('cumulative')
+            ps.print_stats()
+
+        # the result should be 5 redacted events, and 5 unredacted events.
+        for i in range(0, 5):
+            self.assertEqual(events_to_filter[i].event_id, filtered[i].event_id)
+            self.assertNotIn("extra", filtered[i].content)
+
+        for i in range(5, 10):
+            self.assertEqual(events_to_filter[i].event_id, filtered[i].event_id)
+            self.assertEqual(filtered[i].content["extra"], "zzz")
+
+    test_large_room.skip = "Disabled by default because it's slow"
+
+
+class _TestStore(object):
+    """Implements a few methods of the DataStore, so that we can test
+    filter_events_for_server
+
+    """
+
+    def __init__(self):
+        # data for get_events: a map from event_id to event
+        self.events = {}
+
+        # data for get_state_ids_for_events mock: a map from event_id to
+        # a map from (type_state_key) -> event_id for the state at that
+        # event
+        self.state_ids_for_events = {}
+
+    def add_event(self, event):
+        self.events[event.event_id] = event
+
+    def set_state_ids_for_event(self, event, state):
+        self.state_ids_for_events[event.event_id] = state
+
+    def get_state_ids_for_events(self, events, types):
+        res = {}
+        include_memberships = False
+        for (type, state_key) in types:
+            if type == "m.room.history_visibility":
+                continue
+            if type != "m.room.member" or state_key is not None:
+                raise RuntimeError(
+                    "Unimplemented: get_state_ids with type (%s, %s)"
+                    % (type, state_key)
+                )
+            include_memberships = True
+
+        if include_memberships:
+            for event_id in events:
+                res[event_id] = self.state_ids_for_events[event_id]
+
+        else:
+            k = ("m.room.history_visibility", "")
+            for event_id in events:
+                hve = self.state_ids_for_events[event_id][k]
+                res[event_id] = {k: hve}
+
+        return succeed(res)
+
+    def get_events(self, events):
+        return succeed({event_id: self.events[event_id] for event_id in events})
+
+    def are_users_erased(self, users):
+        return succeed({u: False for u in users})
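Two properties are asserted above: filter_events_for_server preserves list length and ordering, returning redacted copies (content pruned) rather than dropping events, and erased local users have their message bodies pruned. A deliberately simplified sketch of the pruning decision under "joined" history visibility; events are modelled as plain dicts here, and the real function also handles the other visibility settings and erasure:

def filter_for_server(server_name, events, membership_at_event):
    """Return the events in order, pruning content that `server_name`
    should not see.

    membership_at_event: event_id -> set of servers with a joined member
    at that point; a simplified model of the state lookups the real
    filter_events_for_server performs via the store.
    """
    result = []
    for event in events:
        if server_name in membership_at_event.get(event["event_id"], set()):
            result.append(event)
        else:
            # "Redacted": keep the envelope, prune the content.
            result.append(dict(event, content={}))
    return result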
diff --git a/tests/unittest.py b/tests/unittest.py
index 184fe880f3..8b513bb32b 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2018 New Vector
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,12 +16,21 @@
 
 import logging
 
+from mock import Mock
+
+from canonicaljson import json
+
 import twisted
 import twisted.logger
 from twisted.trial import unittest
 
+from synapse.http.server import JsonResource
+from synapse.server import HomeServer
+from synapse.types import UserID, create_requester
 from synapse.util.logcontext import LoggingContextFilter
 
+from tests.server import get_clock, make_request, render, setup_test_homeserver
+
 # Set up putting Synapse's logs into Trial's.
 rootLogger = logging.getLogger()
 
@@ -35,7 +45,10 @@ class ToTwistedHandler(logging.Handler):
     def emit(self, record):
         log_entry = self.format(record)
         log_level = record.levelname.lower().replace('warning', 'warn')
-        self.tx_log.emit(twisted.logger.LogLevel.levelWithName(log_level), log_entry)
+        self.tx_log.emit(
+            twisted.logger.LogLevel.levelWithName(log_level),
+            log_entry.replace("{", r"(").replace("}", r")"),
+        )
 
 
 handler = ToTwistedHandler()
@@ -53,6 +66,7 @@ def around(target):
     def method_name(orig, *args, **kwargs):
         return orig(*args, **kwargs)
     """
+
     def _around(code):
         name = code.__name__
         orig = getattr(target, name)
@@ -86,6 +100,7 @@ class TestCase(unittest.TestCase):
             old_level = logging.getLogger().level
 
             if old_level != level:
+
                 @around(self)
                 def tearDown(orig):
                     ret = orig()
@@ -106,9 +121,172 @@ class TestCase(unittest.TestCase):
             except AssertionError as e:
                 raise (type(e))(e.message + " for '.%s'" % key)
 
+    def assert_dict(self, required, actual):
+        """Does a partial assert of a dict.
+
+        Args:
+            required (dict): The keys and value which MUST be in 'actual'.
+            actual (dict): The test result. Extra keys will not be checked.
+        """
+        for key in required:
+            self.assertEquals(
+                required[key], actual[key], msg="%s mismatch. %s" % (key, actual)
+            )
+
 
 def DEBUG(target):
     """A decorator to set the .loglevel attribute to logging.DEBUG.
     Can apply to either a TestCase or an individual test method."""
     target.loglevel = logging.DEBUG
     return target
+
+
+class HomeserverTestCase(TestCase):
+    """
+    A base TestCase that reduces boilerplate for HomeServer-using test cases.
+
+    Attributes:
+        servlets (list[function]): List of servlet registration functions.
+        user_id (str): The user ID to assume if auth is hijacked.
+        hijack_auth (bool): Whether to hijack auth to return the user specified
+        in user_id.
+    """
+
+    servlets = []
+    hijack_auth = True
+
+    def setUp(self):
+        """
+        Set up the TestCase by calling the homeserver constructor, optionally
+        hijacking the authentication system to return a fixed user, and then
+        calling the prepare function.
+        """
+        self.reactor, self.clock = get_clock()
+        self._hs_args = {"clock": self.clock, "reactor": self.reactor}
+        self.hs = self.make_homeserver(self.reactor, self.clock)
+
+        if self.hs is None:
+            raise Exception("No homeserver returned from make_homeserver.")
+
+        if not isinstance(self.hs, HomeServer):
+            raise Exception("A homeserver wasn't returned, but %r" % (self.hs,))
+
+        # Register the resources
+        self.resource = JsonResource(self.hs)
+
+        for servlet in self.servlets:
+            servlet(self.hs, self.resource)
+
+        if hasattr(self, "user_id"):
+            from tests.rest.client.v1.utils import RestHelper
+
+            self.helper = RestHelper(self.hs, self.resource, self.user_id)
+
+            if self.hijack_auth:
+
+                def get_user_by_access_token(token=None, allow_guest=False):
+                    return {
+                        "user": UserID.from_string(self.helper.auth_user_id),
+                        "token_id": 1,
+                        "is_guest": False,
+                    }
+
+                def get_user_by_req(request, allow_guest=False, rights="access"):
+                    return create_requester(
+                        UserID.from_string(self.helper.auth_user_id), 1, False, None
+                    )
+
+                self.hs.get_auth().get_user_by_req = get_user_by_req
+                self.hs.get_auth().get_user_by_access_token = get_user_by_access_token
+                self.hs.get_auth().get_access_token_from_request = Mock(
+                    return_value="1234"
+                )
+
+        if hasattr(self, "prepare"):
+            self.prepare(self.reactor, self.clock, self.hs)
+
+    def make_homeserver(self, reactor, clock):
+        """
+        Make and return a homeserver.
+
+        Args:
+            reactor: A Twisted Reactor, or something that pretends to be one.
+            clock (synapse.util.Clock): The Clock, associated with the reactor.
+
+        Returns:
+            A homeserver (synapse.server.HomeServer) suitable for testing.
+
+        Function to be overridden in subclasses.
+        """
+        raise NotImplementedError()
+
+    def prepare(self, reactor, clock, homeserver):
+        """
+        Prepare for the test.  This involves things like mocking out parts of
+        the homeserver, or building test data common across the whole test
+        suite.
+
+        Args:
+            reactor: A Twisted Reactor, or something that pretends to be one.
+            clock (synapse.util.Clock): The Clock, associated with the reactor.
+            homeserver (synapse.server.HomeServer): The HomeServer to test
+            against.
+
+        Function to optionally be overridden in subclasses.
+        """
+
+    def make_request(self, method, path, content=b""):
+        """
+        Create a SynapseRequest at the path using the method and containing the
+        given content.
+
+        Args:
+            method (bytes/unicode): The HTTP request method ("verb").
+            path (bytes/unicode): The HTTP path, suitably URL encoded (i.e.
+            with UTF-8 characters and spaces percent-escaped).
+            content (bytes or dict): The body of the request. JSON-encoded, if
+            a dict.
+
+        Returns:
+            A synapse.http.site.SynapseRequest.
+        """
+        if isinstance(content, dict):
+            content = json.dumps(content).encode('utf8')
+
+        return make_request(method, path, content)
+
+    def render(self, request):
+        """
+        Render a request against the resources registered by the test class's
+        servlets.
+
+        Args:
+            request (synapse.http.site.SynapseRequest): The request to render.
+        """
+        render(request, self.resource, self.reactor)
+
+    def setup_test_homeserver(self, *args, **kwargs):
+        """
+        Set up the test homeserver, meant to be called by the overridable
+        make_homeserver. It automatically passes through the test class's
+        clock & reactor.
+
+        Args:
+            See tests.utils.setup_test_homeserver.
+
+        Returns:
+            synapse.server.HomeServer
+        """
+        kwargs = dict(kwargs)
+        kwargs.update(self._hs_args)
+        return setup_test_homeserver(self.addCleanup, *args, **kwargs)
+
+    def pump(self):
+        """
+        Pump the reactor enough that Deferreds will fire.
+        """
+        self.reactor.pump([0.0] * 100)
+
+    def get_success(self, d):
+        self.pump()
+        return self.successResultOf(d)
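The new HomeserverTestCase is driven by overriding make_homeserver (typically delegating to self.setup_test_homeserver), optionally defining prepare, and pushing HTTP through make_request and render. A hypothetical subclass sketching that shape; the empty servlet list and the "/_matrix/example" path are illustrative, not real Synapse registration functions or endpoints:

from tests.unittest import HomeserverTestCase


class ExampleRestTestCase(HomeserverTestCase):
    servlets = []              # e.g. a real servlet registration function
    user_id = "@alice:test"    # triggers auth hijacking in setUp

    def make_homeserver(self, reactor, clock):
        # setup_test_homeserver passes the test clock/reactor through.
        return self.setup_test_homeserver()

    def prepare(self, reactor, clock, hs):
        # Build fixtures common to the whole test case.
        self.store = hs.get_datastore()

    def test_example(self):
        # Hypothetical: would 400 unless a servlet serves this path.
        request, channel = self.make_request(b"GET", b"/_matrix/example")
        self.render(request)
        self.assertEqual(channel.result["code"], b"200")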
diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py
index 2516fe40f4..463a737efa 100644
--- a/tests/util/caches/test_descriptors.py
+++ b/tests/util/caches/test_descriptors.py
@@ -13,20 +13,28 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from functools import partial
 import logging
+from functools import partial
 
 import mock
+
+from twisted.internet import defer, reactor
+
 from synapse.api.errors import SynapseError
-from synapse.util import async
 from synapse.util import logcontext
-from twisted.internet import defer
 from synapse.util.caches import descriptors
+
 from tests import unittest
 
 logger = logging.getLogger(__name__)
 
 
+def run_on_reactor():
+    d = defer.Deferred()
+    reactor.callLater(0, d.callback, 0)
+    return logcontext.make_deferred_yieldable(d)
+
+
 class CacheTestCase(unittest.TestCase):
     def test_invalidate_all(self):
         cache = descriptors.Cache("testcache")
@@ -59,12 +67,8 @@ class CacheTestCase(unittest.TestCase):
         self.assertIsNone(cache.get("key2", None))
 
         # both callbacks should have been called
-        self.assertTrue(
-            callback_record[0], "Invalidation callback for key1 not called",
-        )
-        self.assertTrue(
-            callback_record[1], "Invalidation callback for key2 not called",
-        )
+        self.assertTrue(callback_record[0], "Invalidation callback for key1 not called")
+        self.assertTrue(callback_record[1], "Invalidation callback for key2 not called")
 
         # letting the other lookup complete should do nothing
         d1.callback("result1")
@@ -160,8 +164,7 @@ class DescriptorTestCase(unittest.TestCase):
             with logcontext.LoggingContext() as c1:
                 c1.name = "c1"
                 r = yield obj.fn(1)
-                self.assertEqual(logcontext.LoggingContext.current_context(),
-                                 c1)
+                self.assertEqual(logcontext.LoggingContext.current_context(), c1)
             defer.returnValue(r)
 
         def check_result(r):
@@ -171,14 +174,18 @@ class DescriptorTestCase(unittest.TestCase):
 
         # set off a deferred which will do a cache lookup
         d1 = do_lookup()
-        self.assertEqual(logcontext.LoggingContext.current_context(),
-                         logcontext.LoggingContext.sentinel)
+        self.assertEqual(
+            logcontext.LoggingContext.current_context(),
+            logcontext.LoggingContext.sentinel,
+        )
         d1.addCallback(check_result)
 
         # and another
         d2 = do_lookup()
-        self.assertEqual(logcontext.LoggingContext.current_context(),
-                         logcontext.LoggingContext.sentinel)
+        self.assertEqual(
+            logcontext.LoggingContext.current_context(),
+            logcontext.LoggingContext.sentinel,
+        )
         d2.addCallback(check_result)
 
         # let the lookup complete
@@ -195,7 +202,8 @@ class DescriptorTestCase(unittest.TestCase):
             def fn(self, arg1):
                 @defer.inlineCallbacks
                 def inner_fn():
-                    yield async.run_on_reactor()
+                    # we want this to behave like an asynchronous function
+                    yield run_on_reactor()
                     raise SynapseError(400, "blah")
 
                 return inner_fn()
@@ -205,20 +213,26 @@ class DescriptorTestCase(unittest.TestCase):
             with logcontext.LoggingContext() as c1:
                 c1.name = "c1"
                 try:
-                    yield obj.fn(1)
+                    d = obj.fn(1)
+                    self.assertEqual(
+                        logcontext.LoggingContext.current_context(),
+                        logcontext.LoggingContext.sentinel,
+                    )
+                    yield d
                     self.fail("No exception thrown")
                 except SynapseError:
                     pass
 
-                self.assertEqual(logcontext.LoggingContext.current_context(),
-                                 c1)
+                self.assertEqual(logcontext.LoggingContext.current_context(), c1)
 
         obj = Cls()
 
         # set off a deferred which will do a cache lookup
         d1 = do_lookup()
-        self.assertEqual(logcontext.LoggingContext.current_context(),
-                         logcontext.LoggingContext.sentinel)
+        self.assertEqual(
+            logcontext.LoggingContext.current_context(),
+            logcontext.LoggingContext.sentinel,
+        )
 
         return d1
 
@@ -259,3 +273,98 @@ class DescriptorTestCase(unittest.TestCase):
         r = yield obj.fn(2, 3)
         self.assertEqual(r, 'chips')
         obj.mock.assert_not_called()
+
+
+class CachedListDescriptorTestCase(unittest.TestCase):
+    @defer.inlineCallbacks
+    def test_cache(self):
+        class Cls(object):
+            def __init__(self):
+                self.mock = mock.Mock()
+
+            @descriptors.cached()
+            def fn(self, arg1, arg2):
+                pass
+
+            @descriptors.cachedList("fn", "args1", inlineCallbacks=True)
+            def list_fn(self, args1, arg2):
+                assert logcontext.LoggingContext.current_context().request == "c1"
+                # we want this to behave like an asynchronous function
+                yield run_on_reactor()
+                assert logcontext.LoggingContext.current_context().request == "c1"
+                defer.returnValue(self.mock(args1, arg2))
+
+        with logcontext.LoggingContext() as c1:
+            c1.request = "c1"
+            obj = Cls()
+            obj.mock.return_value = {10: 'fish', 20: 'chips'}
+            d1 = obj.list_fn([10, 20], 2)
+            self.assertEqual(
+                logcontext.LoggingContext.current_context(),
+                logcontext.LoggingContext.sentinel,
+            )
+            r = yield d1
+            self.assertEqual(logcontext.LoggingContext.current_context(), c1)
+            obj.mock.assert_called_once_with([10, 20], 2)
+            self.assertEqual(r, {10: 'fish', 20: 'chips'})
+            obj.mock.reset_mock()
+
+            # a call with different params should call the mock again
+            obj.mock.return_value = {30: 'peas'}
+            r = yield obj.list_fn([20, 30], 2)
+            obj.mock.assert_called_once_with([30], 2)
+            self.assertEqual(r, {20: 'chips', 30: 'peas'})
+            obj.mock.reset_mock()
+
+            # all the values should now be cached
+            r = yield obj.fn(10, 2)
+            self.assertEqual(r, 'fish')
+            r = yield obj.fn(20, 2)
+            self.assertEqual(r, 'chips')
+            r = yield obj.fn(30, 2)
+            self.assertEqual(r, 'peas')
+            r = yield obj.list_fn([10, 20, 30], 2)
+            obj.mock.assert_not_called()
+            self.assertEqual(r, {10: 'fish', 20: 'chips', 30: 'peas'})
+
+    @defer.inlineCallbacks
+    def test_invalidate(self):
+        """Make sure that invalidation callbacks are called."""
+
+        class Cls(object):
+            def __init__(self):
+                self.mock = mock.Mock()
+
+            @descriptors.cached()
+            def fn(self, arg1, arg2):
+                pass
+
+            @descriptors.cachedList("fn", "args1", inlineCallbacks=True)
+            def list_fn(self, args1, arg2):
+                # we want this to behave like an asynchronous function
+                yield run_on_reactor()
+                defer.returnValue(self.mock(args1, arg2))
+
+        obj = Cls()
+        invalidate0 = mock.Mock()
+        invalidate1 = mock.Mock()
+
+        # cache miss
+        obj.mock.return_value = {10: 'fish', 20: 'chips'}
+        r1 = yield obj.list_fn([10, 20], 2, on_invalidate=invalidate0)
+        obj.mock.assert_called_once_with([10, 20], 2)
+        self.assertEqual(r1, {10: 'fish', 20: 'chips'})
+        obj.mock.reset_mock()
+
+        # cache hit
+        r2 = yield obj.list_fn([10, 20], 2, on_invalidate=invalidate1)
+        obj.mock.assert_not_called()
+        self.assertEqual(r2, {10: 'fish', 20: 'chips'})
+
+        invalidate0.assert_not_called()
+        invalidate1.assert_not_called()
+
+        # now if we invalidate the keys, both invalidations should get called
+        obj.fn.invalidate((10, 2))
+        invalidate0.assert_called_once()
+        invalidate1.assert_called_once()
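test_cache above fixes the cachedList contract: a second call with overlapping keys passes only the misses (here [30]) to the wrapped function and merges the result with cached entries. The same miss-batching semantics in plain Python, ignoring Deferreds and logcontexts:

def batched_lookup(cache, keys, fetch):
    """Return {key: value} for all keys, calling fetch() only for misses.

    cache: a dict used as the backing cache.
    fetch: callable taking a list of missing keys, returning {key: value}.
    """
    missing = [k for k in keys if k not in cache]
    if missing:
        cache.update(fetch(missing))
    return {k: cache[k] for k in keys}


# e.g. with cache = {10: 'fish', 20: 'chips'}, batched_lookup(cache,
# [20, 30], fetch) calls fetch([30]) and returns {20: 'chips', 30: 'peas'}
# when fetch returns {30: 'peas'}, mirroring the assertions above.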
diff --git a/tests/util/test_dict_cache.py b/tests/util/test_dict_cache.py
index bc92f85fa6..34fdc9a43a 100644
--- a/tests/util/test_dict_cache.py
+++ b/tests/util/test_dict_cache.py
@@ -14,13 +14,12 @@
 # limitations under the License.
 
 
-from tests import unittest
-
 from synapse.util.caches.dictionary_cache import DictionaryCache
 
+from tests import unittest
 
-class DictCacheTestCase(unittest.TestCase):
 
+class DictCacheTestCase(unittest.TestCase):
     def setUp(self):
         self.cache = DictionaryCache("foobar")
 
@@ -32,7 +31,7 @@ class DictCacheTestCase(unittest.TestCase):
 
         seq = self.cache.sequence
         test_value = {"test": "test_simple_cache_hit_full"}
-        self.cache.update(seq, key, test_value, full=True)
+        self.cache.update(seq, key, test_value)
 
         c = self.cache.get(key)
         self.assertEqual(test_value, c.value)
@@ -41,10 +40,8 @@ class DictCacheTestCase(unittest.TestCase):
         key = "test_simple_cache_hit_partial"
 
         seq = self.cache.sequence
-        test_value = {
-            "test": "test_simple_cache_hit_partial"
-        }
-        self.cache.update(seq, key, test_value, full=True)
+        test_value = {"test": "test_simple_cache_hit_partial"}
+        self.cache.update(seq, key, test_value)
 
         c = self.cache.get(key, ["test"])
         self.assertEqual(test_value, c.value)
@@ -53,10 +50,8 @@ class DictCacheTestCase(unittest.TestCase):
         key = "test_simple_cache_miss_partial"
 
         seq = self.cache.sequence
-        test_value = {
-            "test": "test_simple_cache_miss_partial"
-        }
-        self.cache.update(seq, key, test_value, full=True)
+        test_value = {"test": "test_simple_cache_miss_partial"}
+        self.cache.update(seq, key, test_value)
 
         c = self.cache.get(key, ["test2"])
         self.assertEqual({}, c.value)
@@ -70,7 +65,7 @@ class DictCacheTestCase(unittest.TestCase):
             "test2": "test_simple_cache_hit_miss_partial2",
             "test3": "test_simple_cache_hit_miss_partial3",
         }
-        self.cache.update(seq, key, test_value, full=True)
+        self.cache.update(seq, key, test_value)
 
         c = self.cache.get(key, ["test2"])
         self.assertEqual({"test2": "test_simple_cache_hit_miss_partial2"}, c.value)
@@ -79,16 +74,12 @@ class DictCacheTestCase(unittest.TestCase):
         key = "test_simple_cache_hit_miss_partial"
 
         seq = self.cache.sequence
-        test_value_1 = {
-            "test": "test_simple_cache_hit_miss_partial",
-        }
-        self.cache.update(seq, key, test_value_1, full=False)
+        test_value_1 = {"test": "test_simple_cache_hit_miss_partial"}
+        self.cache.update(seq, key, test_value_1, fetched_keys=set("test"))
 
         seq = self.cache.sequence
-        test_value_2 = {
-            "test2": "test_simple_cache_hit_miss_partial2",
-        }
-        self.cache.update(seq, key, test_value_2, full=False)
+        test_value_2 = {"test2": "test_simple_cache_hit_miss_partial2"}
+        self.cache.update(seq, key, test_value_2, fetched_keys=set("test2"))
 
         c = self.cache.get(key)
         self.assertEqual(
@@ -96,5 +87,5 @@ class DictCacheTestCase(unittest.TestCase):
                 "test": "test_simple_cache_hit_miss_partial",
                 "test2": "test_simple_cache_hit_miss_partial2",
             },
-            c.value
+            c.value,
         )
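The change through this file replaces DictionaryCache.update's boolean full flag with fetched_keys: omitting it means "this dict is complete", while passing it marks a partial update covering only those keys. A sketch of that interface under the assumption that partial updates merge; the real cache's sequence-number checking and known-absent tracking are omitted:

class SketchDictCache(object):
    """Illustrative dict cache with full vs. partial updates."""

    def __init__(self):
        self._data = {}  # key -> (value_dict, is_full)

    def update(self, key, value, fetched_keys=None):
        if fetched_keys is None:
            # Full update: replace whatever we had.
            self._data[key] = (dict(value), True)
        else:
            # Partial update: merge in just the fetched keys.
            existing, _ = self._data.get(key, ({}, False))
            existing.update({k: value[k] for k in fetched_keys})
            self._data[key] = (existing, False)

    def get(self, key, dict_keys=None):
        value, _ = self._data.get(key, ({}, False))
        if dict_keys is None:
            return value
        return {k: value[k] for k in dict_keys if k in value}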
diff --git a/tests/util/test_expiring_cache.py b/tests/util/test_expiring_cache.py
index 31d24adb8b..5cbada4eda 100644
--- a/tests/util/test_expiring_cache.py
+++ b/tests/util/test_expiring_cache.py
@@ -14,15 +14,14 @@
 # limitations under the License.
 
 
-from .. import unittest
-
 from synapse.util.caches.expiringcache import ExpiringCache
 
 from tests.utils import MockClock
 
+from .. import unittest
 
-class ExpiringCacheTestCase(unittest.TestCase):
 
+class ExpiringCacheTestCase(unittest.TestCase):
     def test_get_set(self):
         clock = MockClock()
         cache = ExpiringCache("test", clock, max_len=1)
diff --git a/tests/util/test_file_consumer.py b/tests/util/test_file_consumer.py
index d6e1082779..e90e08d1c0 100644
--- a/tests/util/test_file_consumer.py
+++ b/tests/util/test_file_consumer.py
@@ -14,23 +14,23 @@
 # limitations under the License.
 
 
-from twisted.internet import defer, reactor
+import threading
+
 from mock import NonCallableMock
+from six import StringIO
+
+from twisted.internet import defer, reactor
 
 from synapse.util.file_consumer import BackgroundFileConsumer
 
 from tests import unittest
-from six import StringIO
-
-import threading
 
 
 class FileConsumerTests(unittest.TestCase):
-
     @defer.inlineCallbacks
     def test_pull_consumer(self):
         string_file = StringIO()
-        consumer = BackgroundFileConsumer(string_file)
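+        # The consumer now takes the reactor it should run on as an explicit
+        # argument.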
+        consumer = BackgroundFileConsumer(string_file, reactor=reactor)
 
         try:
             producer = DummyPullProducer()
@@ -54,7 +54,7 @@ class FileConsumerTests(unittest.TestCase):
     @defer.inlineCallbacks
     def test_push_consumer(self):
         string_file = BlockingStringWrite()
-        consumer = BackgroundFileConsumer(string_file)
+        consumer = BackgroundFileConsumer(string_file, reactor=reactor)
 
         try:
             producer = NonCallableMock(spec_set=[])
@@ -80,13 +80,15 @@ class FileConsumerTests(unittest.TestCase):
     @defer.inlineCallbacks
     def test_push_producer_feedback(self):
         string_file = BlockingStringWrite()
-        consumer = BackgroundFileConsumer(string_file)
+        consumer = BackgroundFileConsumer(string_file, reactor=reactor)
 
         try:
             producer = NonCallableMock(spec_set=["pauseProducing", "resumeProducing"])
 
             resume_deferred = defer.Deferred()
-            producer.resumeProducing.side_effect = lambda: resume_deferred.callback(None)
+            producer.resumeProducing.side_effect = lambda: resume_deferred.callback(
+                None
+            )
 
             consumer.registerProducer(producer, True)
 
diff --git a/tests/util/test_limiter.py b/tests/util/test_limiter.py
deleted file mode 100644
index 9c795d9fdb..0000000000
--- a/tests/util/test_limiter.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-from tests import unittest
-
-from twisted.internet import defer
-
-from synapse.util.async import Limiter
-
-
-class LimiterTestCase(unittest.TestCase):
-
-    @defer.inlineCallbacks
-    def test_limiter(self):
-        limiter = Limiter(3)
-
-        key = object()
-
-        d1 = limiter.queue(key)
-        cm1 = yield d1
-
-        d2 = limiter.queue(key)
-        cm2 = yield d2
-
-        d3 = limiter.queue(key)
-        cm3 = yield d3
-
-        d4 = limiter.queue(key)
-        self.assertFalse(d4.called)
-
-        d5 = limiter.queue(key)
-        self.assertFalse(d5.called)
-
-        with cm1:
-            self.assertFalse(d4.called)
-            self.assertFalse(d5.called)
-
-        self.assertTrue(d4.called)
-        self.assertFalse(d5.called)
-
-        with cm3:
-            self.assertFalse(d5.called)
-
-        self.assertTrue(d5.called)
-
-        with cm2:
-            pass
-
-        with (yield d4):
-            pass
-
-        with (yield d5):
-            pass
-
-        d6 = limiter.queue(key)
-        with (yield d6):
-            pass
diff --git a/tests/util/test_linearizer.py b/tests/util/test_linearizer.py
index 4865eb4bc6..61a55b461b 100644
--- a/tests/util/test_linearizer.py
+++ b/tests/util/test_linearizer.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,17 +13,19 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from synapse.util import async, logcontext
-from tests import unittest
-
-from twisted.internet import defer
 
-from synapse.util.async import Linearizer
 from six.moves import range
 
+from twisted.internet import defer, reactor
+from twisted.internet.defer import CancelledError
 
-class LinearizerTestCase(unittest.TestCase):
+from synapse.util import Clock, logcontext
+from synapse.util.async_helpers import Linearizer
 
+from tests import unittest
+
+
+class LinearizerTestCase(unittest.TestCase):
     @defer.inlineCallbacks
     def test_linearizer(self):
         linearizer = Linearizer()
@@ -50,16 +53,90 @@ class LinearizerTestCase(unittest.TestCase):
         def func(i, sleep=False):
             with logcontext.LoggingContext("func(%s)" % i) as lc:
                 with (yield linearizer.queue("")):
-                    self.assertEqual(
-                        logcontext.LoggingContext.current_context(), lc)
+                    self.assertEqual(logcontext.LoggingContext.current_context(), lc)
                     if sleep:
-                        yield async.sleep(0)
+                        yield Clock(reactor).sleep(0)
 
-                self.assertEqual(
-                    logcontext.LoggingContext.current_context(), lc)
+                self.assertEqual(logcontext.LoggingContext.current_context(), lc)
 
         func(0, sleep=True)
         for i in range(1, 100):
             func(i)
 
         return func(1000)
+
+    @defer.inlineCallbacks
+    def test_multiple_entries(self):
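+        # With max_count=3, up to three callers may hold the lock for a key at
+        # once; the fourth and fifth must wait.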
+        limiter = Linearizer(max_count=3)
+
+        key = object()
+
+        d1 = limiter.queue(key)
+        cm1 = yield d1
+
+        d2 = limiter.queue(key)
+        cm2 = yield d2
+
+        d3 = limiter.queue(key)
+        cm3 = yield d3
+
+        d4 = limiter.queue(key)
+        self.assertFalse(d4.called)
+
+        d5 = limiter.queue(key)
+        self.assertFalse(d5.called)
+
+        with cm1:
+            self.assertFalse(d4.called)
+            self.assertFalse(d5.called)
+
+        cm4 = yield d4
+        self.assertFalse(d5.called)
+
+        with cm3:
+            self.assertFalse(d5.called)
+
+        cm5 = yield d5
+
+        with cm2:
+            pass
+
+        with cm4:
+            pass
+
+        with cm5:
+            pass
+
+        d6 = limiter.queue(key)
+        with (yield d6):
+            pass
+
+    @defer.inlineCallbacks
+    def test_cancellation(self):
+        linearizer = Linearizer()
+
+        key = object()
+
+        d1 = linearizer.queue(key)
+        cm1 = yield d1
+
+        d2 = linearizer.queue(key)
+        self.assertFalse(d2.called)
+
+        d3 = linearizer.queue(key)
+        self.assertFalse(d3.called)
+
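+        # Cancel d2 while it is still queued: it should never acquire the
+        # lock, and later waiters such as d3 must still make progress.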
+        d2.cancel()
+
+        with cm1:
+            pass
+
+        self.assertTrue(d2.called)
+        try:
+            yield d2
+            self.fail("Expected d2 to raise CancelledError")
+        except CancelledError:
+            pass
+
+        with (yield d3):
+            pass
diff --git a/tests/util/test_logcontext.py b/tests/util/test_logcontext.py
index ad78d884e0..4633db77b3 100644
--- a/tests/util/test_logcontext.py
+++ b/tests/util/test_logcontext.py
@@ -1,19 +1,15 @@
 import twisted.python.failure
-from twisted.internet import defer
-from twisted.internet import reactor
-from .. import unittest
+from twisted.internet import defer, reactor
 
-from synapse.util.async import sleep
-from synapse.util import logcontext
+from synapse.util import Clock, logcontext
 from synapse.util.logcontext import LoggingContext
 
+from .. import unittest
+
 
 class LoggingContextTestCase(unittest.TestCase):
-
     def _check_test_key(self, value):
-        self.assertEquals(
-            LoggingContext.current_context().request, value
-        )
+        self.assertEquals(LoggingContext.current_context().request, value)
 
     def test_with_context(self):
         with LoggingContext() as context_one:
@@ -22,18 +18,20 @@ class LoggingContextTestCase(unittest.TestCase):
 
     @defer.inlineCallbacks
     def test_sleep(self):
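+        # sleep() now comes from a Clock wrapping the reactor, rather than
+        # from synapse.util.async.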
+        clock = Clock(reactor)
+
         @defer.inlineCallbacks
         def competing_callback():
             with LoggingContext() as competing_context:
                 competing_context.request = "competing"
-                yield sleep(0)
+                yield clock.sleep(0)
                 self._check_test_key("competing")
 
         reactor.callLater(0, competing_callback)
 
         with LoggingContext() as context_one:
             context_one.request = "one"
-            yield sleep(0)
+            yield clock.sleep(0)
             self._check_test_key("one")
 
     def _test_run_in_background(self, function):
@@ -49,6 +47,7 @@ class LoggingContextTestCase(unittest.TestCase):
                 self._check_test_key("one")
                 callback_completed[0] = True
                 return res
+
             d.addCallback(cb)
 
             return d
@@ -73,8 +72,7 @@ class LoggingContextTestCase(unittest.TestCase):
             # make sure that the context was reset before it got thrown back
             # into the reactor
             try:
-                self.assertIs(LoggingContext.current_context(),
-                              sentinel_context)
+                self.assertIs(LoggingContext.current_context(), sentinel_context)
                 d2.callback(None)
             except BaseException:
                 d2.errback(twisted.python.failure.Failure())
@@ -87,7 +85,7 @@ class LoggingContextTestCase(unittest.TestCase):
     def test_run_in_background_with_blocking_fn(self):
         @defer.inlineCallbacks
         def blocking_function():
-            yield sleep(0)
+            yield Clock(reactor).sleep(0)
 
         return self._test_run_in_background(blocking_function)
 
@@ -103,9 +101,7 @@ class LoggingContextTestCase(unittest.TestCase):
         # a function which returns a deferred which looks like it has been
         # called, but is actually paused
         def testfunc():
-            return logcontext.make_deferred_yieldable(
-                _chained_deferred_function()
-            )
+            return logcontext.make_deferred_yieldable(_chained_deferred_function())
 
         return self._test_run_in_background(testfunc)
 
@@ -174,5 +170,6 @@ def _chained_deferred_function():
         d2 = defer.Deferred()
         reactor.callLater(0, d2.callback, res)
         return d2
+
     d.addCallback(cb)
     return d
diff --git a/tests/util/test_logformatter.py b/tests/util/test_logformatter.py
index 1a1a8412f2..297aebbfbe 100644
--- a/tests/util/test_logformatter.py
+++ b/tests/util/test_logformatter.py
@@ -15,6 +15,7 @@
 import sys
 
 from synapse.util.logformatter import LogFormatter
+
 from tests import unittest
 
 
diff --git a/tests/util/test_lrucache.py b/tests/util/test_lrucache.py
index dfb78cb8bd..786947375d 100644
--- a/tests/util/test_lrucache.py
+++ b/tests/util/test_lrucache.py
@@ -14,16 +14,15 @@
 # limitations under the License.
 
 
-from .. import unittest
+from mock import Mock
 
 from synapse.util.caches.lrucache import LruCache
 from synapse.util.caches.treecache import TreeCache
 
-from mock import Mock
+from .. import unittest
 
 
 class LruCacheTestCase(unittest.TestCase):
-
     def test_get_set(self):
         cache = LruCache(1)
         cache["key"] = "value"
@@ -235,7 +234,6 @@ class LruCacheCallbacksTestCase(unittest.TestCase):
 
 
 class LruCacheSizedTestCase(unittest.TestCase):
-
     def test_evict(self):
         cache = LruCache(5, size_callback=len)
         cache["key1"] = [0]
diff --git a/tests/util/test_rwlock.py b/tests/util/test_rwlock.py
index 1d745ae1a7..bd32e2cee7 100644
--- a/tests/util/test_rwlock.py
+++ b/tests/util/test_rwlock.py
@@ -14,13 +14,12 @@
 # limitations under the License.
 
 
-from tests import unittest
+from synapse.util.async_helpers import ReadWriteLock
 
-from synapse.util.async import ReadWriteLock
+from tests import unittest
 
 
 class ReadWriteLockTestCase(unittest.TestCase):
-
     def _assert_called_before_not_after(self, lst, first_false):
         for i, d in enumerate(lst[:first_false]):
             self.assertTrue(d.called, msg="%d was unexpectedly false" % i)
@@ -36,12 +35,12 @@ class ReadWriteLockTestCase(unittest.TestCase):
         key = object()
 
         ds = [
-            rwlock.read(key),   # 0
-            rwlock.read(key),   # 1
+            rwlock.read(key),  # 0
+            rwlock.read(key),  # 1
             rwlock.write(key),  # 2
             rwlock.write(key),  # 3
-            rwlock.read(key),   # 4
-            rwlock.read(key),   # 5
+            rwlock.read(key),  # 4
+            rwlock.read(key),  # 5
             rwlock.write(key),  # 6
         ]
 
diff --git a/tests/util/test_snapshot_cache.py b/tests/util/test_snapshot_cache.py
index d3a8630c2f..1a44f72425 100644
--- a/tests/util/test_snapshot_cache.py
+++ b/tests/util/test_snapshot_cache.py
@@ -14,14 +14,14 @@
 # limitations under the License.
 
 
-from .. import unittest
+from twisted.internet.defer import Deferred
 
 from synapse.util.caches.snapshot_cache import SnapshotCache
-from twisted.internet.defer import Deferred
 
+from .. import unittest
 
-class SnapshotCacheTestCase(unittest.TestCase):
 
+class SnapshotCacheTestCase(unittest.TestCase):
     def setUp(self):
         self.cache = SnapshotCache()
         self.cache.DURATION_MS = 1
diff --git a/tests/util/test_stream_change_cache.py b/tests/util/test_stream_change_cache.py
index 67ece166c7..f2be63706b 100644
--- a/tests/util/test_stream_change_cache.py
+++ b/tests/util/test_stream_change_cache.py
@@ -1,8 +1,9 @@
-from tests import unittest
 from mock import patch
 
 from synapse.util.caches.stream_change_cache import StreamChangeCache
 
+from tests import unittest
+
 
 class StreamChangeCacheTests(unittest.TestCase):
     """
@@ -140,8 +141,8 @@ class StreamChangeCacheTests(unittest.TestCase):
         )
 
         # Query all the entries mid-way through the stream, but include one
-        # that doesn't exist in it. We should get back the one that doesn't
-        # exist, too.
+        # that doesn't exist in it. We shouldn't get back the one that doesn't
+        # exist.
         self.assertEqual(
             cache.get_entities_changed(
                 [
@@ -152,7 +153,7 @@ class StreamChangeCacheTests(unittest.TestCase):
                 ],
                 stream_pos=2,
             ),
-            set(["bar@baz.net", "user@elsewhere.org", "not@here.website"]),
+            set(["bar@baz.net", "user@elsewhere.org"]),
         )
 
         # Query all the entries, but before the first known point. We will get
@@ -177,6 +178,13 @@ class StreamChangeCacheTests(unittest.TestCase):
             ),
         )
 
+        # Query a subset of the entries mid-way through the stream. We should
+        # only get back the subset.
+        self.assertEqual(
+            cache.get_entities_changed(["bar@baz.net"], stream_pos=2),
+            set(["bar@baz.net"]),
+        )
+
     def test_max_pos(self):
         """
         StreamChangeCache.get_max_pos_of_last_change will return the most
diff --git a/tests/util/test_treecache.py b/tests/util/test_treecache.py
index 7ab578a185..a5f2261208 100644
--- a/tests/util/test_treecache.py
+++ b/tests/util/test_treecache.py
@@ -14,10 +14,10 @@
 # limitations under the License.
 
 
-from .. import unittest
-
 from synapse.util.caches.treecache import TreeCache
 
+from .. import unittest
+
 
 class TreeCacheTestCase(unittest.TestCase):
     def test_get_set_onelevel(self):
diff --git a/tests/util/test_wheel_timer.py b/tests/util/test_wheel_timer.py
index fdb24a48b0..03201a4d9b 100644
--- a/tests/util/test_wheel_timer.py
+++ b/tests/util/test_wheel_timer.py
@@ -13,10 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from .. import unittest
-
 from synapse.util.wheel_timer import WheelTimer
 
+from .. import unittest
+
 
 class WheelTimerTestCase(unittest.TestCase):
     def test_single_insert_fetch(self):
diff --git a/tests/utils.py b/tests/utils.py
index 262c4a5714..63e30dc6c0 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -13,35 +13,108 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import atexit
 import hashlib
+import os
+import uuid
 from inspect import getcallargs
-from six.moves.urllib import parse as urlparse
 
 from mock import Mock, patch
+from six.moves.urllib import parse as urlparse
+
 from twisted.internet import defer, reactor
 
+from synapse.api.constants import EventTypes
 from synapse.api.errors import CodeMessageException, cs_error
+from synapse.config.server import ServerConfig
 from synapse.federation.transport import server
 from synapse.http.server import HttpServer
 from synapse.server import HomeServer
-from synapse.storage import PostgresEngine
+from synapse.storage import DataStore, PostgresEngine
 from synapse.storage.engines import create_engine
-from synapse.storage.prepare_database import prepare_database
+from synapse.storage.prepare_database import (
+    _get_or_create_schema_state,
+    _setup_new_database,
+    prepare_database,
+)
 from synapse.util.logcontext import LoggingContext
 from synapse.util.ratelimitutils import FederationRateLimiter
 
 # set this to True to run the tests against postgres instead of sqlite.
-# It requires you to have a local postgres database called synapse_test, within
-# which ALL TABLES WILL BE DROPPED
-USE_POSTGRES_FOR_TESTS = False
+USE_POSTGRES_FOR_TESTS = os.environ.get("SYNAPSE_POSTGRES", False)
+POSTGRES_USER = os.environ.get("SYNAPSE_POSTGRES_USER", "postgres")
+POSTGRES_BASE_DB = "_synapse_unit_tests_base_%s" % (os.getpid(),)
+
+
+def setupdb():
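+    """Set up a base database for the tests.
+
+    When running against PostgreSQL, this creates a template database once per
+    test run; setup_test_homeserver() clones a fresh database from it for each
+    test, and the template is dropped again at exit.
+    """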
+
+    # If we're using PostgreSQL, set up the db once
+    if USE_POSTGRES_FOR_TESTS:
+        pgconfig = {
+            "name": "psycopg2",
+            "args": {
+                "database": POSTGRES_BASE_DB,
+                "user": POSTGRES_USER,
+                "cp_min": 1,
+                "cp_max": 5,
+            },
+        }
+        config = Mock()
+        config.password_providers = []
+        config.database_config = pgconfig
+        db_engine = create_engine(pgconfig)
+        db_conn = db_engine.module.connect(user=POSTGRES_USER)
+        db_conn.autocommit = True
+        cur = db_conn.cursor()
+        cur.execute("DROP DATABASE IF EXISTS %s;" % (POSTGRES_BASE_DB,))
+        cur.execute("CREATE DATABASE %s;" % (POSTGRES_BASE_DB,))
+        cur.close()
+        db_conn.close()
+
+        # Set up the database schema in the template db
+        db_conn = db_engine.module.connect(
+            database=POSTGRES_BASE_DB, user=POSTGRES_USER
+        )
+        cur = db_conn.cursor()
+        _get_or_create_schema_state(cur, db_engine)
+        _setup_new_database(cur, db_engine)
+        db_conn.commit()
+        cur.close()
+        db_conn.close()
+
+        def _cleanup():
+            db_conn = db_engine.module.connect(user=POSTGRES_USER)
+            db_conn.autocommit = True
+            cur = db_conn.cursor()
+            cur.execute("DROP DATABASE IF EXISTS %s;" % (POSTGRES_BASE_DB,))
+            cur.close()
+            db_conn.close()
+
+        atexit.register(_cleanup)
+
+
+class TestHomeServer(HomeServer):
+    DATASTORE_CLASS = DataStore
 
 
 @defer.inlineCallbacks
-def setup_test_homeserver(name="test", datastore=None, config=None, **kargs):
-    """Setup a homeserver suitable for running tests against. Keyword arguments
-    are passed to the Homeserver constructor. If no datastore is supplied a
-    datastore backed by an in-memory sqlite db will be given to the HS.
+def setup_test_homeserver(
+    cleanup_func, name="test", datastore=None, config=None, reactor=None,
+    homeserverToUse=TestHomeServer, **kargs
+):
+    """
+    Set up a homeserver suitable for running tests against. Keyword arguments
+    are passed to the HomeServer constructor.
+
+    If no datastore is supplied, one is created and given to the homeserver.
+
+    Args:
+        cleanup_func : The function used to register a cleanup routine to be
+                       run after the test.
     """
+    if reactor is None:
+        from twisted.internet import reactor
+
     if config is None:
         config = Mock()
         config.signing_key = [MockKey()]
@@ -60,16 +133,37 @@ def setup_test_homeserver(name="test", datastore=None, config=None, **kargs):
         config.federation_domain_whitelist = None
         config.federation_rc_reject_limit = 10
         config.federation_rc_sleep_limit = 10
+        config.federation_rc_sleep_delay = 100
         config.federation_rc_concurrent = 10
         config.filter_timeline_limit = 5000
         config.user_directory_search_all_users = False
         config.user_consent_server_notice_content = None
         config.block_events_without_consent_error = None
+        config.media_storage_providers = []
+        config.auto_join_rooms = []
+        config.limit_usage_by_mau = False
+        config.hs_disabled = False
+        config.hs_disabled_message = ""
+        config.hs_disabled_limit_type = ""
+        config.max_mau_value = 50
+        config.mau_limits_reserved_threepids = []
+        config.admin_contact = None
+        config.rc_messages_per_second = 10000
+        config.rc_message_burst_count = 10000
+
+        # we need a sane default_room_version, otherwise attempts to create rooms will
+        # fail.
+        config.default_room_version = "1"
 
         # disable user directory updates, because they get done in the
         # background, which upsets the test runner.
         config.update_user_directory = False
 
+        def is_threepid_reserved(threepid):
+            return ServerConfig.is_threepid_reserved(config, threepid)
+
+        config.is_threepid_reserved.side_effect = is_threepid_reserved
+
     config.use_frozen_dicts = True
     config.ldap_enabled = False
 
@@ -77,58 +171,95 @@ def setup_test_homeserver(name="test", datastore=None, config=None, **kargs):
         kargs["clock"] = MockClock()
 
     if USE_POSTGRES_FOR_TESTS:
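+        # Each test gets its own uniquely named database, cloned from the base
+        # template below, so tests cannot interfere with one another.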
+        test_db = "synapse_test_%s" % uuid.uuid4().hex
+
         config.database_config = {
             "name": "psycopg2",
-            "args": {
-                "database": "synapse_test",
-                "cp_min": 1,
-                "cp_max": 5,
-            },
+            "args": {"database": test_db, "cp_min": 1, "cp_max": 5},
         }
     else:
         config.database_config = {
             "name": "sqlite3",
-            "args": {
-                "database": ":memory:",
-                "cp_min": 1,
-                "cp_max": 1,
-            },
+            "args": {"database": ":memory:", "cp_min": 1, "cp_max": 1},
         }
 
     db_engine = create_engine(config.database_config)
 
+    # Create the database before we actually try to connect to it, based on
+    # the template database we generate in setupdb()
+    if datastore is None and isinstance(db_engine, PostgresEngine):
+        db_conn = db_engine.module.connect(
+            database=POSTGRES_BASE_DB, user=POSTGRES_USER
+        )
+        db_conn.autocommit = True
+        cur = db_conn.cursor()
+        cur.execute("DROP DATABASE IF EXISTS %s;" % (test_db,))
+        cur.execute(
+            "CREATE DATABASE %s WITH TEMPLATE %s;" % (test_db, POSTGRES_BASE_DB)
+        )
+        cur.close()
+        db_conn.close()
+
     # we need to configure the connection pool to run the on_new_connection
     # function, so that we can test code that uses custom sqlite functions
     # (like rank).
     config.database_config["args"]["cp_openfun"] = db_engine.on_new_connection
 
     if datastore is None:
-        hs = HomeServer(
-            name, config=config,
+        hs = homeserverToUse(
+            name,
+            config=config,
             db_config=config.database_config,
             version_string="Synapse/tests",
             database_engine=db_engine,
             room_list_handler=object(),
             tls_server_context_factory=Mock(),
+            tls_client_options_factory=Mock(),
+            reactor=reactor,
             **kargs
         )
-        db_conn = hs.get_db_conn()
-        # make sure that the database is empty
-        if isinstance(db_engine, PostgresEngine):
-            cur = db_conn.cursor()
-            cur.execute("SELECT tablename FROM pg_tables where schemaname='public'")
-            rows = cur.fetchall()
-            for r in rows:
-                cur.execute("DROP TABLE %s CASCADE" % r[0])
-        yield prepare_database(db_conn, db_engine, config)
+
+        # Prepare the DB on SQLite -- the PostgreSQL one is a copy of an
+        # already up-to-date db
+        if not isinstance(db_engine, PostgresEngine):
+            db_conn = hs.get_db_conn()
+            yield prepare_database(db_conn, db_engine, config)
+            db_conn.commit()
+            db_conn.close()
+
+        else:
+            # On PostgreSQL we need to clean up the per-test database ourselves
+            def cleanup():
+                # Close all the db pools
+                hs.get_db_pool().close()
+
+                # Drop the test database
+                db_conn = db_engine.module.connect(
+                    database=POSTGRES_BASE_DB, user=POSTGRES_USER
+                )
+                db_conn.autocommit = True
+                cur = db_conn.cursor()
+                cur.execute("DROP DATABASE IF EXISTS %s;" % (test_db,))
+                db_conn.commit()
+                cur.close()
+                db_conn.close()
+
+            # Register the cleanup hook
+            cleanup_func(cleanup)
+
         hs.setup()
     else:
-        hs = HomeServer(
-            name, db_pool=None, datastore=datastore, config=config,
+        hs = homeserverToUse(
+            name,
+            db_pool=None,
+            datastore=datastore,
+            config=config,
             version_string="Synapse/tests",
             database_engine=db_engine,
             room_list_handler=object(),
             tls_server_context_factory=Mock(),
+            tls_client_options_factory=Mock(),
+            reactor=reactor,
             **kargs
         )
 
@@ -136,8 +267,10 @@ def setup_test_homeserver(name="test", datastore=None, config=None, **kargs):
     # Need to let the HS build an auth handler and then mess with it
     # because AuthHandler's constructor requires the HS, so we can't make one
     # beforehand and pass it in to the HS's constructor (chicken / egg)
-    hs.get_auth_handler().hash = lambda p: hashlib.md5(p).hexdigest()
-    hs.get_auth_handler().validate_hash = lambda p, h: hashlib.md5(p).hexdigest() == h
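+    # hashlib.md5() needs bytes, so encode the password first (required on
+    # Python 3)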
+    hs.get_auth_handler().hash = lambda p: hashlib.md5(p.encode('utf8')).hexdigest()
+    hs.get_auth_handler().validate_hash = (
+        lambda p, h: hashlib.md5(p.encode('utf8')).hexdigest() == h
+    )
 
     fed = kargs.get("resource_for_federation", None)
     if fed:
@@ -151,7 +284,7 @@ def setup_test_homeserver(name="test", datastore=None, config=None, **kargs):
                 sleep_limit=hs.config.federation_rc_sleep_limit,
                 sleep_msec=hs.config.federation_rc_sleep_delay,
                 reject_limit=hs.config.federation_rc_reject_limit,
-                concurrent_requests=hs.config.federation_rc_concurrent
+                concurrent_requests=hs.config.federation_rc_concurrent,
             ),
         )
 
@@ -177,13 +310,12 @@ def mock_getRawHeaders(headers=None):
 
 # This is a mock /resource/ not an entire server
 class MockHttpResource(HttpServer):
-
     def __init__(self, prefix=""):
         self.callbacks = []  # 3-tuple of method/pattern/function
         self.prefix = prefix
 
     def trigger_get(self, path):
-        return self.trigger("GET", path, None)
+        return self.trigger(b"GET", path, None)
 
     @patch('twisted.web.http.Request')
     @defer.inlineCallbacks
@@ -210,14 +342,14 @@ class MockHttpResource(HttpServer):
         mock_content.configure_mock(**config)
         mock_request.content = mock_content
 
-        mock_request.method = http_method
-        mock_request.uri = path
+        mock_request.method = http_method.encode('ascii')
+        mock_request.uri = path.encode('ascii')
 
         mock_request.getClientIP.return_value = "-"
 
         headers = {}
         if federation_auth:
-            headers[b"Authorization"] = ["X-Matrix origin=test,key=,sig="]
+            headers[b"Authorization"] = [b"X-Matrix origin=test,key=,sig="]
         mock_request.requestHeaders.getRawHeaders = mock_getRawHeaders(headers)
 
         # return the right path if the event requires it
@@ -231,6 +363,9 @@ class MockHttpResource(HttpServer):
         except Exception:
             pass
 
+        if isinstance(path, bytes):
+            path = path.decode('utf8')
+
         for (method, pattern, func) in self.callbacks:
             if http_method != method:
                 continue
@@ -238,15 +373,9 @@ class MockHttpResource(HttpServer):
             matcher = pattern.match(path)
             if matcher:
                 try:
-                    args = [
-                        urlparse.unquote(u).decode("UTF-8")
-                        for u in matcher.groups()
-                    ]
-
-                    (code, response) = yield func(
-                        mock_request,
-                        *args
-                    )
+                    args = [urlparse.unquote(u) for u in matcher.groups()]
+
+                    (code, response) = yield func(mock_request, *args)
                     defer.returnValue((code, response))
                 except CodeMessageException as e:
                     defer.returnValue((e.code, cs_error(e.msg, code=e.errcode)))
@@ -347,8 +476,7 @@ class MockClock(object):
 
 def _format_call(args, kwargs):
     return ", ".join(
-        ["%r" % (a) for a in args] +
-        ["%s=%r" % (k, v) for k, v in kwargs.items()]
+        ["%r" % (a) for a in args] + ["%s=%r" % (k, v) for k, v in kwargs.items()]
     )
 
 
@@ -366,8 +494,9 @@ class DeferredMockCallable(object):
         self.calls.append((args, kwargs))
 
         if not self.expectations:
-            raise ValueError("%r has no pending calls to handle call(%s)" % (
-                self, _format_call(args, kwargs))
+            raise ValueError(
+                "%r has no pending calls to handle call(%s)"
+                % (self, _format_call(args, kwargs))
             )
 
         for (call, result, d) in self.expectations:
@@ -375,9 +504,9 @@ class DeferredMockCallable(object):
                 d.callback(None)
                 return result
 
-        failure = AssertionError("Was not expecting call(%s)" % (
-            _format_call(args, kwargs)
-        ))
+        failure = AssertionError(
+            "Was not expecting call(%s)" % (_format_call(args, kwargs))
+        )
 
         for _, _, d in self.expectations:
             try:
@@ -393,17 +522,19 @@ class DeferredMockCallable(object):
     @defer.inlineCallbacks
     def await_calls(self, timeout=1000):
         deferred = defer.DeferredList(
-            [d for _, _, d in self.expectations],
-            fireOnOneErrback=True
+            [d for _, _, d in self.expectations], fireOnOneErrback=True
         )
 
         timer = reactor.callLater(
             timeout / 1000,
             deferred.errback,
-            AssertionError("%d pending calls left: %s" % (
-                len([e for e in self.expectations if not e[2].called]),
-                [e for e in self.expectations if not e[2].called]
-            ))
+            AssertionError(
+                "%d pending calls left: %s"
+                % (
+                    len([e for e in self.expectations if not e[2].called]),
+                    [e for e in self.expectations if not e[2].called],
+                )
+            ),
         )
 
         yield deferred
@@ -418,7 +549,35 @@ class DeferredMockCallable(object):
             self.calls = []
 
             raise AssertionError(
-                "Expected not to received any calls, got:\n" + "\n".join([
-                    "call(%s)" % _format_call(c[0], c[1]) for c in calls
-                ])
+                "Expected not to received any calls, got:\n"
+                + "\n".join(["call(%s)" % _format_call(c[0], c[1]) for c in calls])
             )
+
+
+@defer.inlineCallbacks
+def create_room(hs, room_id, creator_id):
+    """Creates and persist a creation event for the given room
+
+    Args:
+        hs (HomeServer)
+        room_id (str): The ID of the room to create.
+        creator_id (str): The user ID of the room's creator.
+    """
+
+    store = hs.get_datastore()
+    event_builder_factory = hs.get_event_builder_factory()
+    event_creation_handler = hs.get_event_creation_handler()
+
+    builder = event_builder_factory.new({
+        "type": EventTypes.Create,
+        "state_key": "",
+        "sender": creator_id,
+        "room_id": room_id,
+        "content": {},
+    })
+
+    event, context = yield event_creation_handler.create_new_client_event(
+        builder
+    )
+
+    yield store.persist_event(event, context)
diff --git a/tox.ini b/tox.ini
index 5d79098d2f..085f438989 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,7 +1,7 @@
 [tox]
-envlist = packaging, py27, py36, pep8
+envlist = packaging, py27, py36, pep8, check_isort
 
-[testenv]
+[base]
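+# Common deps/setenv live in this [base] section so that other environments
+# (e.g. py27-postgres below) can reuse them.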
 deps =
     coverage
     Twisted>=15.1
@@ -15,6 +15,15 @@ deps =
 setenv =
     PYTHONDONTWRITEBYTECODE = no_byte_code
 
+[testenv]
+deps =
+    {[base]deps}
+
+setenv =
+    {[base]setenv}
+
+passenv = *
+
 commands =
     /usr/bin/find "{toxinidir}" -name '*.pyc' -delete
     coverage run {env:COVERAGE_OPTS:} --source="{toxinidir}/synapse" \
@@ -46,6 +55,15 @@ commands =
 # )
 usedevelop=true
 
+[testenv:py27-postgres]
+usedevelop=true
+deps =
+    {[base]deps}
+    psycopg2
+setenv =
+    {[base]setenv}
+    SYNAPSE_POSTGRES = 1
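+# tests/utils.py reads SYNAPSE_POSTGRES and, when it is set, runs the suite
+# against PostgreSQL instead of SQLite.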
+
 [testenv:py36]
 usedevelop=true
 commands =
@@ -102,3 +120,15 @@ basepython = python2.7
 deps =
     flake8
 commands = /bin/sh -c "flake8 synapse tests {env:PEP8SUFFIX:}"
+
+[testenv:check_isort]
+skip_install = True
+deps = isort
+commands = /bin/sh -c "isort -c -sp setup.cfg -rc synapse tests"
+
+[testenv:check-newsfragment]
+skip_install = True
+deps = towncrier>=18.6.0rc1
+commands =
+   python -m towncrier.check --compare-with=origin/develop
+basepython = python3.6