author    David Baker <dave@matrix.org>  2016-08-11 14:09:13 +0100
committer David Baker <dave@matrix.org>  2016-08-11 14:09:13 +0100
commit    b4ecf0b886c67437901e0af457c5f801ebde9a72
tree      ef66b0684edcfeb4ad68d20375641f4654393f44
parent    Include the ts the notif was received at
parent    Merge pull request #1003 from matrix-org/erikj/redaction_prev_content
download  synapse-b4ecf0b886c67437901e0af457c5f801ebde9a72.tar.xz

Merge remote-tracking branch 'origin/develop' into dbkr/notifications_api

Diffstat
-rw-r--r--  CHANGES.rst | 302
-rw-r--r--  MANIFEST.in | 3
-rw-r--r--  README.rst | 21
-rw-r--r--  UPGRADE.rst | 2
-rw-r--r--  contrib/systemd/synapse.service | 1
-rw-r--r--  docs/admin_api/README.rst | 12
-rw-r--r--  docs/admin_api/purge_history_api.rst | 15
-rw-r--r--  docs/admin_api/purge_remote_media.rst | 19
-rw-r--r--  docs/application_services.rst | 3
-rw-r--r--  docs/code_style.rst | 7
-rw-r--r--  docs/turn-howto.rst | 19
-rwxr-xr-x  jenkins-dendron-postgres.sh | 22
-rwxr-xr-x  jenkins-postgres.sh | 58
-rwxr-xr-x  jenkins-sqlite.sh | 52
-rwxr-xr-x  jenkins-unittests.sh | 4
-rwxr-xr-x  jenkins/clone.sh | 44
-rwxr-xr-x  jenkins/prepare_synapse.sh | 19
-rw-r--r--  res/templates/mail.css | 5
-rw-r--r--  res/templates/notif.html | 6
-rw-r--r--  res/templates/notif.txt | 6
-rw-r--r--  res/templates/notif_mail.html | 18
-rw-r--r--  scripts-dev/federation_client.py | 13
-rwxr-xr-x  scripts/hash_password | 18
-rwxr-xr-x  scripts/register_new_matrix_user | 32
-rwxr-xr-x  scripts/synapse_port_db | 175
-rw-r--r--  setup.cfg | 6
-rw-r--r--  synapse/__init__.py | 2
-rw-r--r--  synapse/api/auth.py | 188
-rw-r--r--  synapse/api/errors.py | 4
-rw-r--r--  synapse/api/filtering.py | 23
-rw-r--r--  synapse/app/__init__.py | 8
-rw-r--r--  synapse/app/federation_reader.py | 206
-rwxr-xr-x  synapse/app/homeserver.py | 16
-rw-r--r--  synapse/app/pusher.py | 136
-rw-r--r--  synapse/app/synchrotron.py | 465
-rwxr-xr-x  synapse/app/synctl.py | 178
-rw-r--r--  synapse/appservice/scheduler.py | 14
-rw-r--r--  synapse/config/_base.py | 147
-rw-r--r--  synapse/config/captcha.py | 1
-rw-r--r--  synapse/config/emailconfig.py | 2
-rw-r--r--  synapse/config/homeserver.py | 4
-rw-r--r--  synapse/config/ldap.py | 102
-rw-r--r--  synapse/config/logger.py | 102
-rw-r--r--  synapse/config/password.py | 4
-rw-r--r--  synapse/config/server.py | 55
-rw-r--r--  synapse/config/workers.py | 31
-rw-r--r--  synapse/crypto/keyclient.py | 12
-rw-r--r--  synapse/crypto/keyring.py | 180
-rw-r--r--  synapse/events/utils.py | 2
-rw-r--r--  synapse/federation/federation_base.py | 3
-rw-r--r--  synapse/federation/federation_client.py | 209
-rw-r--r--  synapse/federation/federation_server.py | 280
-rw-r--r--  synapse/federation/replication.py | 2
-rw-r--r--  synapse/federation/transaction_queue.py | 381
-rw-r--r--  synapse/federation/transport/client.py | 34
-rw-r--r--  synapse/federation/transport/server.py | 176
-rw-r--r--  synapse/handlers/__init__.py | 33
-rw-r--r--  synapse/handlers/_base.py | 19
-rw-r--r--  synapse/handlers/appservice.py | 15
-rw-r--r--  synapse/handlers/auth.py | 415
-rw-r--r--  synapse/handlers/device.py | 181
-rw-r--r--  synapse/handlers/directory.py | 3
-rw-r--r--  synapse/handlers/e2e_keys.py | 139
-rw-r--r--  synapse/handlers/federation.py | 189
-rw-r--r--  synapse/handlers/identity.py | 41
-rw-r--r--  synapse/handlers/message.py | 98
-rw-r--r--  synapse/handlers/presence.py | 172
-rw-r--r--  synapse/handlers/profile.py | 19
-rw-r--r--  synapse/handlers/register.py | 78
-rw-r--r--  synapse/handlers/room.py | 93
-rw-r--r--  synapse/handlers/room_member.py | 20
-rw-r--r--  synapse/handlers/sync.py | 1154
-rw-r--r--  synapse/handlers/typing.py | 67
-rw-r--r--  synapse/http/client.py | 28
-rw-r--r--  synapse/http/matrixfederationclient.py | 4
-rw-r--r--  synapse/http/server.py | 1
-rw-r--r--  synapse/metrics/__init__.py | 62
-rw-r--r--  synapse/metrics/metric.py | 84
-rw-r--r--  synapse/notifier.py | 24
-rw-r--r--  synapse/push/action_generator.py | 2
-rw-r--r--  synapse/push/bulk_push_rule_evaluator.py | 79
-rw-r--r--  synapse/push/clientformat.py | 18
-rw-r--r--  synapse/push/emailpusher.py | 51
-rw-r--r--  synapse/push/httppusher.py | 19
-rw-r--r--  synapse/push/mailer.py | 120
-rw-r--r--  synapse/push/push_tools.py | 32
-rw-r--r--  synapse/python_dependencies.py | 6
-rw-r--r--  synapse/replication/presence_resource.py | 59
-rw-r--r--  synapse/replication/resource.py | 2
-rw-r--r--  synapse/replication/slave/storage/account_data.py | 41
-rw-r--r--  synapse/replication/slave/storage/appservice.py | 30
-rw-r--r--  synapse/replication/slave/storage/directory.py | 23
-rw-r--r--  synapse/replication/slave/storage/events.py | 51
-rw-r--r--  synapse/replication/slave/storage/filtering.py | 25
-rw-r--r--  synapse/replication/slave/storage/keys.py | 33
-rw-r--r--  synapse/replication/slave/storage/presence.py | 59
-rw-r--r--  synapse/replication/slave/storage/push_rule.py | 67
-rw-r--r--  synapse/replication/slave/storage/receipts.py | 25
-rw-r--r--  synapse/replication/slave/storage/registration.py | 30
-rw-r--r--  synapse/replication/slave/storage/room.py | 21
-rw-r--r--  synapse/replication/slave/storage/transactions.py | 30
-rw-r--r--  synapse/rest/__init__.py | 2
-rw-r--r--  synapse/rest/client/v1/admin.py | 77
-rw-r--r--  synapse/rest/client/v1/base.py | 4
-rw-r--r--  synapse/rest/client/v1/events.py | 45
-rw-r--r--  synapse/rest/client/v1/login.py | 235
-rw-r--r--  synapse/rest/client/v1/push_rule.py | 6
-rw-r--r--  synapse/rest/client/v1/pusher.py | 57
-rw-r--r--  synapse/rest/client/v1/register.py | 33
-rw-r--r--  synapse/rest/client/v1/room.py | 25
-rw-r--r--  synapse/rest/client/v2_alpha/_base.py | 13
-rw-r--r--  synapse/rest/client/v2_alpha/account.py | 118
-rw-r--r--  synapse/rest/client/v2_alpha/auth.py | 2
-rw-r--r--  synapse/rest/client/v2_alpha/devices.py | 100
-rw-r--r--  synapse/rest/client/v2_alpha/keys.py | 102
-rw-r--r--  synapse/rest/client/v2_alpha/register.py | 345
-rw-r--r--  synapse/rest/client/v2_alpha/tokenrefresh.py | 12
-rw-r--r--  synapse/rest/client/versions.py | 6
-rw-r--r--  synapse/rest/key/v2/remote_key_resource.py | 4
-rw-r--r--  synapse/rest/media/v0/content_repository.py | 112
-rw-r--r--  synapse/rest/media/v1/filepath.py | 6
-rw-r--r--  synapse/rest/media/v1/media_repository.py | 105
-rw-r--r--  synapse/rest/media/v1/preview_url_resource.py | 98
-rw-r--r--  synapse/server.py | 68
-rw-r--r--  synapse/server.pyi | 29
-rw-r--r--  synapse/state.py | 6
-rw-r--r--  synapse/storage/__init__.py | 67
-rw-r--r--  synapse/storage/_base.py | 25
-rw-r--r--  synapse/storage/account_data.py | 3
-rw-r--r--  synapse/storage/appservice.py | 21
-rw-r--r--  synapse/storage/background_updates.py | 98
-rw-r--r--  synapse/storage/client_ips.py | 145
-rw-r--r--  synapse/storage/devices.py | 137
-rw-r--r--  synapse/storage/end_to_end_keys.py | 75
-rw-r--r--  synapse/storage/engines/__init__.py | 2
-rw-r--r--  synapse/storage/engines/postgres.py | 13
-rw-r--r--  synapse/storage/engines/sqlite3.py | 2
-rw-r--r--  synapse/storage/event_push_actions.py | 230
-rw-r--r--  synapse/storage/events.py | 812
-rw-r--r--  synapse/storage/keys.py | 38
-rw-r--r--  synapse/storage/media_repository.py | 44
-rw-r--r--  synapse/storage/prepare_database.py | 2
-rw-r--r--  synapse/storage/presence.py | 3
-rw-r--r--  synapse/storage/push_rule.py | 70
-rw-r--r--  synapse/storage/pusher.py | 38
-rw-r--r--  synapse/storage/receipts.py | 28
-rw-r--r--  synapse/storage/registration.py | 207
-rw-r--r--  synapse/storage/room.py | 32
-rw-r--r--  synapse/storage/roommember.py | 29
-rw-r--r--  synapse/storage/schema/delta/30/as_users.py | 4
-rw-r--r--  synapse/storage/schema/delta/33/access_tokens_device_index.sql | 17
-rw-r--r--  synapse/storage/schema/delta/33/devices.sql | 21
-rw-r--r--  synapse/storage/schema/delta/33/devices_for_e2e_keys.sql | 19
-rw-r--r--  synapse/storage/schema/delta/33/devices_for_e2e_keys_clear_unknown_device.sql | 20
-rw-r--r--  synapse/storage/schema/delta/33/event_fields.py | 60
-rw-r--r--  synapse/storage/schema/delta/33/refreshtoken_device.sql | 16
-rw-r--r--  synapse/storage/schema/delta/33/refreshtoken_device_index.sql | 17
-rw-r--r--  synapse/storage/schema/delta/33/remote_media_ts.py | 31
-rw-r--r--  synapse/storage/schema/delta/33/user_ips_index.sql | 17
-rw-r--r--  synapse/storage/search.py | 29
-rw-r--r--  synapse/storage/signatures.py | 25
-rw-r--r--  synapse/storage/stream.py | 238
-rw-r--r--  synapse/storage/tags.py | 3
-rw-r--r--  synapse/storage/transactions.py | 3
-rw-r--r--  synapse/types.py | 38
-rw-r--r--  synapse/util/async.py | 91
-rw-r--r--  synapse/util/caches/__init__.py | 20
-rw-r--r--  synapse/util/caches/descriptors.py | 52
-rw-r--r--  synapse/util/caches/dictionary_cache.py | 8
-rw-r--r--  synapse/util/caches/expiringcache.py | 8
-rw-r--r--  synapse/util/caches/response_cache.py | 13
-rw-r--r--  synapse/util/caches/stream_change_cache.py | 16
-rw-r--r--  synapse/util/distributor.py | 4
-rw-r--r--  synapse/util/logcontext.py | 1
-rw-r--r--  synapse/util/metrics.py | 19
-rw-r--r--  synapse/util/presentable_names.py | 29
-rw-r--r--  synapse/util/retryutils.py | 2
-rw-r--r--  synapse/util/versionstring.py | 8
-rw-r--r--  tests/api/test_auth.py | 41
-rw-r--r--  tests/config/test_generate.py | 2
-rw-r--r--  tests/config/test_load.py | 22
-rw-r--r--  tests/handlers/test_appservice.py | 6
-rw-r--r--  tests/handlers/test_auth.py | 52
-rw-r--r--  tests/handlers/test_device.py | 184
-rw-r--r--  tests/handlers/test_e2e_keys.py | 46
-rw-r--r--  tests/handlers/test_presence.py | 16
-rw-r--r--  tests/handlers/test_profile.py | 10
-rw-r--r--  tests/handlers/test_register.py | 39
-rw-r--r--  tests/handlers/test_typing.py | 4
-rw-r--r--  tests/metrics/test_metric.py | 23
-rw-r--r--  tests/replication/slave/storage/test_events.py | 41
-rw-r--r--  tests/replication/test_resource.py | 20
-rw-r--r--  tests/rest/client/v1/test_profile.py | 13
-rw-r--r--  tests/rest/client/v2_alpha/test_register.py | 28
-rw-r--r--  tests/storage/event_injector.py | 1
-rw-r--r--  tests/storage/test_appservice.py | 2
-rw-r--r--  tests/storage/test_background_update.py | 20
-rw-r--r--  tests/storage/test_client_ips.py | 62
-rw-r--r--  tests/storage/test_devices.py | 105
-rw-r--r--  tests/storage/test_end_to_end_keys.py | 90
-rw-r--r--  tests/storage/test_event_push_actions.py | 41
-rw-r--r--  tests/storage/test_events.py | 12
-rw-r--r--  tests/storage/test_registration.py | 55
-rw-r--r--  tests/test_preview.py | 139
-rw-r--r--  tests/unittest.py | 11
-rw-r--r--  tests/util/test_rwlock.py | 85
-rw-r--r--  tests/utils.py | 24
-rw-r--r--  tox.ini | 4
208 files changed, 9806 insertions, 3563 deletions
diff --git a/CHANGES.rst b/CHANGES.rst
index 8c180750ad..7ebb42b0fc 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -1,3 +1,297 @@
+Changes in synapse v0.17.0 (2016-08-08)
+=======================================
+
+This release contains significant security bug fixes regarding authenticating
+events received over federation. PLEASE UPGRADE.
+
+This release changes the LDAP configuration format in a backwards incompatible
+way, see PR #843 for details.
+
+
+Changes:
+
+* Add federation /version API (PR #990)
+* Make psutil dependency optional (PR #992)
+
+
+Bug fixes:
+
+* Fix URL preview API to exclude HTML comments in description (PR #988)
+* Fix error handling of remote joins (PR #991)
+
+
+Changes in synapse v0.17.0-rc4 (2016-08-05)
+===========================================
+
+Changes:
+
+* Change the way we summarize URLs when previewing (PR #973)
+* Add new ``/state_ids/`` federation API (PR #979)
+* Speed up processing of ``/state/`` response (PR #986)
+
+Bug fixes:
+
+* Fix event persistence when event has already been partially persisted
+  (PR #975, #983, #985)
+* Fix port script to also copy across backfilled events (PR #982)
+
+
+Changes in synapse v0.17.0-rc3 (2016-08-02)
+===========================================
+
+Changes:
+
+* Forbid non-ASes from registering users whose names begin with '_' (PR #958)
+* Add some basic admin API docs (PR #963)
+
+
+Bug fixes:
+
+* Send the correct host header when fetching keys (PR #941)
+* Fix joining a room that has missing auth events (PR #964)
+* Fix various push bugs (PR #966, #970)
+* Fix adding emails on registration (PR #968)
+
+
+Changes in synapse v0.17.0-rc2 (2016-08-02)
+===========================================
+
+(This release did not include the changes advertised and was identical to RC1)
+
+
+Changes in synapse v0.17.0-rc1 (2016-07-28)
+===========================================
+
+This release changes the LDAP configuration format in a backwards incompatible
+way, see PR #843 for details.
+
+
+Features:
+
+* Add purge_media_cache admin API (PR #902)
+* Add deactivate account admin API (PR #903)
+* Add optional pepper to password hashing (PR #907, #910 by KentShikama)
+* Add an admin option to shared secret registration (breaks backwards compat)
+  (PR #909)
+* Add purge local room history API (PR #911, #923, #924)
+* Add requestToken endpoints (PR #915)
+* Add an /account/deactivate endpoint (PR #921)
+* Add filter param to /messages. Add 'contains_url' to filter. (PR #922)
+* Add device_id support to /login (PR #929)
+* Add device_id support to /v2/register flow. (PR #937, #942)
+* Add GET /devices endpoint (PR #939, #944)
+* Add GET /device/{deviceId} (PR #943)
+* Add update and delete APIs for devices (PR #949)
+
+
+Changes:
+
+* Rewrite LDAP Authentication against ldap3 (PR #843 by mweinelt)
+* Linearize some federation endpoints based on (origin, room_id) (PR #879)
+* Remove the legacy v0 content upload API. (PR #888)
+* Use similar naming we use in email notifs for push (PR #894)
+* Optionally include password hash in createUser endpoint (PR #905 by
+  KentShikama)
+* Use a query that postgresql optimises better for get_events_around (PR #906)
+* Fall back to 'username' if 'user' is not given for appservice registration.
+  (PR #927 by Half-Shot)
+* Add metrics for psutil derived memory usage (PR #936)
+* Record device_id in client_ips (PR #938)
+* Send the correct host header when fetching keys (PR #941)
+* Log the hostname the reCAPTCHA was completed on (PR #946)
+* Make the device id on e2e key upload optional (PR #956)
+* Add r0.2.0 to the "supported versions" list (PR #960)
+* Don't include name of room for invites in push (PR #961)
+
+
+Bug fixes:
+
+* Fix substitution failure in mail template (PR #887)
+* Put most recent 20 messages in email notif (PR #892)
+* Ensure that the guest user is in the database when upgrading accounts
+  (PR #914)
+* Fix various edge cases in auth handling (PR #919)
+* Fix 500 ISE when sending alias event without a state_key (PR #925)
+* Fix bug where we stored rejections in the state_group, persist all
+  rejections (PR #948)
+* Fix lack of check of if the user is banned when handling 3pid invites
+  (PR #952)
+* Fix a couple of bugs in the transaction and keyring code (PR #954, #955)
+
+
+
+Changes in synapse v0.16.1-r1 (2016-07-08)
+==========================================
+
+THIS IS A CRITICAL SECURITY UPDATE.
+
+This fixes a bug which allowed users' accounts to be accessed by unauthorised
+users.
+
+Changes in synapse v0.16.1 (2016-06-20)
+=======================================
+
+Bug fixes:
+
+* Fix assorted bugs in ``/preview_url`` (PR #872)
+* Fix TypeError when setting unicode passwords (PR #873)
+
+
+Performance improvements:
+
+* Turn ``use_frozen_events`` off by default (PR #877)
+* Disable responding with canonical json for federation (PR #878)
+
+
+Changes in synapse v0.16.1-rc1 (2016-06-15)
+===========================================
+
+Features: None
+
+Changes:
+
+* Log requester for ``/publicRoom`` endpoints when possible (PR #856)
+* 502 on ``/thumbnail`` when can't connect to remote server (PR #862)
+* Linearize fetching of gaps on incoming events (PR #871)
+
+
+Bugs fixes:
+
+* Fix bug where rooms where marked as published by default (PR #857)
+* Fix bug where joining room with an event with invalid sender (PR #868)
+* Fix bug where backfilled events were sent down sync streams (PR #869)
+* Fix bug where outgoing connections could wedge indefinitely, causing push
+  notifications to be unreliable (PR #870)
+
+
+Performance improvements:
+
+* Improve ``/publicRooms`` performance(PR #859)
+
+
+Changes in synapse v0.16.0 (2016-06-09)
+=======================================
+
+NB: As of v0.14 all AS config files must have an ID field.
+
+
+Bug fixes:
+
+* Don't make rooms published by default (PR #857)
+
+Changes in synapse v0.16.0-rc2 (2016-06-08)
+===========================================
+
+Features:
+
+* Add configuration option for tuning GC via ``gc.set_threshold`` (PR #849)
+
+Changes:
+
+* Record metrics about GC (PR #771, #847, #852)
+* Add metric counter for number of persisted events (PR #841)
+
+Bug fixes:
+
+* Fix 'From' header in email notifications (PR #843)
+* Fix presence where timeouts were not being fired for the first 8h after
+  restarts (PR #842)
+* Fix bug where synapse sent malformed transactions to AS's when retrying
+  transactions (Commits 310197b, 8437906)
+
+Performance improvements:
+
+* Remove event fetching from DB threads (PR #835)
+* Change the way we cache events (PR #836)
+* Add events to cache when we persist them (PR #840)
+
+
+Changes in synapse v0.16.0-rc1 (2016-06-03)
+===========================================
+
+Version 0.15 was not released. See v0.15.0-rc1 below for additional changes.
+
+Features:
+
+* Add email notifications for missed messages (PR #759, #786, #799, #810, #815,
+  #821)
+* Add a ``url_preview_ip_range_whitelist`` config param (PR #760)
+* Add /report endpoint (PR #762)
+* Add basic ignore user API (PR #763)
+* Add an openidish mechanism for proving that you own a given user_id (PR #765)
+* Allow clients to specify a server_name to avoid 'No known servers' (PR #794)
+* Add secondary_directory_servers option to fetch room list from other servers
+  (PR #808, #813)
+
+Changes:
+
+* Report per request metrics for all of the things using request_handler (PR
+  #756)
+* Correctly handle ``NULL`` password hashes from the database (PR #775)
+* Allow receipts for events we haven't seen in the db (PR #784)
+* Make synctl read a cache factor from config file (PR #785)
+* Increment badge count per missed convo, not per msg (PR #793)
+* Special case m.room.third_party_invite event auth to match invites (PR #814)
+
+
+Bug fixes:
+
+* Fix typo in event_auth servlet path (PR #757)
+* Fix password reset (PR #758)
+
+
+Performance improvements:
+
+* Reduce database inserts when sending transactions (PR #767)
+* Queue events by room for persistence (PR #768)
+* Add cache to ``get_user_by_id`` (PR #772)
+* Add and use ``get_domain_from_id`` (PR #773)
+* Use tree cache for ``get_linearized_receipts_for_room`` (PR #779)
+* Remove unused indices (PR #782)
+* Add caches to ``bulk_get_push_rules*`` (PR #804)
+* Cache ``get_event_reference_hashes`` (PR #806)
+* Add ``get_users_with_read_receipts_in_room`` cache (PR #809)
+* Use state to calculate ``get_users_in_room`` (PR #811)
+* Load push rules in storage layer so that they get cached (PR #825)
+* Make ``get_joined_hosts_for_room`` use get_users_in_room (PR #828)
+* Poke notifier on next reactor tick (PR #829)
+* Change CacheMetrics to be quicker (PR #830)
+
+
+Changes in synapse v0.15.0-rc1 (2016-04-26)
+===========================================
+
+Features:
+
+* Add login support for Javascript Web Tokens, thanks to Niklas Riekenbrauck
+  (PR #671,#687)
+* Add URL previewing support (PR #688)
+* Add login support for LDAP, thanks to Christoph Witzany (PR #701)
+* Add GET endpoint for pushers (PR #716)
+
+Changes:
+
+* Never notify for member events (PR #667)
+* Deduplicate identical ``/sync`` requests (PR #668)
+* Require user to have left room to forget room (PR #673)
+* Use DNS cache if within TTL (PR #677)
+* Let users see their own leave events (PR #699)
+* Deduplicate membership changes (PR #700)
+* Increase performance of pusher code (PR #705)
+* Respond with error status 504 if failed to talk to remote server (PR #731)
+* Increase search performance on postgres (PR #745)
+
+Bug fixes:
+
+* Fix bug where disabling all notifications still resulted in push (PR #678)
+* Fix bug where users couldn't reject remote invites if remote refused (PR #691)
+* Fix bug where synapse attempted to backfill from itself (PR #693)
+* Fix bug where profile information was not correctly added when joining remote
+  rooms (PR #703)
+* Fix bug where register API required incorrect key name for AS registration
+  (PR #727)
+
+
 Changes in synapse v0.14.0 (2016-03-30)
 =======================================
 
@@ -511,7 +805,7 @@ Configuration:
 
 * Add support for changing the bind host of the metrics listener via the
   ``metrics_bind_host`` option.
- 
+
 
 Changes in synapse v0.9.0-r5 (2015-05-21)
 =========================================
@@ -853,7 +1147,7 @@ See UPGRADE for information about changes to the client server API, including
 breaking backwards compatibility with VoIP calls and registration API.
 
 Homeserver:
- * When a user changes their displayname or avatar the server will now update 
+ * When a user changes their displayname or avatar the server will now update
    all their join states to reflect this.
  * The server now adds "age" key to events to indicate how old they are. This
    is clock independent, so at no point does any server or webclient have to
@@ -911,7 +1205,7 @@ Changes in synapse 0.2.2 (2014-09-06)
 =====================================
 
 Homeserver:
- * When the server returns state events it now also includes the previous 
+ * When the server returns state events it now also includes the previous
    content.
  * Add support for inviting people when creating a new room.
  * Make the homeserver inform the room via `m.room.aliases` when a new alias
@@ -923,7 +1217,7 @@ Webclient:
  * Handle `m.room.aliases` events.
  * Asynchronously send messages and show a local echo.
  * Inform the UI when a message failed to send.
- * Only autoscroll on receiving a new message if the user was already at the 
+ * Only autoscroll on receiving a new message if the user was already at the
    bottom of the screen.
  * Add support for ban/kick reasons.
 
diff --git a/MANIFEST.in b/MANIFEST.in
index dfb7c9d28d..981698143f 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -14,6 +14,7 @@ recursive-include docs *
 recursive-include res *
 recursive-include scripts *
 recursive-include scripts-dev *
+recursive-include synapse *.pyi
 recursive-include tests *.py
 
 recursive-include synapse/static *.css
@@ -23,5 +24,7 @@ recursive-include synapse/static *.js
 
 exclude jenkins.sh
 exclude jenkins*.sh
+exclude jenkins*
+recursive-exclude jenkins *.sh
 
 prune demo/etc
diff --git a/README.rst b/README.rst
index 02e7c61d1e..d658670835 100644
--- a/README.rst
+++ b/README.rst
@@ -11,8 +11,8 @@ VoIP.  The basics you need to know to get up and running are:
   like ``#matrix:matrix.org`` or ``#test:localhost:8448``.
 
 - Matrix user IDs look like ``@matthew:matrix.org`` (although in the future
-  you will normally refer to yourself and others using a 3PID: email
-  address, phone number, etc rather than manipulating Matrix user IDs)
+  you will normally refer to yourself and others using a third party identifier 
+  (3PID): email address, phone number, etc rather than manipulating Matrix user IDs)
 
 The overall architecture is::
 
@@ -58,12 +58,13 @@ the spec in the context of a codebase and let you run your own homeserver and
 generally help bootstrap the ecosystem.
 
 In Matrix, every user runs one or more Matrix clients, which connect through to
-a Matrix homeserver which stores all their personal chat history and user
-account information - much as a mail client connects through to an IMAP/SMTP
-server. Just like email, you can either run your own Matrix homeserver and
-control and own your own communications and history or use one hosted by
-someone else (e.g. matrix.org) - there is no single point of control or
-mandatory service provider in Matrix, unlike WhatsApp, Facebook, Hangouts, etc.
+a Matrix homeserver. The homeserver stores all their personal chat history and
+user account information - much as a mail client connects through to an
+IMAP/SMTP server. Just like email, you can either run your own Matrix
+homeserver and control and own your own communications and history or use one
+hosted by someone else (e.g. matrix.org) - there is no single point of control
+or mandatory service provider in Matrix, unlike WhatsApp, Facebook, Hangouts,
+etc.
 
 Synapse ships with two basic demo Matrix clients: webclient (a basic group chat
 web client demo implemented in AngularJS) and cmdclient (a basic Python
@@ -444,7 +445,7 @@ You have two choices here, which will influence the form of your Matrix user
 IDs:
 
 1) Use the machine's own hostname as available on public DNS in the form of
-   its A or AAAA records. This is easier to set up initially, perhaps for
+   its A records. This is easier to set up initially, perhaps for
    testing, but lacks the flexibility of SRV.
 
 2) Set up a SRV record for your domain name. This requires you create a SRV
@@ -617,7 +618,7 @@ Building internal API documentation::
 
 
 
-Halp!! Synapse eats all my RAM!
+Help!! Synapse eats all my RAM!
 ===============================
 
 Synapse's architecture is quite RAM hungry currently - we deliberately
diff --git a/UPGRADE.rst b/UPGRADE.rst
index 699f04c2c2..9f044719a0 100644
--- a/UPGRADE.rst
+++ b/UPGRADE.rst
@@ -27,7 +27,7 @@ running:
     # Pull the latest version of the master branch.
     git pull
     # Update the versions of synapse's python dependencies.
-    python synapse/python_dependencies.py | xargs -n1 pip install
+    python synapse/python_dependencies.py | xargs -n1 pip install --upgrade
 
 
 Upgrading to v0.15.0
diff --git a/contrib/systemd/synapse.service b/contrib/systemd/synapse.service
index 2e8cd21c9e..967a4debfd 100644
--- a/contrib/systemd/synapse.service
+++ b/contrib/systemd/synapse.service
@@ -9,6 +9,7 @@ Description=Synapse Matrix homeserver
 Type=simple
 User=synapse
 Group=synapse
+EnvironmentFile=-/etc/sysconfig/synapse
 WorkingDirectory=/var/lib/synapse
 ExecStart=/usr/bin/python2.7 -m synapse.app.homeserver --config-path=/etc/synapse/homeserver.yaml --log-config=/etc/synapse/log_config.yaml
 
diff --git a/docs/admin_api/README.rst b/docs/admin_api/README.rst
new file mode 100644
index 0000000000..d4f564cfae
--- /dev/null
+++ b/docs/admin_api/README.rst
@@ -0,0 +1,12 @@
+Admin APIs
+==========
+
+This directory includes documentation for the various synapse specific admin
+APIs available.
+
+Only users that are server admins can use these APIs. A user can be marked as a
+server admin by updating the database directly, e.g.:
+
+``UPDATE users SET admin = 1 WHERE name = '@foo:bar.com'``
+
+Restarting may be required for the changes to register.
diff --git a/docs/admin_api/purge_history_api.rst b/docs/admin_api/purge_history_api.rst
new file mode 100644
index 0000000000..986efe40f9
--- /dev/null
+++ b/docs/admin_api/purge_history_api.rst
@@ -0,0 +1,15 @@
+Purge History API
+=================
+
+The purge history API allows server admins to purge historic events from their
+database, reclaiming disk space.
+
+Depending on the amount of history being purged a call to the API may take
+several minutes or longer. During this period users will not be able to
+paginate further back in the room from the point being purged from.
+
+The API is simply:
+
+``POST /_matrix/client/r0/admin/purge_history/<room_id>/<event_id>``
+
+including an ``access_token`` of a server admin.
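
For illustration, a minimal sketch of calling this endpoint with the Python ``requests`` library; the homeserver URL, room ID, event ID and access token are placeholder values, not taken from this commit::

    import requests

    HOMESERVER = "https://localhost:8448"   # hypothetical homeserver URL
    ROOM_ID = "!someroom:example.com"       # hypothetical room ID
    EVENT_ID = "$someevent:example.com"     # history before this event is purged

    resp = requests.post(
        "%s/_matrix/client/r0/admin/purge_history/%s/%s"
        % (HOMESERVER, ROOM_ID, EVENT_ID),
        # the token must belong to a server admin (see README above)
        params={"access_token": "ADMIN_ACCESS_TOKEN"},
    )
    print(resp.status_code)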
diff --git a/docs/admin_api/purge_remote_media.rst b/docs/admin_api/purge_remote_media.rst
new file mode 100644
index 0000000000..b26c6a9e7b
--- /dev/null
+++ b/docs/admin_api/purge_remote_media.rst
@@ -0,0 +1,19 @@
+Purge Remote Media API
+======================
+
+The purge remote media API allows server admins to purge old cached remote
+media. 
+
+The API is::
+
+    POST /_matrix/client/r0/admin/purge_media_cache
+
+    {
+        "before_ts": <unix_timestamp_in_ms>
+    }
+
+Which will remove all cached media that was last accessed before
+``<unix_timestamp_in_ms>``.
+
+If the user re-requests purged remote media, synapse will re-request the media
+from the originating server.
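
A similar sketch for the media purge, again with a placeholder homeserver and token, removing media last accessed more than 30 days ago::

    import time

    import requests

    HOMESERVER = "https://localhost:8448"   # hypothetical homeserver URL
    thirty_days_ms = 30 * 24 * 60 * 60 * 1000
    before_ts = int(time.time() * 1000) - thirty_days_ms   # unix ts in ms

    resp = requests.post(
        "%s/_matrix/client/r0/admin/purge_media_cache" % (HOMESERVER,),
        params={"access_token": "ADMIN_ACCESS_TOKEN"},   # server admin token
        json={"before_ts": before_ts},
    )
    print(resp.json())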
diff --git a/docs/application_services.rst b/docs/application_services.rst
index 7e87ac9ad6..fbc0c7e960 100644
--- a/docs/application_services.rst
+++ b/docs/application_services.rst
@@ -32,5 +32,4 @@ The format of the AS configuration file is as follows:
 
 See the spec_ for further details on how application services work.
 
-.. _spec: https://github.com/matrix-org/matrix-doc/blob/master/specification/25_application_service_api.rst#application-service-api
-
+.. _spec: https://matrix.org/docs/spec/application_service/unstable.html
diff --git a/docs/code_style.rst b/docs/code_style.rst
index dc40a7ab7b..8d73d17beb 100644
--- a/docs/code_style.rst
+++ b/docs/code_style.rst
@@ -43,7 +43,10 @@ Basically, PEP8
   together, or want to deliberately extend or preserve vertical/horizontal
   space)
 
-Comments should follow the google code style. This is so that we can generate
-documentation with sphinx (http://sphinxcontrib-napoleon.readthedocs.org/en/latest/) 
+Comments should follow the `google code style <http://google.github.io/styleguide/pyguide.html?showone=Comments#Comments>`_.
+This is so that we can generate documentation with 
+`sphinx <http://sphinxcontrib-napoleon.readthedocs.org/en/latest/>`_. See the
+`examples <http://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html>`_
+in the sphinx documentation.
 
 Code should pass pep8 --max-line-length=100 without any warnings.
diff --git a/docs/turn-howto.rst b/docs/turn-howto.rst
index e2c73458e2..04c0100715 100644
--- a/docs/turn-howto.rst
+++ b/docs/turn-howto.rst
@@ -9,31 +9,35 @@ the Home Server to generate credentials that are valid for use on the TURN
 server through the use of a secret shared between the Home Server and the
 TURN server.
 
-This document described how to install coturn
-(https://code.google.com/p/coturn/) which also supports the TURN REST API,
+This document describes how to install coturn
+(https://github.com/coturn/coturn) which also supports the TURN REST API,
 and integrate it with synapse.
 
 coturn Setup
 ============
 
+You may be able to setup coturn via your package manager,  or set it up manually using the usual ``configure, make, make install`` process.  
+
  1. Check out coturn::
-      svn checkout http://coturn.googlecode.com/svn/trunk/ coturn
+ 
+      git clone https://github.com/coturn/coturn.git coturn
       cd coturn
 
  2. Configure it::
+ 
       ./configure
 
-    You may need to install libevent2: if so, you should do so
+    You may need to install ``libevent2``: if so, you should do so
     in the way recommended by your operating system.
     You can ignore warnings about lack of database support: a
     database is unnecessary for this purpose.
 
  3. Build and install it::
+ 
       make
       make install
 
- 4. Make a config file in /etc/turnserver.conf. You can customise
-    a config file from turnserver.conf.default. The relevant
+ 4. Create or edit the config file in ``/etc/turnserver.conf``. The relevant
     lines, with example values, are::
 
       lt-cred-mech
@@ -41,7 +45,7 @@ coturn Setup
       static-auth-secret=[your secret key here]
       realm=turn.myserver.org
 
-    See turnserver.conf.default for explanations of the options.
+    See turnserver.conf for explanations of the options.
     One way to generate the static-auth-secret is with pwgen::
 
        pwgen -s 64 1
@@ -54,6 +58,7 @@ coturn Setup
     import your private key and certificate.
 
  7. Start the turn server::
+ 
        bin/turnserver -o
 
 
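For background on the ``static-auth-secret`` mechanism above: under the TURN REST API that coturn implements, per-user credentials are derived from the shared secret on demand rather than stored. A minimal illustrative sketch of that derivation in Python (assumed behaviour of the TURN REST API scheme, not code from this repository)::

    import base64
    import hashlib
    import hmac
    import time

    def turn_credentials(user_id, shared_secret, ttl=86400):
        # The username embeds an expiry timestamp; the password is the
        # base64 HMAC-SHA1 of that username, keyed with static-auth-secret.
        expiry = int(time.time()) + ttl
        username = "%d:%s" % (expiry, user_id)
        mac = hmac.new(shared_secret.encode("utf-8"),
                       username.encode("utf-8"), hashlib.sha1)
        return username, base64.b64encode(mac.digest())
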
diff --git a/jenkins-dendron-postgres.sh b/jenkins-dendron-postgres.sh
new file mode 100755
index 0000000000..68912a8967
--- /dev/null
+++ b/jenkins-dendron-postgres.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+set -eux
+
+: ${WORKSPACE:="$(pwd)"}
+
+export WORKSPACE
+export PYTHONDONTWRITEBYTECODE=yep
+export SYNAPSE_CACHE_FACTOR=1
+
+./jenkins/prepare_synapse.sh
+./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git
+./jenkins/clone.sh dendron https://github.com/matrix-org/dendron.git
+./dendron/jenkins/build_dendron.sh
+./sytest/jenkins/prep_sytest_for_postgres.sh
+
+./sytest/jenkins/install_and_run.sh \
+    --synapse-directory $WORKSPACE \
+    --dendron $WORKSPACE/dendron/bin/dendron \
+    --pusher \
+    --synchrotron \
+    --federation-reader \
diff --git a/jenkins-postgres.sh b/jenkins-postgres.sh
index ae6b111591..f2ca8ccdff 100755
--- a/jenkins-postgres.sh
+++ b/jenkins-postgres.sh
@@ -4,60 +4,14 @@ set -eux
 
 : ${WORKSPACE:="$(pwd)"}
 
+export WORKSPACE
 export PYTHONDONTWRITEBYTECODE=yep
 export SYNAPSE_CACHE_FACTOR=1
 
-# Output test results as junit xml
-export TRIAL_FLAGS="--reporter=subunit"
-export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
-# Write coverage reports to a separate file for each process
-export COVERAGE_OPTS="-p"
-export DUMP_COVERAGE_COMMAND="coverage help"
+./jenkins/prepare_synapse.sh
+./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git
 
-# Output flake8 violations to violations.flake8.log
-# Don't exit with non-0 status code on Jenkins,
-# so that the build steps continue and a later step can decided whether to
-# UNSTABLE or FAILURE this build.
-export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?"
+./sytest/jenkins/prep_sytest_for_postgres.sh
 
-rm .coverage* || echo "No coverage files to remove"
-
-tox --notest -e py27
-
-TOX_BIN=$WORKSPACE/.tox/py27/bin
-python synapse/python_dependencies.py | xargs -n1 $TOX_BIN/pip install
-$TOX_BIN/pip install psycopg2
-$TOX_BIN/pip install lxml
-
-: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"}
-
-if [[ ! -e .sytest-base ]]; then
-  git clone https://github.com/matrix-org/sytest.git .sytest-base --mirror
-else
-  (cd .sytest-base; git fetch -p)
-fi
-
-rm -rf sytest
-git clone .sytest-base sytest --shared
-cd sytest
-
-git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop)
-
-: ${PORT_BASE:=8000}
-
-./jenkins/prep_sytest_for_postgres.sh
-
-echo >&2 "Running sytest with PostgreSQL";
-./jenkins/install_and_run.sh --coverage \
-                             --python $TOX_BIN/python \
-                             --synapse-directory $WORKSPACE \
-                             --port-base $PORT_BASE
-
-cd ..
-cp sytest/.coverage.* .
-
-# Combine the coverage reports
-echo "Combining:" .coverage.*
-$TOX_BIN/python -m coverage combine
-# Output coverage to coverage.xml
-$TOX_BIN/coverage xml -o coverage.xml
+./sytest/jenkins/install_and_run.sh \
+    --synapse-directory $WORKSPACE \
diff --git a/jenkins-sqlite.sh b/jenkins-sqlite.sh
index 9398d9db15..84613d979c 100755
--- a/jenkins-sqlite.sh
+++ b/jenkins-sqlite.sh
@@ -4,54 +4,12 @@ set -eux
 
 : ${WORKSPACE:="$(pwd)"}
 
+export WORKSPACE
 export PYTHONDONTWRITEBYTECODE=yep
 export SYNAPSE_CACHE_FACTOR=1
 
-# Output test results as junit xml
-export TRIAL_FLAGS="--reporter=subunit"
-export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
-# Write coverage reports to a separate file for each process
-export COVERAGE_OPTS="-p"
-export DUMP_COVERAGE_COMMAND="coverage help"
+./jenkins/prepare_synapse.sh
+./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git
 
-# Output flake8 violations to violations.flake8.log
-# Don't exit with non-0 status code on Jenkins,
-# so that the build steps continue and a later step can decided whether to
-# UNSTABLE or FAILURE this build.
-export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?"
-
-rm .coverage* || echo "No coverage files to remove"
-
-tox --notest -e py27
-TOX_BIN=$WORKSPACE/.tox/py27/bin
-python synapse/python_dependencies.py | xargs -n1 $TOX_BIN/pip install
-$TOX_BIN/pip install lxml
-
-: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"}
-
-if [[ ! -e .sytest-base ]]; then
-  git clone https://github.com/matrix-org/sytest.git .sytest-base --mirror
-else
-  (cd .sytest-base; git fetch -p)
-fi
-
-rm -rf sytest
-git clone .sytest-base sytest --shared
-cd sytest
-
-git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop)
-
-: ${PORT_BASE:=8500}
-./jenkins/install_and_run.sh --coverage \
-                             --python $TOX_BIN/python \
-                             --synapse-directory $WORKSPACE \
-                             --port-base $PORT_BASE
-
-cd ..
-cp sytest/.coverage.* .
-
-# Combine the coverage reports
-echo "Combining:" .coverage.*
-$TOX_BIN/python -m coverage combine
-# Output coverage to coverage.xml
-$TOX_BIN/coverage xml -o coverage.xml
+./sytest/jenkins/install_and_run.sh \
+    --synapse-directory $WORKSPACE \
diff --git a/jenkins-unittests.sh b/jenkins-unittests.sh
index 104d511994..6b0c296cff 100755
--- a/jenkins-unittests.sh
+++ b/jenkins-unittests.sh
@@ -22,4 +22,8 @@ export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished w
 
 rm .coverage* || echo "No coverage files to remove"
 
+tox --notest -e py27
+TOX_BIN=$WORKSPACE/.tox/py27/bin
+python synapse/python_dependencies.py | xargs -n1 $TOX_BIN/pip install
+
 tox -e py27
diff --git a/jenkins/clone.sh b/jenkins/clone.sh
new file mode 100755
index 0000000000..ab30ac7782
--- /dev/null
+++ b/jenkins/clone.sh
@@ -0,0 +1,44 @@
+#! /bin/bash
+
+# This clones a project from github into a named subdirectory
+# If the project has a branch with the same name as this branch
+# then it will checkout that branch after cloning.
+# Otherwise it will checkout "origin/develop."
+# The first argument is the name of the directory to checkout
+# the branch into.
+# The second argument is the URL of the remote repository to checkout.
+# Usually something like https://github.com/matrix-org/sytest.git
+
+set -eux
+
+NAME=$1
+PROJECT=$2
+BASE=".$NAME-base"
+
+# Update our mirror.
+if [ ! -d ".$NAME-base" ]; then
+  # Create a local mirror of the source repository.
+  # This saves us from having to download the entire repository
+  # when this script is next run.
+  git clone "$PROJECT" "$BASE" --mirror
+else
+  # Fetch any updates from the source repository.
+  (cd "$BASE"; git fetch -p)
+fi
+
+# Remove the existing repository so that we have a clean copy
+rm -rf "$NAME"
+# Cloning with --shared means that we will share portions of the
+# .git directory with our local mirror.
+git clone "$BASE" "$NAME" --shared
+
+# Jenkins may have supplied us with the name of the branch in the
+# environment. Otherwise we will have to guess based on the current
+# commit.
+: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"}
+cd "$NAME"
+# check out the relevant branch
+git checkout "${GIT_BRANCH}" || (
+    echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop"
+    git checkout "origin/develop"
+)
diff --git a/jenkins/prepare_synapse.sh b/jenkins/prepare_synapse.sh
new file mode 100755
index 0000000000..237223c81b
--- /dev/null
+++ b/jenkins/prepare_synapse.sh
@@ -0,0 +1,19 @@
+#! /bin/bash
+
+cd "`dirname $0`/.."
+
+TOX_DIR=$WORKSPACE/.tox
+
+mkdir -p $TOX_DIR
+
+if ! [ $TOX_DIR -ef .tox ]; then
+    ln -s "$TOX_DIR" .tox
+fi
+
+# set up the virtualenv
+tox -e py27 --notest -v
+
+TOX_BIN=$TOX_DIR/py27/bin
+python synapse/python_dependencies.py | xargs -n1 $TOX_BIN/pip install
+$TOX_BIN/pip install lxml
+$TOX_BIN/pip install psycopg2
diff --git a/res/templates/mail.css b/res/templates/mail.css
index f2b5e84abc..5ab3e1b06d 100644
--- a/res/templates/mail.css
+++ b/res/templates/mail.css
@@ -145,6 +145,11 @@ pre, code {
     text-decoration: none;
 }
 
+.debug {
+    font-size: 10px;
+    color: #888;
+}
+
 .footer {
     margin-top: 20px;
     text-align: center;
diff --git a/res/templates/notif.html b/res/templates/notif.html
index 834840861e..88b921ca9c 100644
--- a/res/templates/notif.html
+++ b/res/templates/notif.html
@@ -17,11 +17,15 @@
         </td>
         <td class="message_contents">
             {% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %}
-                <div class="sender_name">{{ message.sender_name }}</div>
+                <div class="sender_name">{% if message.msgtype == "m.emote" %}*{% endif %} {{ message.sender_name }}</div>
             {% endif %}
             <div class="message_body">
                 {% if message.msgtype == "m.text" %}
                     {{ message.body_text_html }}
+                {% elif message.msgtype == "m.emote" %}
+                    {{ message.body_text_html }}
+                {% elif message.msgtype == "m.notice" %}
+                    {{ message.body_text_html }}
                 {% elif message.msgtype == "m.image" %}
                     <img src="{{ message.image_url|mxc_to_http(640, 480, scale) }}" />
                 {% elif message.msgtype == "m.file" %}
diff --git a/res/templates/notif.txt b/res/templates/notif.txt
index a3ddac80ce..a37bee9833 100644
--- a/res/templates/notif.txt
+++ b/res/templates/notif.txt
@@ -1,7 +1,11 @@
 {% for message in notif.messages %}
-{{ message.sender_name }} ({{ message.ts|format_ts("%H:%M") }})
+{% if message.msgtype == "m.emote" %}* {% endif %}{{ message.sender_name }} ({{ message.ts|format_ts("%H:%M") }})
 {% if message.msgtype == "m.text" %}
 {{ message.body_text_plain }}
+{% elif message.msgtype == "m.emote" %}
+{{ message.body_text_plain }}
+{% elif message.msgtype == "m.notice" %}
+{{ message.body_text_plain }}
 {% elif message.msgtype == "m.image" %}
 {{ message.body_text_plain }}
 {% elif message.msgtype == "m.file" %}
diff --git a/res/templates/notif_mail.html b/res/templates/notif_mail.html
index dc13398df1..535bea764d 100644
--- a/res/templates/notif_mail.html
+++ b/res/templates/notif_mail.html
@@ -30,18 +30,20 @@
                         {% include 'room.html' with context %}
                     {% endfor %}
                     <div class="footer">
-                        <small>
-                            Sending email at {{ reason.now|format_ts("%c") }} due to activity in room '{{ reason.room_name }}' because:<br/>
-                            1. An event was received at {{ reason.received_at|format_ts("%c") }}
-                            which is more than {{ "%.1f"|format(reason.delay_before_mail_ms / (60*1000)) }} (delay_before_mail_ms) mins ago.<br/>
+                        <a href="{{ unsubscribe_link }}">Unsubscribe</a>
+                        <br/>
+                        <br/>
+                        <div class="debug">
+                            Sending email at {{ reason.now|format_ts("%c") }} due to activity in room {{ reason.room_name }} because
+                            an event was received at {{ reason.received_at|format_ts("%c") }}
+                            which is more than {{ "%.1f"|format(reason.delay_before_mail_ms / (60*1000)) }} ({{ reason.delay_before_mail_ms }}) mins ago,
                             {% if reason.last_sent_ts %}
-                                2. The last time we sent a mail for this room was {{ reason.last_sent_ts|format_ts("%c") }},
+                                and the last time we sent a mail for this room was {{ reason.last_sent_ts|format_ts("%c") }},
                                 which is more than {{ "%.1f"|format(reason.throttle_ms / (60*1000)) }} (current throttle_ms) mins ago.
                             {% else %}
-                                2. We can't remember the last time we sent a mail for this room.
+                                and we don't have a last time we sent a mail for this room.
                             {% endif %}
-                        </small>
-                        <a href="{{ unsubscribe_link }}">Unsubscribe</a>
+                        </div>
                     </div>
                 </td>
                 <td> </td>
diff --git a/scripts-dev/federation_client.py b/scripts-dev/federation_client.py
index ea62dceb36..d1ab42d3af 100644
--- a/scripts-dev/federation_client.py
+++ b/scripts-dev/federation_client.py
@@ -116,17 +116,19 @@ def get_json(origin_name, origin_key, destination, path):
     authorization_headers = []
 
     for key, sig in signed_json["signatures"][origin_name].items():
-        authorization_headers.append(bytes(
-            "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % (
-                origin_name, key, sig,
-            )
-        ))
+        header = "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % (
+            origin_name, key, sig,
+        )
+        authorization_headers.append(bytes(header))
+        sys.stderr.write(header)
+        sys.stderr.write("\n")
 
     result = requests.get(
         lookup(destination, path),
         headers={"Authorization": authorization_headers[0]},
         verify=False,
     )
+    sys.stderr.write("Status Code: %d\n" % (result.status_code,))
     return result.json()
 
 
@@ -141,6 +143,7 @@ def main():
     )
 
     json.dump(result, sys.stdout)
+    print ""
 
 if __name__ == "__main__":
     main()
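
For reference, a standalone sketch of the ``X-Matrix`` Authorization header format the script now logs, with placeholder values::

    def x_matrix_header(origin_name, key_id, sig):
        # key_id is a signing key ID such as "ed25519:auto" (example value)
        return "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % (
            origin_name, key_id, sig,
        )

    print(x_matrix_header("example.com", "ed25519:auto", "<base64 signature>"))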
diff --git a/scripts/hash_password b/scripts/hash_password
index e784600989..215ab25cfe 100755
--- a/scripts/hash_password
+++ b/scripts/hash_password
@@ -1,10 +1,16 @@
 #!/usr/bin/env python
 
 import argparse
+
+import sys
+
 import bcrypt
 import getpass
 
+import yaml
+
 bcrypt_rounds=12
+password_pepper = ""
 
 def prompt_for_pass():
     password = getpass.getpass("Password: ")
@@ -28,12 +34,22 @@ if __name__ == "__main__":
         default=None,
         help="New password for user. Will prompt if omitted.",
     )
+    parser.add_argument(
+        "-c", "--config",
+        type=argparse.FileType('r'),
+        help="Path to server config file. Used to read in bcrypt_rounds and password_pepper.",
+    )
 
     args = parser.parse_args()
+    if "config" in args and args.config:
+        config = yaml.safe_load(args.config)
+        bcrypt_rounds = config.get("bcrypt_rounds", bcrypt_rounds)
+        password_config = config.get("password_config", {})
+        password_pepper = password_config.get("pepper", password_pepper)
     password = args.password
 
     if not password:
         password = prompt_for_pass()
 
-    print bcrypt.hashpw(password, bcrypt.gensalt(bcrypt_rounds))
+    print bcrypt.hashpw(password + password_pepper, bcrypt.gensalt(bcrypt_rounds))
 
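A minimal companion sketch (not part of this commit) showing how a hash produced with a pepper would be verified: the same pepper must be appended before calling bcrypt again (inputs assumed to be byte strings, as in the Python 2 script)::

    import bcrypt

    def check_password(password, stored_hash, pepper=""):
        # bcrypt.hashpw() re-uses the salt embedded in stored_hash, so the
        # comparison only succeeds when password + pepper match the hash
        return bcrypt.hashpw(password + pepper, stored_hash) == stored_hash
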
diff --git a/scripts/register_new_matrix_user b/scripts/register_new_matrix_user
index 27a6250b14..12ed20d623 100755
--- a/scripts/register_new_matrix_user
+++ b/scripts/register_new_matrix_user
@@ -25,18 +25,26 @@ import urllib2
 import yaml
 
 
-def request_registration(user, password, server_location, shared_secret):
+def request_registration(user, password, server_location, shared_secret, admin=False):
     mac = hmac.new(
         key=shared_secret,
-        msg=user,
         digestmod=hashlib.sha1,
-    ).hexdigest()
+    )
+
+    mac.update(user)
+    mac.update("\x00")
+    mac.update(password)
+    mac.update("\x00")
+    mac.update("admin" if admin else "notadmin")
+
+    mac = mac.hexdigest()
 
     data = {
         "user": user,
         "password": password,
         "mac": mac,
         "type": "org.matrix.login.shared_secret",
+        "admin": admin,
     }
 
     server_location = server_location.rstrip("/")
@@ -68,7 +76,7 @@ def request_registration(user, password, server_location, shared_secret):
         sys.exit(1)
 
 
-def register_new_user(user, password, server_location, shared_secret):
+def register_new_user(user, password, server_location, shared_secret, admin):
     if not user:
         try:
             default_user = getpass.getuser()
@@ -99,7 +107,14 @@ def register_new_user(user, password, server_location, shared_secret):
             print "Passwords do not match"
             sys.exit(1)
 
-    request_registration(user, password, server_location, shared_secret)
+    if not admin:
+        admin = raw_input("Make admin [no]: ")
+        if admin in ("y", "yes", "true"):
+            admin = True
+        else:
+            admin = False
+
+    request_registration(user, password, server_location, shared_secret, bool(admin))
 
 
 if __name__ == "__main__":
@@ -119,6 +134,11 @@ if __name__ == "__main__":
         default=None,
         help="New password for user. Will prompt if omitted.",
     )
+    parser.add_argument(
+        "-a", "--admin",
+        action="store_true",
+        help="Register new user as an admin. Will prompt if omitted.",
+    )
 
     group = parser.add_mutually_exclusive_group(required=True)
     group.add_argument(
@@ -151,4 +171,4 @@ if __name__ == "__main__":
     else:
         secret = args.shared_secret
 
-    register_new_user(args.user, args.password, args.server_url, secret)
+    register_new_user(args.user, args.password, args.server_url, secret, args.admin)
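
For reference, a standalone sketch of the MAC scheme the updated script uses: an HMAC-SHA1 keyed with the registration shared secret, over the user, password and admin flag, NUL-separated (inputs are byte strings, as in the Python 2 script)::

    import hashlib
    import hmac

    def registration_mac(shared_secret, user, password, admin=False):
        mac = hmac.new(key=shared_secret, digestmod=hashlib.sha1)
        mac.update(user)
        mac.update("\x00")
        mac.update(password)
        mac.update("\x00")
        mac.update("admin" if admin else "notadmin")
        return mac.hexdigest()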
diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db
index efd04da2d6..66c61b0198 100755
--- a/scripts/synapse_port_db
+++ b/scripts/synapse_port_db
@@ -34,7 +34,7 @@ logger = logging.getLogger("synapse_port_db")
 
 
 BOOLEAN_COLUMNS = {
-    "events": ["processed", "outlier"],
+    "events": ["processed", "outlier", "contains_url"],
     "rooms": ["is_public"],
     "event_edges": ["is_state"],
     "presence_list": ["accepted"],
@@ -92,8 +92,12 @@ class Store(object):
 
     _simple_select_onecol_txn = SQLBaseStore.__dict__["_simple_select_onecol_txn"]
     _simple_select_onecol = SQLBaseStore.__dict__["_simple_select_onecol"]
+    _simple_select_one = SQLBaseStore.__dict__["_simple_select_one"]
+    _simple_select_one_txn = SQLBaseStore.__dict__["_simple_select_one_txn"]
     _simple_select_one_onecol = SQLBaseStore.__dict__["_simple_select_one_onecol"]
-    _simple_select_one_onecol_txn = SQLBaseStore.__dict__["_simple_select_one_onecol_txn"]
+    _simple_select_one_onecol_txn = SQLBaseStore.__dict__[
+        "_simple_select_one_onecol_txn"
+    ]
 
     _simple_update_one = SQLBaseStore.__dict__["_simple_update_one"]
     _simple_update_one_txn = SQLBaseStore.__dict__["_simple_update_one_txn"]
@@ -158,31 +162,40 @@ class Porter(object):
     def setup_table(self, table):
         if table in APPEND_ONLY_TABLES:
             # It's safe to just carry on inserting.
-            next_chunk = yield self.postgres_store._simple_select_one_onecol(
+            row = yield self.postgres_store._simple_select_one(
                 table="port_from_sqlite3",
                 keyvalues={"table_name": table},
-                retcol="rowid",
+                retcols=("forward_rowid", "backward_rowid"),
                 allow_none=True,
             )
 
             total_to_port = None
-            if next_chunk is None:
+            if row is None:
                 if table == "sent_transactions":
-                    next_chunk, already_ported, total_to_port = (
+                    forward_chunk, already_ported, total_to_port = (
                         yield self._setup_sent_transactions()
                     )
+                    backward_chunk = 0
                 else:
                     yield self.postgres_store._simple_insert(
                         table="port_from_sqlite3",
-                        values={"table_name": table, "rowid": 1}
+                        values={
+                            "table_name": table,
+                            "forward_rowid": 1,
+                            "backward_rowid": 0,
+                        }
                     )
 
-                    next_chunk = 1
+                    forward_chunk = 1
+                    backward_chunk = 0
                     already_ported = 0
+            else:
+                forward_chunk = row["forward_rowid"]
+                backward_chunk = row["backward_rowid"]
 
             if total_to_port is None:
                 already_ported, total_to_port = yield self._get_total_count_to_port(
-                    table, next_chunk
+                    table, forward_chunk, backward_chunk
                 )
         else:
             def delete_all(txn):
@@ -196,46 +209,85 @@ class Porter(object):
 
             yield self.postgres_store._simple_insert(
                 table="port_from_sqlite3",
-                values={"table_name": table, "rowid": 0}
+                values={
+                    "table_name": table,
+                    "forward_rowid": 1,
+                    "backward_rowid": 0,
+                }
             )
 
-            next_chunk = 1
+            forward_chunk = 1
+            backward_chunk = 0
 
             already_ported, total_to_port = yield self._get_total_count_to_port(
-                table, next_chunk
+                table, forward_chunk, backward_chunk
             )
 
-        defer.returnValue((table, already_ported, total_to_port, next_chunk))
+        defer.returnValue(
+            (table, already_ported, total_to_port, forward_chunk, backward_chunk)
+        )
 
     @defer.inlineCallbacks
-    def handle_table(self, table, postgres_size, table_size, next_chunk):
+    def handle_table(self, table, postgres_size, table_size, forward_chunk,
+                     backward_chunk):
         if not table_size:
             return
 
         self.progress.add_table(table, postgres_size, table_size)
 
         if table == "event_search":
-            yield self.handle_search_table(postgres_size, table_size, next_chunk)
+            yield self.handle_search_table(
+                postgres_size, table_size, forward_chunk, backward_chunk
+            )
             return
 
-        select = (
+        forward_select = (
             "SELECT rowid, * FROM %s WHERE rowid >= ? ORDER BY rowid LIMIT ?"
             % (table,)
         )
 
+        backward_select = (
+            "SELECT rowid, * FROM %s WHERE rowid <= ? ORDER BY rowid LIMIT ?"
+            % (table,)
+        )
+
+        do_forward = [True]
+        do_backward = [True]
+
         while True:
             def r(txn):
-                txn.execute(select, (next_chunk, self.batch_size,))
-                rows = txn.fetchall()
-                headers = [column[0] for column in txn.description]
+                forward_rows = []
+                backward_rows = []
+                if do_forward[0]:
+                    txn.execute(forward_select, (forward_chunk, self.batch_size,))
+                    forward_rows = txn.fetchall()
+                    if not forward_rows:
+                        do_forward[0] = False
+
+                if do_backward[0]:
+                    txn.execute(backward_select, (backward_chunk, self.batch_size,))
+                    backward_rows = txn.fetchall()
+                    if not backward_rows:
+                        do_backward[0] = False
+
+                if forward_rows or backward_rows:
+                    headers = [column[0] for column in txn.description]
+                else:
+                    headers = None
 
-                return headers, rows
+                return headers, forward_rows, backward_rows
 
-            headers, rows = yield self.sqlite_store.runInteraction("select", r)
+            headers, frows, brows = yield self.sqlite_store.runInteraction(
+                "select", r
+            )
 
-            if rows:
-                next_chunk = rows[-1][0] + 1
+            if frows or brows:
+                if frows:
+                    forward_chunk = max(row[0] for row in frows) + 1
+                if brows:
+                    backward_chunk = min(row[0] for row in brows) - 1
 
+                rows = frows + brows
                 self._convert_rows(table, headers, rows)
 
                 def insert(txn):
@@ -247,7 +299,10 @@ class Porter(object):
                         txn,
                         table="port_from_sqlite3",
                         keyvalues={"table_name": table},
-                        updatevalues={"rowid": next_chunk},
+                        updatevalues={
+                            "forward_rowid": forward_chunk,
+                            "backward_rowid": backward_chunk,
+                        },
                     )
 
                 yield self.postgres_store.execute(insert)
@@ -259,7 +314,8 @@ class Porter(object):
                 return
 
     @defer.inlineCallbacks
-    def handle_search_table(self, postgres_size, table_size, next_chunk):
+    def handle_search_table(self, postgres_size, table_size, forward_chunk,
+                            backward_chunk):
         select = (
             "SELECT es.rowid, es.*, e.origin_server_ts, e.stream_ordering"
             " FROM event_search as es"
@@ -270,7 +326,7 @@ class Porter(object):
 
         while True:
             def r(txn):
-                txn.execute(select, (next_chunk, self.batch_size,))
+                txn.execute(select, (forward_chunk, self.batch_size,))
                 rows = txn.fetchall()
                 headers = [column[0] for column in txn.description]
 
@@ -279,7 +335,7 @@ class Porter(object):
             headers, rows = yield self.sqlite_store.runInteraction("select", r)
 
             if rows:
-                next_chunk = rows[-1][0] + 1
+                forward_chunk = rows[-1][0] + 1
 
                 # We have to treat event_search differently since it has a
                 # different structure in the two different databases.
@@ -312,7 +368,10 @@ class Porter(object):
                         txn,
                         table="port_from_sqlite3",
                         keyvalues={"table_name": "event_search"},
-                        updatevalues={"rowid": next_chunk},
+                        updatevalues={
+                            "forward_rowid": forward_chunk,
+                            "backward_rowid": backward_chunk,
+                        },
                     )
 
                 yield self.postgres_store.execute(insert)
@@ -324,7 +383,6 @@ class Porter(object):
             else:
                 return
 
-
     def setup_db(self, db_config, database_engine):
         db_conn = database_engine.module.connect(
             **{
@@ -395,10 +453,32 @@ class Porter(object):
                 txn.execute(
                     "CREATE TABLE port_from_sqlite3 ("
                     " table_name varchar(100) NOT NULL UNIQUE,"
-                    " rowid bigint NOT NULL"
+                    " forward_rowid bigint NOT NULL,"
+                    " backward_rowid bigint NOT NULL"
                     ")"
                 )
 
+            # The old port script created a table with just a "rowid" column.
+            # We want people to be able to rerun this script from an old port
+            # so that they can pick up any missing events that were not
+            # ported across.
+            def alter_table(txn):
+                txn.execute(
+                    "ALTER TABLE IF EXISTS port_from_sqlite3"
+                    " RENAME rowid TO forward_rowid"
+                )
+                txn.execute(
+                    "ALTER TABLE IF EXISTS port_from_sqlite3"
+                    " ADD backward_rowid bigint NOT NULL DEFAULT 0"
+                )
+
+            try:
+                yield self.postgres_store.runInteraction(
+                    "alter_table", alter_table
+                )
+            except Exception as e:
+                logger.info("Failed to alter port table: %s", e)
+
             try:
                 yield self.postgres_store.runInteraction(
                     "create_port_table", create_port_table
@@ -458,7 +538,7 @@ class Porter(object):
     @defer.inlineCallbacks
     def _setup_sent_transactions(self):
         # Only save things from the last day
-        yesterday = int(time.time()*1000) - 86400000
+        yesterday = int(time.time() * 1000) - 86400000
 
         # And save the max transaction id from each destination
         select = (
@@ -514,7 +594,11 @@ class Porter(object):
 
         yield self.postgres_store._simple_insert(
             table="port_from_sqlite3",
-            values={"table_name": "sent_transactions", "rowid": next_chunk}
+            values={
+                "table_name": "sent_transactions",
+                "forward_rowid": next_chunk,
+                "backward_rowid": 0,
+            }
         )
 
         def get_sent_table_size(txn):
@@ -535,13 +619,18 @@ class Porter(object):
         defer.returnValue((next_chunk, inserted_rows, total_count))
 
     @defer.inlineCallbacks
-    def _get_remaining_count_to_port(self, table, next_chunk):
-        rows = yield self.sqlite_store.execute_sql(
+    def _get_remaining_count_to_port(self, table, forward_chunk, backward_chunk):
+        frows = yield self.sqlite_store.execute_sql(
             "SELECT count(*) FROM %s WHERE rowid >= ?" % (table,),
-            next_chunk,
+            forward_chunk,
         )
 
-        defer.returnValue(rows[0][0])
+        brows = yield self.sqlite_store.execute_sql(
+            "SELECT count(*) FROM %s WHERE rowid <= ?" % (table,),
+            backward_chunk,
+        )
+
+        defer.returnValue(frows[0][0] + brows[0][0])
 
     @defer.inlineCallbacks
     def _get_already_ported_count(self, table):
@@ -552,10 +641,10 @@ class Porter(object):
         defer.returnValue(rows[0][0])
 
     @defer.inlineCallbacks
-    def _get_total_count_to_port(self, table, next_chunk):
+    def _get_total_count_to_port(self, table, forward_chunk, backward_chunk):
         remaining, done = yield defer.gatherResults(
             [
-                self._get_remaining_count_to_port(table, next_chunk),
+                self._get_remaining_count_to_port(table, forward_chunk, backward_chunk),
                 self._get_already_ported_count(table),
             ],
             consumeErrors=True,
@@ -686,7 +775,7 @@ class CursesProgress(Progress):
             color = curses.color_pair(2) if perc == 100 else curses.color_pair(1)
 
             self.stdscr.addstr(
-                i+2, left_margin + max_len - len(table),
+                i + 2, left_margin + max_len - len(table),
                 table,
                 curses.A_BOLD | color,
             )
@@ -694,18 +783,18 @@ class CursesProgress(Progress):
             size = 20
 
             progress = "[%s%s]" % (
-                "#" * int(perc*size/100),
-                " " * (size - int(perc*size/100)),
+                "#" * int(perc * size / 100),
+                " " * (size - int(perc * size / 100)),
             )
 
             self.stdscr.addstr(
-                i+2, left_margin + max_len + middle_space,
+                i + 2, left_margin + max_len + middle_space,
                 "%s %3d%% (%d/%d)" % (progress, perc, data["num_done"], data["total"]),
             )
 
         if self.finished:
             self.stdscr.addstr(
-                rows-1, 0,
+                rows - 1, 0,
                 "Press any key to exit...",
             )
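
Taken together, the porter changes replace the single next_chunk cursor with two: forward_chunk sweeps upward from where the last run stopped, while backward_chunk sweeps downward to pick up rows an older version of the script skipped. A toy, synchronous rendering of that loop over a plain list of (rowid, value) pairs (function and variable names are illustrative, not the script's API):

    def port_rows(rows, forward_chunk=1, backward_chunk=0, batch_size=3):
        """Copy sorted (rowid, value) pairs in both directions at once."""
        ported = []
        do_forward, do_backward = True, True
        while do_forward or do_backward:
            frows, brows = [], []
            if do_forward:
                frows = [r for r in rows if r[0] >= forward_chunk][:batch_size]
                do_forward = bool(frows)
            if do_backward:
                brows = [r for r in rows if r[0] <= backward_chunk][-batch_size:]
                do_backward = bool(brows)
            if frows:
                forward_chunk = max(r[0] for r in frows) + 1
            if brows:
                backward_chunk = min(r[0] for r in brows) - 1
            ported.extend(frows + brows)
        return ported

    # A rerun that previously stopped at rowid 5 but still owes rows 1-2:
    print(port_rows([(i, "event%d" % i) for i in range(1, 9)],
                    forward_chunk=5, backward_chunk=2))
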
 
diff --git a/setup.cfg b/setup.cfg
index 5ebce1c56b..da8eafbb39 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -16,7 +16,5 @@ ignore =
 
 [flake8]
 max-line-length = 90
-ignore = W503 ; W503 requires that binary operators be at the end, not start, of lines. Erik doesn't like it.
-
-[pep8]
-max-line-length = 90
+#  W503 requires that binary operators be at the end, not start, of lines. Erik doesn't like it.
+ignore = W503
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 7de51fbe8d..a63ee565cf 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -16,4 +16,4 @@
 """ This is a reference implementation of a Matrix home server.
 """
 
-__version__ = "0.14.0"
+__version__ = "0.17.0"
diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index 2474a1453b..0db26fcfd7 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -13,23 +13,22 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""This module contains classes for authenticating the user."""
+import logging
+
+import pymacaroons
 from canonicaljson import encode_canonical_json
 from signedjson.key import decode_verify_key_bytes
 from signedjson.sign import verify_signed_json, SignatureVerifyException
-
 from twisted.internet import defer
+from unpaddedbase64 import decode_base64
 
+import synapse.types
 from synapse.api.constants import EventTypes, Membership, JoinRules
 from synapse.api.errors import AuthError, Codes, SynapseError, EventSizeError
-from synapse.types import Requester, UserID, get_domain_from_id
-from synapse.util.logutils import log_function
+from synapse.types import UserID, get_domain_from_id
 from synapse.util.logcontext import preserve_context_over_fn
+from synapse.util.logutils import log_function
 from synapse.util.metrics import Measure
-from unpaddedbase64 import decode_base64
-
-import logging
-import pymacaroons
 
 logger = logging.getLogger(__name__)
 
@@ -42,13 +41,20 @@ AuthEventTypes = (
 
 
 class Auth(object):
-
+    """
+    FIXME: This class contains a mix of functions for authenticating users
+    of our client-server API and authenticating events added to room graphs.
+    """
     def __init__(self, hs):
         self.hs = hs
         self.clock = hs.get_clock()
         self.store = hs.get_datastore()
         self.state = hs.get_state_handler()
         self.TOKEN_NOT_FOUND_HTTP_STATUS = 401
+        # Docs for these currently live at
+        # https://github.com/matrix-org/matrix-doc/blob/master/drafts/macaroons_caveats.rst
+        # In addition, we have type == delete_pusher which grants access only to
+        # delete pushers.
         self._KNOWN_CAVEAT_PREFIXES = set([
             "gen = ",
             "guest = ",
@@ -57,7 +63,7 @@ class Auth(object):
             "user_id = ",
         ])
 
-    def check(self, event, auth_events):
+    def check(self, event, auth_events, do_sig_check=True):
         """ Checks if this event is correctly authed.
 
         Args:
@@ -73,6 +79,13 @@ class Auth(object):
 
             if not hasattr(event, "room_id"):
                 raise AuthError(500, "Event has no room_id: %s" % event)
+
+            sender_domain = get_domain_from_id(event.sender)
+
+            # Check the sender's domain has signed the event
+            if do_sig_check and not event.signatures.get(sender_domain):
+                raise AuthError(403, "Event not signed by sending server")
+
             if auth_events is None:
                 # Oh, we don't know what the state of the room was, so we
                 # are trusting that this is allowed (at least for now)
@@ -80,6 +93,12 @@ class Auth(object):
                 return True
 
             if event.type == EventTypes.Create:
+                room_id_domain = get_domain_from_id(event.room_id)
+                if room_id_domain != sender_domain:
+                    raise AuthError(
+                        403,
+                        "Creation event's room_id domain does not match sender's"
+                    )
                 # FIXME
                 return True
 
@@ -102,6 +121,22 @@ class Auth(object):
 
             # FIXME: Temp hack
             if event.type == EventTypes.Aliases:
+                if not event.is_state():
+                    raise AuthError(
+                        403,
+                        "Alias event must be a state event",
+                    )
+                if not event.state_key:
+                    raise AuthError(
+                        403,
+                        "Alias event must have non-empty state_key"
+                    )
+                sender_domain = get_domain_from_id(event.sender)
+                if event.state_key != sender_domain:
+                    raise AuthError(
+                        403,
+                        "Alias event's state_key does not match sender's domain"
+                    )
                 return True
 
             logger.debug(
@@ -120,6 +155,24 @@ class Auth(object):
                 return allowed
 
             self.check_event_sender_in_room(event, auth_events)
+
+            # Special case to allow m.room.third_party_invite events wherever
+            # a user is allowed to issue invites.  Hopefully fixes
+            # https://github.com/vector-im/vector-web/issues/1208
+            if event.type == EventTypes.ThirdPartyInvite:
+                user_level = self._get_user_power_level(event.user_id, auth_events)
+                invite_level = self._get_named_level(auth_events, "invite", 0)
+
+                if user_level < invite_level:
+                    raise AuthError(
+                        403, (
+                            "You cannot issue a third party invite for %s." %
+                            (event.content.display_name,)
+                        )
+                    )
+                else:
+                    return True
+
             self._can_send_event(event, auth_events)
 
             if event.type == EventTypes.PowerLevels:
@@ -323,6 +376,10 @@ class Auth(object):
         if Membership.INVITE == membership and "third_party_invite" in event.content:
             if not self._verify_third_party_invite(event, auth_events):
                 raise AuthError(403, "You are not invited to this room.")
+            if target_banned:
+                raise AuthError(
+                    403, "%s is banned from the room" % (target_user_id,)
+                )
             return True
 
         if Membership.JOIN != membership:
@@ -507,15 +564,13 @@ class Auth(object):
             return default
 
     @defer.inlineCallbacks
-    def get_user_by_req(self, request, allow_guest=False):
+    def get_user_by_req(self, request, allow_guest=False, rights="access"):
         """ Get a registered user's ID.
 
         Args:
             request - An HTTP request with an access_token query parameter.
         Returns:
-            tuple of:
-                UserID (str)
-                Access token ID (str)
+            defer.Deferred: resolves to a ``synapse.types.Requester`` object
         Raises:
             AuthError if no user by that token exists or the token is invalid.
         """
@@ -524,16 +579,18 @@ class Auth(object):
             user_id = yield self._get_appservice_user_id(request.args)
             if user_id:
                 request.authenticated_entity = user_id
-                defer.returnValue(
-                    Requester(UserID.from_string(user_id), "", False)
-                )
+                defer.returnValue(synapse.types.create_requester(user_id))
 
             access_token = request.args["access_token"][0]
-            user_info = yield self.get_user_by_access_token(access_token)
+            user_info = yield self.get_user_by_access_token(access_token, rights)
             user = user_info["user"]
             token_id = user_info["token_id"]
             is_guest = user_info["is_guest"]
 
+            # device_id may not be present if get_user_by_access_token has been
+            # stubbed out.
+            device_id = user_info.get("device_id")
+
             ip_addr = self.hs.get_ip_from_request(request)
             user_agent = request.requestHeaders.getRawHeaders(
                 "User-Agent",
@@ -545,7 +602,8 @@ class Auth(object):
                     user=user,
                     access_token=access_token,
                     ip=ip_addr,
-                    user_agent=user_agent
+                    user_agent=user_agent,
+                    device_id=device_id,
                 )
 
             if is_guest and not allow_guest:
@@ -555,7 +613,8 @@ class Auth(object):
 
             request.authenticated_entity = user.to_string()
 
-            defer.returnValue(Requester(user, token_id, is_guest))
+            defer.returnValue(synapse.types.create_requester(
+                user, token_id, is_guest, device_id))
         except KeyError:
             raise AuthError(
                 self.TOKEN_NOT_FOUND_HTTP_STATUS, "Missing access token.",
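
get_user_by_req now resolves to a Requester built via synapse.types.create_requester, with token id, guest flag and device id defaulted when absent. The sketch below guesses at the shape of such a factory purely from the call sites above; the real synapse.types implementation may differ:

    from collections import namedtuple

    Requester = namedtuple(
        "Requester", ["user", "access_token_id", "is_guest", "device_id"]
    )

    def create_requester(user_id, access_token_id=None, is_guest=False,
                         device_id=None):
        # Mirrors the two call shapes above: create_requester(user_id) for
        # appservice users, and the fully-populated form for token users.
        return Requester(user_id, access_token_id, is_guest, device_id)

    print(create_requester("@alice:example.com"))
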
@@ -590,7 +649,7 @@ class Auth(object):
         defer.returnValue(user_id)
 
     @defer.inlineCallbacks
-    def get_user_by_access_token(self, token):
+    def get_user_by_access_token(self, token, rights="access"):
         """ Get a registered user's ID.
 
         Args:
@@ -601,47 +660,61 @@ class Auth(object):
             AuthError if no user by that token exists or the token is invalid.
         """
         try:
-            ret = yield self.get_user_from_macaroon(token)
+            ret = yield self.get_user_from_macaroon(token, rights)
         except AuthError:
             # TODO(daniel): Remove this fallback when all existing access tokens
             # have been re-issued as macaroons.
+            if self.hs.config.expire_access_token:
+                raise
             ret = yield self._look_up_user_by_access_token(token)
+
         defer.returnValue(ret)
 
     @defer.inlineCallbacks
-    def get_user_from_macaroon(self, macaroon_str):
+    def get_user_from_macaroon(self, macaroon_str, rights="access"):
         try:
             macaroon = pymacaroons.Macaroon.deserialize(macaroon_str)
 
-            self.validate_macaroon(macaroon, "access", self.hs.config.expire_access_token)
+            user_id = self.get_user_id_from_macaroon(macaroon)
+            user = UserID.from_string(user_id)
+
+            self.validate_macaroon(
+                macaroon, rights, self.hs.config.expire_access_token,
+                user_id=user_id,
+            )
 
-            user_prefix = "user_id = "
-            user = None
             guest = False
             for caveat in macaroon.caveats:
-                if caveat.caveat_id.startswith(user_prefix):
-                    user = UserID.from_string(caveat.caveat_id[len(user_prefix):])
-                elif caveat.caveat_id == "guest = true":
+                if caveat.caveat_id == "guest = true":
                     guest = True
 
-            if user is None:
-                raise AuthError(
-                    self.TOKEN_NOT_FOUND_HTTP_STATUS, "No user caveat in macaroon",
-                    errcode=Codes.UNKNOWN_TOKEN
-                )
-
             if guest:
                 ret = {
                     "user": user,
                     "is_guest": True,
                     "token_id": None,
+                    "device_id": None,
+                }
+            elif rights == "delete_pusher":
+                # We don't store these tokens in the database
+                ret = {
+                    "user": user,
+                    "is_guest": False,
+                    "token_id": None,
+                    "device_id": None,
                 }
             else:
-                # This codepath exists so that we can actually return a
-                # token ID, because we use token IDs in place of device
-                # identifiers throughout the codebase.
-                # TODO(daniel): Remove this fallback when device IDs are
-                # properly implemented.
+                # This codepath exists for several reasons:
+                #   * so that we can actually return a token ID, which is used
+                #     in some parts of the schema (where we probably ought to
+                #     use device IDs instead)
+                #   * the only way we currently have to invalidate an
+                #     access_token is by removing it from the database, so we
+                #     have to check here that it is still in the db
+                #   * some attributes (notably device_id) aren't stored in the
+                #     macaroon. They probably should be.
+                # TODO: build the dictionary from the macaroon once the
+                # above are fixed
                 ret = yield self._look_up_user_by_access_token(macaroon_str)
                 if ret["user"] != user:
                     logger.error(
@@ -661,21 +734,46 @@ class Auth(object):
                 errcode=Codes.UNKNOWN_TOKEN
             )
 
-    def validate_macaroon(self, macaroon, type_string, verify_expiry):
+    def get_user_id_from_macaroon(self, macaroon):
+        """Retrieve the user_id given by the caveats on the macaroon.
+
+        Does *not* validate the macaroon.
+
+        Args:
+            macaroon (pymacaroons.Macaroon): The macaroon to validate
+
+        Returns:
+            str: the user id
+
+        Raises:
+            AuthError if there is no user_id caveat in the macaroon
+        """
+        user_prefix = "user_id = "
+        for caveat in macaroon.caveats:
+            if caveat.caveat_id.startswith(user_prefix):
+                return caveat.caveat_id[len(user_prefix):]
+        raise AuthError(
+            self.TOKEN_NOT_FOUND_HTTP_STATUS, "No user caveat in macaroon",
+            errcode=Codes.UNKNOWN_TOKEN
+        )
+
+    def validate_macaroon(self, macaroon, type_string, verify_expiry, user_id):
         """
         validate that a Macaroon is understood by and was signed by this server.
 
         Args:
             macaroon(pymacaroons.Macaroon): The macaroon to validate
-            type_string(str): The kind of token this is (e.g. "access", "refresh")
+            type_string(str): The kind of token required (e.g. "access", "refresh",
+                              "delete_pusher")
             verify_expiry(bool): Whether to verify whether the macaroon has expired.
                 This should really always be True, but no clients currently implement
                 token refresh, so we can't enforce expiry yet.
+            user_id (str): The user_id required
         """
         v = pymacaroons.Verifier()
         v.satisfy_exact("gen = 1")
         v.satisfy_exact("type = " + type_string)
-        v.satisfy_general(lambda c: c.startswith("user_id = "))
+        v.satisfy_exact("user_id = %s" % user_id)
         v.satisfy_exact("guest = true")
         if verify_expiry:
             v.satisfy_general(self._verify_expiry)
@@ -714,10 +812,14 @@ class Auth(object):
                 self.TOKEN_NOT_FOUND_HTTP_STATUS, "Unrecognised access token.",
                 errcode=Codes.UNKNOWN_TOKEN
             )
+        # we use ret.get() below because *lots* of unit tests stub out
+        # get_user_by_access_token in a way where it only returns a couple of
+        # the fields.
         user_info = {
             "user": UserID.from_string(ret.get("name")),
             "token_id": ret.get("token_id", None),
             "is_guest": False,
+            "device_id": ret.get("device_id"),
         }
         defer.returnValue(user_info)
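
The macaroon rework splits "find the user_id caveat" from "verify the macaroon", and then pins the verifier to that exact user id instead of accepting any user_id caveat. A compact pymacaroons sketch of the same mint-extract-verify flow (the location, identifier, key and caveat values are all made up):

    import pymacaroons

    key = "not-a-real-secret-key"

    macaroon = pymacaroons.Macaroon(
        location="example.com", identifier="key_v1", key=key,
    )
    macaroon.add_first_party_caveat("gen = 1")
    macaroon.add_first_party_caveat("type = access")
    macaroon.add_first_party_caveat("user_id = @alice:example.com")

    # Pull out the user id without validating, like get_user_id_from_macaroon.
    user_id = next(
        c.caveat_id[len("user_id = "):]
        for c in macaroon.caveats
        if c.caveat_id.startswith("user_id = ")
    )

    # Then demand that exact user id, as the reworked validate_macaroon does.
    v = pymacaroons.Verifier()
    v.satisfy_exact("gen = 1")
    v.satisfy_exact("type = access")
    v.satisfy_exact("user_id = %s" % user_id)
    v.verify(macaroon, key)  # raises if any caveat is unmet or the key is wrong
    print("verified", user_id)
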
 
diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index b106fbed6d..0041646858 100644
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -42,8 +42,10 @@ class Codes(object):
     TOO_LARGE = "M_TOO_LARGE"
     EXCLUSIVE = "M_EXCLUSIVE"
     THREEPID_AUTH_FAILED = "M_THREEPID_AUTH_FAILED"
-    THREEPID_IN_USE = "THREEPID_IN_USE"
+    THREEPID_IN_USE = "M_THREEPID_IN_USE"
+    THREEPID_NOT_FOUND = "M_THREEPID_NOT_FOUND"
     INVALID_USERNAME = "M_INVALID_USERNAME"
+    SERVER_NOT_TRUSTED = "M_SERVER_NOT_TRUSTED"
 
 
 class CodeMessageException(RuntimeError):
diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py
index 4f5a4281fa..3b3ef70750 100644
--- a/synapse/api/filtering.py
+++ b/synapse/api/filtering.py
@@ -191,6 +191,17 @@ class Filter(object):
     def __init__(self, filter_json):
         self.filter_json = filter_json
 
+        self.types = self.filter_json.get("types", None)
+        self.not_types = self.filter_json.get("not_types", [])
+
+        self.rooms = self.filter_json.get("rooms", None)
+        self.not_rooms = self.filter_json.get("not_rooms", [])
+
+        self.senders = self.filter_json.get("senders", None)
+        self.not_senders = self.filter_json.get("not_senders", [])
+
+        self.contains_url = self.filter_json.get("contains_url", None)
+
     def check(self, event):
         """Checks whether the filter matches the given event.
 
@@ -209,9 +220,10 @@ class Filter(object):
             event.get("room_id", None),
             sender,
             event.get("type", None),
+            "url" in event.get("content", {})
         )
 
-    def check_fields(self, room_id, sender, event_type):
+    def check_fields(self, room_id, sender, event_type, contains_url):
         """Checks whether the filter matches the given event fields.
 
         Returns:
@@ -225,15 +237,20 @@ class Filter(object):
 
         for name, match_func in literal_keys.items():
             not_name = "not_%s" % (name,)
-            disallowed_values = self.filter_json.get(not_name, [])
+            disallowed_values = getattr(self, not_name)
             if any(map(match_func, disallowed_values)):
                 return False
 
-            allowed_values = self.filter_json.get(name, None)
+            allowed_values = getattr(self, name)
             if allowed_values is not None:
                 if not any(map(match_func, allowed_values)):
                     return False
 
+        contains_url_filter = self.filter_json.get("contains_url")
+        if contains_url_filter is not None:
+            if contains_url_filter != contains_url:
+                return False
+
         return True
 
     def filter_rooms(self, room_ids):
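
Hoisting types/not_types and friends into attributes in __init__ means the filter JSON is parsed once per filter rather than once per event, and contains_url is a plain equality test against whether the event content has a "url" key. A stripped-down, self-contained version of the matching logic, using simple equality where the real Filter supports Matrix wildcards:

    class MiniFilter(object):
        def __init__(self, filter_json):
            self.senders = filter_json.get("senders")        # None means "anything"
            self.not_senders = filter_json.get("not_senders", [])
            self.contains_url = filter_json.get("contains_url")

        def check(self, event):
            sender = event.get("sender")
            if sender in self.not_senders:
                return False
            if self.senders is not None and sender not in self.senders:
                return False
            if self.contains_url is not None:
                if self.contains_url != ("url" in event.get("content", {})):
                    return False
            return True

    f = MiniFilter({"not_senders": ["@spam:example.com"], "contains_url": True})
    print(f.check({"sender": "@alice:example.com", "content": {"url": "mxc://x"}}))  # True
    print(f.check({"sender": "@alice:example.com", "content": {}}))                  # False
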
diff --git a/synapse/app/__init__.py b/synapse/app/__init__.py
index 1bc4279807..9c2b627590 100644
--- a/synapse/app/__init__.py
+++ b/synapse/app/__init__.py
@@ -16,13 +16,11 @@
 import sys
 sys.dont_write_bytecode = True
 
-from synapse.python_dependencies import (
-    check_requirements, MissingRequirementError
-)  # NOQA
+from synapse import python_dependencies   # noqa: E402
 
 try:
-    check_requirements()
-except MissingRequirementError as e:
+    python_dependencies.check_requirements()
+except python_dependencies.MissingRequirementError as e:
     message = "\n".join([
         "Missing Requirement: %s" % (e.message,),
         "To install run:",
diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py
new file mode 100644
index 0000000000..7355499ae2
--- /dev/null
+++ b/synapse/app/federation_reader.py
@@ -0,0 +1,206 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import synapse
+
+from synapse.config._base import ConfigError
+from synapse.config.homeserver import HomeServerConfig
+from synapse.config.logger import setup_logging
+from synapse.http.site import SynapseSite
+from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
+from synapse.replication.slave.storage._base import BaseSlavedStore
+from synapse.replication.slave.storage.events import SlavedEventStore
+from synapse.replication.slave.storage.keys import SlavedKeyStore
+from synapse.replication.slave.storage.room import RoomStore
+from synapse.replication.slave.storage.transactions import TransactionStore
+from synapse.replication.slave.storage.directory import DirectoryStore
+from synapse.server import HomeServer
+from synapse.storage.engines import create_engine
+from synapse.util.async import sleep
+from synapse.util.httpresourcetree import create_resource_tree
+from synapse.util.logcontext import LoggingContext
+from synapse.util.manhole import manhole
+from synapse.util.rlimit import change_resource_limit
+from synapse.util.versionstring import get_version_string
+from synapse.api.urls import FEDERATION_PREFIX
+from synapse.federation.transport.server import TransportLayerServer
+from synapse.crypto import context_factory
+
+
+from twisted.internet import reactor, defer
+from twisted.web.resource import Resource
+
+from daemonize import Daemonize
+
+import sys
+import logging
+import gc
+
+logger = logging.getLogger("synapse.app.federation_reader")
+
+
+class FederationReaderSlavedStore(
+    SlavedEventStore,
+    SlavedKeyStore,
+    RoomStore,
+    DirectoryStore,
+    TransactionStore,
+    BaseSlavedStore,
+):
+    pass
+
+
+class FederationReaderServer(HomeServer):
+    def get_db_conn(self, run_new_connection=True):
+        # Any param beginning with cp_ is a parameter for adbapi, and should
+        # not be passed to the database engine.
+        db_params = {
+            k: v for k, v in self.db_config.get("args", {}).items()
+            if not k.startswith("cp_")
+        }
+        db_conn = self.database_engine.module.connect(**db_params)
+
+        if run_new_connection:
+            self.database_engine.on_new_connection(db_conn)
+        return db_conn
+
+    def setup(self):
+        logger.info("Setting up.")
+        self.datastore = FederationReaderSlavedStore(self.get_db_conn(), self)
+        logger.info("Finished setting up.")
+
+    def _listen_http(self, listener_config):
+        port = listener_config["port"]
+        bind_address = listener_config.get("bind_address", "")
+        site_tag = listener_config.get("tag", port)
+        resources = {}
+        for res in listener_config["resources"]:
+            for name in res["names"]:
+                if name == "metrics":
+                    resources[METRICS_PREFIX] = MetricsResource(self)
+                elif name == "federation":
+                    resources.update({
+                        FEDERATION_PREFIX: TransportLayerServer(self),
+                    })
+
+        root_resource = create_resource_tree(resources, Resource())
+        reactor.listenTCP(
+            port,
+            SynapseSite(
+                "synapse.access.http.%s" % (site_tag,),
+                site_tag,
+                listener_config,
+                root_resource,
+            ),
+            interface=bind_address
+        )
+        logger.info("Synapse federation reader now listening on port %d", port)
+
+    def start_listening(self, listeners):
+        for listener in listeners:
+            if listener["type"] == "http":
+                self._listen_http(listener)
+            elif listener["type"] == "manhole":
+                reactor.listenTCP(
+                    listener["port"],
+                    manhole(
+                        username="matrix",
+                        password="rabbithole",
+                        globals={"hs": self},
+                    ),
+                    interface=listener.get("bind_address", '127.0.0.1')
+                )
+            else:
+                logger.warn("Unrecognized listener type: %s", listener["type"])
+
+    @defer.inlineCallbacks
+    def replicate(self):
+        http_client = self.get_simple_http_client()
+        store = self.get_datastore()
+        replication_url = self.config.worker_replication_url
+
+        while True:
+            try:
+                args = store.stream_positions()
+                args["timeout"] = 30000
+                result = yield http_client.get_json(replication_url, args=args)
+                yield store.process_replication(result)
+            except:
+                logger.exception("Error replicating from %r", replication_url)
+                yield sleep(5)
+
+
+def start(config_options):
+    try:
+        config = HomeServerConfig.load_config(
+            "Synapse federation reader", config_options
+        )
+    except ConfigError as e:
+        sys.stderr.write("\n" + e.message + "\n")
+        sys.exit(1)
+
+    assert config.worker_app == "synapse.app.federation_reader"
+
+    setup_logging(config.worker_log_config, config.worker_log_file)
+
+    database_engine = create_engine(config.database_config)
+
+    tls_server_context_factory = context_factory.ServerContextFactory(config)
+
+    ss = FederationReaderServer(
+        config.server_name,
+        db_config=config.database_config,
+        tls_server_context_factory=tls_server_context_factory,
+        config=config,
+        version_string="Synapse/" + get_version_string(synapse),
+        database_engine=database_engine,
+    )
+
+    ss.setup()
+    ss.get_handlers()
+    ss.start_listening(config.worker_listeners)
+
+    def run():
+        with LoggingContext("run"):
+            logger.info("Running")
+            change_resource_limit(config.soft_file_limit)
+            if config.gc_thresholds:
+                gc.set_threshold(*config.gc_thresholds)
+            reactor.run()
+
+    def start():
+        ss.get_datastore().start_profiling()
+        ss.replicate()
+
+    reactor.callWhenRunning(start)
+
+    if config.worker_daemonize:
+        daemon = Daemonize(
+            app="synapse-federation-reader",
+            pid=config.worker_pid_file,
+            action=run,
+            auto_close_fds=False,
+            verbose=True,
+            logger=logger,
+        )
+        daemon.start()
+    else:
+        run()
+
+
+if __name__ == '__main__':
+    with LoggingContext("main"):
+        start(sys.argv[1:])
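
The new federation_reader follows the worker template also visible in the pusher and synchrotron below: long-poll the master's replication URL with the current stream positions, feed the result to the slaved stores, and back off briefly on error. In outline (the fetch and process callables are stand-ins, not synapse's HTTP client API):

    import logging
    import time

    logger = logging.getLogger(__name__)

    def replicate_forever(fetch_updates, process, stream_positions,
                          poll_timeout_ms=30000):
        """Skeleton of the worker replication loop (synchronous for clarity)."""
        while True:
            try:
                args = dict(stream_positions())      # e.g. {"events": 1234, ...}
                args["timeout"] = poll_timeout_ms    # master holds the request open
                result = fetch_updates(args)         # stand-in for HTTP GET + JSON
                process(result)                      # advance the slaved stores
            except Exception:
                logger.exception("Error replicating")
                time.sleep(5)                        # back off before retrying
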
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index df675c0ed4..40e6f65236 100755
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -16,6 +16,7 @@
 
 import synapse
 
+import gc
 import logging
 import os
 import sys
@@ -50,6 +51,7 @@ from synapse.api.urls import (
 from synapse.config.homeserver import HomeServerConfig
 from synapse.crypto import context_factory
 from synapse.util.logcontext import LoggingContext
+from synapse.metrics import register_memory_metrics
 from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
 from synapse.replication.resource import ReplicationResource, REPLICATION_PREFIX
 from synapse.federation.transport.server import TransportLayerServer
@@ -146,7 +148,7 @@ class SynapseHomeServer(HomeServer):
                         MEDIA_PREFIX: media_repo,
                         LEGACY_MEDIA_PREFIX: media_repo,
                         CONTENT_REPO_PREFIX: ContentRepoResource(
-                            self, self.config.uploads_path, self.auth, self.content_addr
+                            self, self.config.uploads_path
                         ),
                     })
 
@@ -265,10 +267,9 @@ def setup(config_options):
         HomeServer
     """
     try:
-        config = HomeServerConfig.load_config(
+        config = HomeServerConfig.load_or_generate_config(
             "Synapse Homeserver",
             config_options,
-            generate_section="Homeserver"
         )
     except ConfigError as e:
         sys.stderr.write("\n" + e.message + "\n")
@@ -284,7 +285,7 @@ def setup(config_options):
     # check any extra requirements we have now we have a config
     check_requirements(config)
 
-    version_string = get_version_string("Synapse", synapse)
+    version_string = "Synapse/" + get_version_string(synapse)
 
     logger.info("Server hostname: %s", config.server_name)
     logger.info("Server version: %s", version_string)
@@ -301,7 +302,6 @@ def setup(config_options):
         db_config=config.database_config,
         tls_server_context_factory=tls_server_context_factory,
         config=config,
-        content_addr=config.content_addr,
         version_string=version_string,
         database_engine=database_engine,
     )
@@ -336,6 +336,8 @@ def setup(config_options):
         hs.get_datastore().start_doing_background_updates()
         hs.get_replication_layer().start_get_pdu_cache()
 
+        register_memory_metrics(hs)
+
     reactor.callWhenRunning(start)
 
     return hs
@@ -351,6 +353,8 @@ class SynapseService(service.Service):
     def startService(self):
         hs = setup(self.config)
         change_resource_limit(hs.config.soft_file_limit)
+        if hs.config.gc_thresholds:
+            gc.set_threshold(*hs.config.gc_thresholds)
 
     def stopService(self):
         return self._port.stopListening()
@@ -422,6 +426,8 @@ def run(hs):
         # sys.settrace(logcontext_tracer)
         with LoggingContext("run"):
             change_resource_limit(hs.config.soft_file_limit)
+            if hs.config.gc_thresholds:
+                gc.set_threshold(*hs.config.gc_thresholds)
             reactor.run()
 
     if hs.config.daemonize:
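
The gc.set_threshold calls threaded through these entry points tune CPython's generational collector: the thresholds are allocation counts that trigger collection of generations 0, 1 and 2, which is exactly what config.gc_thresholds exposes. For instance:

    import gc

    print(gc.get_threshold())   # CPython's default is (700, 10, 10)
    # Raising generation 0's threshold trades memory for fewer, larger
    # collections, the kind of tuning gc_thresholds allows per process.
    gc.set_threshold(100000, 10, 10)
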
diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py
index 135dd58c15..c8dde0fcb8 100644
--- a/synapse/app/pusher.py
+++ b/synapse/app/pusher.py
@@ -18,9 +18,8 @@ import synapse
 
 from synapse.server import HomeServer
 from synapse.config._base import ConfigError
-from synapse.config.database import DatabaseConfig
-from synapse.config.logger import LoggingConfig
-from synapse.config.emailconfig import EmailConfig
+from synapse.config.logger import setup_logging
+from synapse.config.homeserver import HomeServerConfig
 from synapse.http.site import SynapseSite
 from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
 from synapse.storage.roommember import RoomMemberStore
@@ -44,61 +43,11 @@ from daemonize import Daemonize
 
 import sys
 import logging
+import gc
 
 logger = logging.getLogger("synapse.app.pusher")
 
 
-class SlaveConfig(DatabaseConfig):
-    def read_config(self, config):
-        self.replication_url = config["replication_url"]
-        self.server_name = config["server_name"]
-        self.use_insecure_ssl_client_just_for_testing_do_not_use = config.get(
-            "use_insecure_ssl_client_just_for_testing_do_not_use", False
-        )
-        self.user_agent_suffix = None
-        self.start_pushers = True
-        self.listeners = config["listeners"]
-        self.soft_file_limit = config.get("soft_file_limit")
-        self.daemonize = config.get("daemonize")
-        self.pid_file = self.abspath(config.get("pid_file"))
-        self.public_baseurl = config["public_baseurl"]
-
-    def default_config(self, server_name, **kwargs):
-        pid_file = self.abspath("pusher.pid")
-        return """\
-        # Slave configuration
-
-        # The replication listener on the synapse to talk to.
-        #replication_url: https://localhost:{replication_port}/_synapse/replication
-
-        server_name: "%(server_name)s"
-
-        listeners: []
-        # Enable a ssh manhole listener on the pusher.
-        # - type: manhole
-        #   port: {manhole_port}
-        #   bind_address: 127.0.0.1
-        # Enable a metric listener on the pusher.
-        # - type: http
-        #   port: {metrics_port}
-        #   bind_address: 127.0.0.1
-        #   resources:
-        #    - names: ["metrics"]
-        #      compress: False
-
-        report_stats: False
-
-        daemonize: False
-
-        pid_file: %(pid_file)s
-
-        """ % locals()
-
-
-class PusherSlaveConfig(SlaveConfig, LoggingConfig, EmailConfig):
-    pass
-
-
 class PusherSlaveStore(
     SlavedEventStore, SlavedPusherStore, SlavedReceiptsStore,
     SlavedAccountDataStore
@@ -163,7 +112,7 @@ class PusherServer(HomeServer):
 
     def remove_pusher(self, app_id, push_key, user_id):
         http_client = self.get_simple_http_client()
-        replication_url = self.config.replication_url
+        replication_url = self.config.worker_replication_url
         url = replication_url + "/remove_pushers"
         return http_client.post_json_get_json(url, {
             "remove": [{
@@ -196,8 +145,8 @@ class PusherServer(HomeServer):
         )
         logger.info("Synapse pusher now listening on port %d", port)
 
-    def start_listening(self):
-        for listener in self.config.listeners:
+    def start_listening(self, listeners):
+        for listener in listeners:
             if listener["type"] == "http":
                 self._listen_http(listener)
             elif listener["type"] == "manhole":
@@ -217,7 +166,7 @@ class PusherServer(HomeServer):
     def replicate(self):
         http_client = self.get_simple_http_client()
         store = self.get_datastore()
-        replication_url = self.config.replication_url
+        replication_url = self.config.worker_replication_url
         pusher_pool = self.get_pusherpool()
         clock = self.get_clock()
 
@@ -290,22 +239,33 @@ class PusherServer(HomeServer):
                 poke_pushers(result)
             except:
                 logger.exception("Error replicating from %r", replication_url)
-                sleep(30)
+                yield sleep(30)
 
 
-def setup(config_options):
+def start(config_options):
     try:
-        config = PusherSlaveConfig.load_config(
+        config = HomeServerConfig.load_config(
             "Synapse pusher", config_options
         )
     except ConfigError as e:
         sys.stderr.write("\n" + e.message + "\n")
         sys.exit(1)
 
-    if not config:
-        sys.exit(0)
+    assert config.worker_app == "synapse.app.pusher"
+
+    setup_logging(config.worker_log_config, config.worker_log_file)
+
+    if config.start_pushers:
+        sys.stderr.write(
+            "\nThe pushers must be disabled in the main synapse process"
+            "\nbefore they can be run in a separate worker."
+            "\nPlease add ``start_pushers: false`` to the main config"
+            "\n"
+        )
+        sys.exit(1)
 
-    config.setup_logging()
+    # Force the pushers to start since they will be disabled in the main config
+    config.start_pushers = True
 
     database_engine = create_engine(config.database_config)
 
@@ -313,14 +273,20 @@ def setup(config_options):
         config.server_name,
         db_config=config.database_config,
         config=config,
-        version_string=get_version_string("Synapse", synapse),
+        version_string="Synapse/" + get_version_string(synapse),
         database_engine=database_engine,
     )
 
     ps.setup()
-    ps.start_listening()
-
-    change_resource_limit(ps.config.soft_file_limit)
+    ps.start_listening(config.worker_listeners)
+
+    def run():
+        with LoggingContext("run"):
+            logger.info("Running")
+            change_resource_limit(config.soft_file_limit)
+            if config.gc_thresholds:
+                gc.set_threshold(*config.gc_thresholds)
+            reactor.run()
 
     def start():
         ps.replicate()
@@ -329,28 +295,20 @@ def setup(config_options):
 
     reactor.callWhenRunning(start)
 
-    return ps
+    if config.worker_daemonize:
+        daemon = Daemonize(
+            app="synapse-pusher",
+            pid=config.worker_pid_file,
+            action=run,
+            auto_close_fds=False,
+            verbose=True,
+            logger=logger,
+        )
+        daemon.start()
+    else:
+        run()
 
 
 if __name__ == '__main__':
     with LoggingContext("main"):
-        ps = setup(sys.argv[1:])
-
-        if ps.config.daemonize:
-            def run():
-                with LoggingContext("run"):
-                    change_resource_limit(ps.config.soft_file_limit)
-                    reactor.run()
-
-            daemon = Daemonize(
-                app="synapse-pusher",
-                pid=ps.config.pid_file,
-                action=run,
-                auto_close_fds=False,
-                verbose=True,
-                logger=logger,
-            )
-
-            daemon.start()
-        else:
-            reactor.run()
+        ps = start(sys.argv[1:])
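
The pusher's tail now matches the other workers: wrap the reactor in a run() function, then either hand it to Daemonize or invoke it inline. Reduced to its moving parts (the worker name and pid path are placeholders):

    import logging

    from daemonize import Daemonize

    logger = logging.getLogger("synapse.app.example_worker")

    def serve(run, daemonize=False, pid_file="/tmp/example-worker.pid"):
        # run() is expected to enter the reactor and not return.
        if daemonize:
            Daemonize(
                app="synapse-example-worker",   # placeholder worker name
                pid=pid_file,
                action=run,
                auto_close_fds=False,
                verbose=True,
                logger=logger,
            ).start()
        else:
            run()
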
diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
new file mode 100644
index 0000000000..215ccfd522
--- /dev/null
+++ b/synapse/app/synchrotron.py
@@ -0,0 +1,465 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import synapse
+
+from synapse.api.constants import EventTypes, PresenceState
+from synapse.config._base import ConfigError
+from synapse.config.homeserver import HomeServerConfig
+from synapse.config.logger import setup_logging
+from synapse.events import FrozenEvent
+from synapse.handlers.presence import PresenceHandler
+from synapse.http.site import SynapseSite
+from synapse.http.server import JsonResource
+from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
+from synapse.rest.client.v2_alpha import sync
+from synapse.replication.slave.storage._base import BaseSlavedStore
+from synapse.replication.slave.storage.events import SlavedEventStore
+from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
+from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
+from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
+from synapse.replication.slave.storage.registration import SlavedRegistrationStore
+from synapse.replication.slave.storage.filtering import SlavedFilteringStore
+from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
+from synapse.replication.slave.storage.presence import SlavedPresenceStore
+from synapse.server import HomeServer
+from synapse.storage.client_ips import ClientIpStore
+from synapse.storage.engines import create_engine
+from synapse.storage.presence import PresenceStore, UserPresenceState
+from synapse.storage.roommember import RoomMemberStore
+from synapse.util.async import sleep
+from synapse.util.httpresourcetree import create_resource_tree
+from synapse.util.logcontext import LoggingContext, preserve_fn
+from synapse.util.manhole import manhole
+from synapse.util.rlimit import change_resource_limit
+from synapse.util.stringutils import random_string
+from synapse.util.versionstring import get_version_string
+
+from twisted.internet import reactor, defer
+from twisted.web.resource import Resource
+
+from daemonize import Daemonize
+
+import sys
+import logging
+import contextlib
+import gc
+import ujson as json
+
+logger = logging.getLogger("synapse.app.synchrotron")
+
+
+class SynchrotronSlavedStore(
+    SlavedPushRuleStore,
+    SlavedEventStore,
+    SlavedReceiptsStore,
+    SlavedAccountDataStore,
+    SlavedApplicationServiceStore,
+    SlavedRegistrationStore,
+    SlavedFilteringStore,
+    SlavedPresenceStore,
+    BaseSlavedStore,
+    ClientIpStore,  # After BaseSlavedStore because the constructor is different
+):
+    # XXX: This is a bit broken because we don't persist forgotten rooms
+    # in a way that they can be streamed. This means that we don't have a
+    # way to invalidate the forgotten rooms cache correctly.
+    # For now we expire the cache every hour.
+    BROKEN_CACHE_EXPIRY_MS = 60 * 60 * 1000
+    who_forgot_in_room = (
+        RoomMemberStore.__dict__["who_forgot_in_room"]
+    )
+
+    # XXX: This is a bit broken because we don't persist the accepted list in a
+    # way that can be replicated. This means that we don't have a way to
+    # invalidate the cache correctly.
+    get_presence_list_accepted = PresenceStore.__dict__[
+        "get_presence_list_accepted"
+    ]
+
+UPDATE_SYNCING_USERS_MS = 10 * 1000
+
+
+class SynchrotronPresence(object):
+    def __init__(self, hs):
+        self.http_client = hs.get_simple_http_client()
+        self.store = hs.get_datastore()
+        self.user_to_num_current_syncs = {}
+        self.syncing_users_url = hs.config.worker_replication_url + "/syncing_users"
+        self.clock = hs.get_clock()
+
+        active_presence = self.store.take_presence_startup_info()
+        self.user_to_current_state = {
+            state.user_id: state
+            for state in active_presence
+        }
+
+        self.process_id = random_string(16)
+        logger.info("Presence process_id is %r", self.process_id)
+
+        self._sending_sync = False
+        self._need_to_send_sync = False
+        self.clock.looping_call(
+            self._send_syncing_users_regularly,
+            UPDATE_SYNCING_USERS_MS,
+        )
+
+        reactor.addSystemEventTrigger("before", "shutdown", self._on_shutdown)
+
+    def set_state(self, user, state):
+        # TODO: How's this supposed to work?
+        pass
+
+    get_states = PresenceHandler.get_states.__func__
+    current_state_for_users = PresenceHandler.current_state_for_users.__func__
+
+    @defer.inlineCallbacks
+    def user_syncing(self, user_id, affect_presence):
+        if affect_presence:
+            curr_sync = self.user_to_num_current_syncs.get(user_id, 0)
+            self.user_to_num_current_syncs[user_id] = curr_sync + 1
+            prev_states = yield self.current_state_for_users([user_id])
+            if prev_states[user_id].state == PresenceState.OFFLINE:
+                # TODO: Don't block the sync request on this HTTP hit.
+                yield self._send_syncing_users_now()
+
+        def _end():
+            # We check that the user_id is in user_to_num_current_syncs because
+            # user_to_num_current_syncs may have been cleared if we are
+            # shutting down.
+            if affect_presence and user_id in self.user_to_num_current_syncs:
+                self.user_to_num_current_syncs[user_id] -= 1
+
+        @contextlib.contextmanager
+        def _user_syncing():
+            try:
+                yield
+            finally:
+                _end()
+
+        defer.returnValue(_user_syncing())
+
+    @defer.inlineCallbacks
+    def _on_shutdown(self):
+        # When the synchrotron is shut down, tell the master to clear the
+        # in-progress syncs for this process
+        self.user_to_num_current_syncs.clear()
+        yield self._send_syncing_users_now()
+
+    def _send_syncing_users_regularly(self):
+        # Only send an update if we aren't in the middle of sending one.
+        if not self._sending_sync:
+            preserve_fn(self._send_syncing_users_now)()
+
+    @defer.inlineCallbacks
+    def _send_syncing_users_now(self):
+        if self._sending_sync:
+            # We don't want to race with sending another update.
+            # Instead we wait for that update to finish and send another
+            # update afterwards.
+            self._need_to_send_sync = True
+            return
+
+        # Flag that we are sending an update.
+        self._sending_sync = True
+
+        yield self.http_client.post_json_get_json(self.syncing_users_url, {
+            "process_id": self.process_id,
+            "syncing_users": [
+                user_id for user_id, count in self.user_to_num_current_syncs.items()
+                if count > 0
+            ],
+        })
+
+        # Unset the flag as we are no longer sending an update.
+        self._sending_sync = False
+        if self._need_to_send_sync:
+            # If something happened while we were sending the update then
+            # we might need to send another update.
+            # TODO: Check if the update that was sent matches the current state
+            # as we only need to send an update if they are different.
+            self._need_to_send_sync = False
+            yield self._send_syncing_users_now()
+
+    def process_replication(self, result):
+        stream = result.get("presence", {"rows": []})
+        for row in stream["rows"]:
+            (
+                position, user_id, state, last_active_ts,
+                last_federation_update_ts, last_user_sync_ts, status_msg,
+                currently_active
+            ) = row
+            self.user_to_current_state[user_id] = UserPresenceState(
+                user_id, state, last_active_ts,
+                last_federation_update_ts, last_user_sync_ts, status_msg,
+                currently_active
+            )
+
+
+class SynchrotronTyping(object):
+    def __init__(self, hs):
+        self._latest_room_serial = 0
+        self._room_serials = {}
+        self._room_typing = {}
+
+    def stream_positions(self):
+        return {"typing": self._latest_room_serial}
+
+    def process_replication(self, result):
+        stream = result.get("typing")
+        if stream:
+            self._latest_room_serial = int(stream["position"])
+
+            for row in stream["rows"]:
+                position, room_id, typing_json = row
+                typing = json.loads(typing_json)
+                self._room_serials[room_id] = position
+                self._room_typing[room_id] = typing
+
+
+class SynchrotronApplicationService(object):
+    def notify_interested_services(self, event):
+        pass
+
+
+class SynchrotronServer(HomeServer):
+    def get_db_conn(self, run_new_connection=True):
+        # Any param beginning with cp_ is a parameter for adbapi, and should
+        # not be passed to the database engine.
+        db_params = {
+            k: v for k, v in self.db_config.get("args", {}).items()
+            if not k.startswith("cp_")
+        }
+        db_conn = self.database_engine.module.connect(**db_params)
+
+        if run_new_connection:
+            self.database_engine.on_new_connection(db_conn)
+        return db_conn
+
+    def setup(self):
+        logger.info("Setting up.")
+        self.datastore = SynchrotronSlavedStore(self.get_db_conn(), self)
+        logger.info("Finished setting up.")
+
+    def _listen_http(self, listener_config):
+        port = listener_config["port"]
+        bind_address = listener_config.get("bind_address", "")
+        site_tag = listener_config.get("tag", port)
+        resources = {}
+        for res in listener_config["resources"]:
+            for name in res["names"]:
+                if name == "metrics":
+                    resources[METRICS_PREFIX] = MetricsResource(self)
+                elif name == "client":
+                    resource = JsonResource(self, canonical_json=False)
+                    sync.register_servlets(self, resource)
+                    resources.update({
+                        "/_matrix/client/r0": resource,
+                        "/_matrix/client/unstable": resource,
+                        "/_matrix/client/v2_alpha": resource,
+                    })
+
+        root_resource = create_resource_tree(resources, Resource())
+        reactor.listenTCP(
+            port,
+            SynapseSite(
+                "synapse.access.http.%s" % (site_tag,),
+                site_tag,
+                listener_config,
+                root_resource,
+            ),
+            interface=bind_address
+        )
+        logger.info("Synapse synchrotron now listening on port %d", port)
+
+    def start_listening(self, listeners):
+        for listener in listeners:
+            if listener["type"] == "http":
+                self._listen_http(listener)
+            elif listener["type"] == "manhole":
+                reactor.listenTCP(
+                    listener["port"],
+                    manhole(
+                        username="matrix",
+                        password="rabbithole",
+                        globals={"hs": self},
+                    ),
+                    interface=listener.get("bind_address", '127.0.0.1')
+                )
+            else:
+                logger.warn("Unrecognized listener type: %s", listener["type"])
+
+    @defer.inlineCallbacks
+    def replicate(self):
+        http_client = self.get_simple_http_client()
+        store = self.get_datastore()
+        replication_url = self.config.worker_replication_url
+        clock = self.get_clock()
+        notifier = self.get_notifier()
+        presence_handler = self.get_presence_handler()
+        typing_handler = self.get_typing_handler()
+
+        def expire_broken_caches():
+            store.who_forgot_in_room.invalidate_all()
+            store.get_presence_list_accepted.invalidate_all()
+
+        def notify_from_stream(
+            result, stream_name, stream_key, room=None, user=None
+        ):
+            stream = result.get(stream_name)
+            if stream:
+                position_index = stream["field_names"].index("position")
+                if room:
+                    room_index = stream["field_names"].index(room)
+                if user:
+                    user_index = stream["field_names"].index(user)
+
+                users = ()
+                rooms = ()
+                for row in stream["rows"]:
+                    position = row[position_index]
+
+                    if user:
+                        users = (row[user_index],)
+
+                    if room:
+                        rooms = (row[room_index],)
+
+                    notifier.on_new_event(
+                        stream_key, position, users=users, rooms=rooms
+                    )
+
+        def notify(result):
+            stream = result.get("events")
+            if stream:
+                max_position = stream["position"]
+                for row in stream["rows"]:
+                    position = row[0]
+                    internal = json.loads(row[1])
+                    event_json = json.loads(row[2])
+                    event = FrozenEvent(event_json, internal_metadata_dict=internal)
+                    extra_users = ()
+                    if event.type == EventTypes.Member:
+                        extra_users = (event.state_key,)
+                    notifier.on_new_room_event(
+                        event, position, max_position, extra_users
+                    )
+
+            notify_from_stream(
+                result, "push_rules", "push_rules_key", user="user_id"
+            )
+            notify_from_stream(
+                result, "user_account_data", "account_data_key", user="user_id"
+            )
+            notify_from_stream(
+                result, "room_account_data", "account_data_key", user="user_id"
+            )
+            notify_from_stream(
+                result, "tag_account_data", "account_data_key", user="user_id"
+            )
+            notify_from_stream(
+                result, "receipts", "receipt_key", room="room_id"
+            )
+            notify_from_stream(
+                result, "typing", "typing_key", room="room_id"
+            )
+
+        next_expire_broken_caches_ms = 0
+        while True:
+            try:
+                args = store.stream_positions()
+                args.update(typing_handler.stream_positions())
+                args["timeout"] = 30000
+                result = yield http_client.get_json(replication_url, args=args)
+                now_ms = clock.time_msec()
+                if now_ms > next_expire_broken_caches_ms:
+                    expire_broken_caches()
+                    next_expire_broken_caches_ms = (
+                        now_ms + store.BROKEN_CACHE_EXPIRY_MS
+                    )
+                yield store.process_replication(result)
+                typing_handler.process_replication(result)
+                presence_handler.process_replication(result)
+                notify(result)
+            except:
+                logger.exception("Error replicating from %r", replication_url)
+                yield sleep(5)
+
+    def build_presence_handler(self):
+        return SynchrotronPresence(self)
+
+    def build_typing_handler(self):
+        return SynchrotronTyping(self)
+
+
+def start(config_options):
+    try:
+        config = HomeServerConfig.load_config(
+            "Synapse synchrotron", config_options
+        )
+    except ConfigError as e:
+        sys.stderr.write("\n" + e.message + "\n")
+        sys.exit(1)
+
+    assert config.worker_app == "synapse.app.synchrotron"
+
+    setup_logging(config.worker_log_config, config.worker_log_file)
+
+    database_engine = create_engine(config.database_config)
+
+    ss = SynchrotronServer(
+        config.server_name,
+        db_config=config.database_config,
+        config=config,
+        version_string="Synapse/" + get_version_string(synapse),
+        database_engine=database_engine,
+        application_service_handler=SynchrotronApplicationService(),
+    )
+
+    ss.setup()
+    ss.start_listening(config.worker_listeners)
+
+    def run():
+        with LoggingContext("run"):
+            logger.info("Running")
+            change_resource_limit(config.soft_file_limit)
+            if config.gc_thresholds:
+                gc.set_threshold(*config.gc_thresholds)
+            reactor.run()
+
+    def start():
+        ss.get_datastore().start_profiling()
+        ss.replicate()
+
+    reactor.callWhenRunning(start)
+
+    if config.worker_daemonize:
+        daemon = Daemonize(
+            app="synapse-synchrotron",
+            pid=config.worker_pid_file,
+            action=run,
+            auto_close_fds=False,
+            verbose=True,
+            logger=logger,
+        )
+        daemon.start()
+    else:
+        run()
+
+
+if __name__ == '__main__':
+    with LoggingContext("main"):
+        start(sys.argv[1:])
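The replicate() loop above is the heart of a worker: it reports its current stream positions to the master's replication URL, long-polls (timeout=30000 ms) for anything newer, feeds the result through the store and the typing/presence handlers, and finally wakes waiting clients via the notifier. A minimal sketch of the same request/advance cycle, using a hypothetical blocking get_json helper instead of Twisted, to show the shape of the protocol:

    # Sketch only: a blocking rendition of the worker replication loop.
    # get_json and store are stand-ins for the synapse objects above.
    import time

    def replication_loop(get_json, store, replication_url):
        while True:
            try:
                args = store.stream_positions()    # e.g. {"events": "1234", ...}
                args["timeout"] = 30000            # long-poll for up to 30s
                result = get_json(replication_url, args=args)
                store.process_replication(result)  # advances the positions
            except Exception:
                time.sleep(5)                      # back off and retry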
diff --git a/synapse/app/synctl.py b/synapse/app/synctl.py
index 39f4bf6e53..bb41962d47 100755
--- a/synapse/app/synctl.py
+++ b/synapse/app/synctl.py
@@ -14,11 +14,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import sys
+import argparse
+import collections
+import glob
 import os
 import os.path
-import subprocess
 import signal
+import subprocess
+import sys
 import yaml
 
 SYNAPSE = ["python", "-B", "-m", "synapse.app.homeserver"]
@@ -28,60 +31,181 @@ RED = "\x1b[1;31m"
 NORMAL = "\x1b[m"
 
 
+def write(message, colour=NORMAL, stream=sys.stdout):
+    if colour == NORMAL:
+        stream.write(message + "\n")
+    else:
+        stream.write(colour + message + NORMAL + "\n")
+
+
 def start(configfile):
-    print ("Starting ...")
+    write("Starting ...")
     args = SYNAPSE
     args.extend(["--daemonize", "-c", configfile])
 
     try:
         subprocess.check_call(args)
-        print (GREEN + "started" + NORMAL)
+        write("started synapse.app.homeserver(%r)" % (configfile,), colour=GREEN)
+    except subprocess.CalledProcessError as e:
+        write(
+            "error starting (exit code: %d); see above for logs" % e.returncode,
+            colour=RED,
+        )
+
+
+def start_worker(app, configfile, worker_configfile):
+    args = [
+        "python", "-B",
+        "-m", app,
+        "-c", configfile,
+        "-c", worker_configfile
+    ]
+
+    try:
+        subprocess.check_call(args)
+        write("started %s(%r)" % (app, worker_configfile), colour=GREEN)
     except subprocess.CalledProcessError as e:
-        print (
-            RED +
-            "error starting (exit code: %d); see above for logs" % e.returncode +
-            NORMAL
+        write(
+            "error starting %s(%r) (exit code: %d); see above for logs" % (
+                app, worker_configfile, e.returncode,
+            ),
+            colour=RED,
         )
 
 
-def stop(pidfile):
+def stop(pidfile, app):
     if os.path.exists(pidfile):
         pid = int(open(pidfile).read())
         os.kill(pid, signal.SIGTERM)
-        print (GREEN + "stopped" + NORMAL)
+        write("stopped %s" % (app,), colour=GREEN)
+
+
+Worker = collections.namedtuple("Worker", [
+    "app", "configfile", "pidfile", "cache_factor"
+])
 
 
 def main():
-    configfile = sys.argv[2] if len(sys.argv) == 3 else "homeserver.yaml"
+
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument(
+        "action",
+        choices=["start", "stop", "restart"],
+        help="whether to start, stop or restart the synapse",
+    )
+    parser.add_argument(
+        "configfile",
+        nargs="?",
+        default="homeserver.yaml",
+        help="the homeserver config file, defaults to homserver.yaml",
+    )
+    parser.add_argument(
+        "-w", "--worker",
+        metavar="WORKERCONFIG",
+        help="start or stop a single worker",
+    )
+    parser.add_argument(
+        "-a", "--all-processes",
+        metavar="WORKERCONFIGDIR",
+        help="start or stop all the workers in the given directory"
+             " and the main synapse process",
+    )
+
+    options = parser.parse_args()
+
+    if options.worker and options.all_processes:
+        write(
+            'Cannot use "--worker" with "--all-processes"',
+            stream=sys.stderr
+        )
+        sys.exit(1)
+
+    configfile = options.configfile
 
     if not os.path.exists(configfile):
-        sys.stderr.write(
+        write(
             "No config file found\n"
             "To generate a config file, run '%s -c %s --generate-config"
             " --server-name=<server name>'\n" % (
-                " ".join(SYNAPSE), configfile
-            )
+                " ".join(SYNAPSE), options.configfile
+            ),
+            stream=sys.stderr,
         )
         sys.exit(1)
 
-    config = yaml.load(open(configfile))
+    with open(configfile) as stream:
+        config = yaml.load(stream)
+
     pidfile = config["pid_file"]
-    cache_factor = config.get("synctl_cache_factor", None)
+    cache_factor = config.get("synctl_cache_factor")
+    start_stop_synapse = True
 
     if cache_factor:
         os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor)
 
-    action = sys.argv[1] if sys.argv[1:] else "usage"
-    if action == "start":
-        start(configfile)
-    elif action == "stop":
-        stop(pidfile)
-    elif action == "restart":
-        stop(pidfile)
-        start(configfile)
-    else:
-        sys.stderr.write("Usage: %s [start|stop|restart] [configfile]\n" % (sys.argv[0],))
-        sys.exit(1)
+    worker_configfiles = []
+    if options.worker:
+        start_stop_synapse = False
+        worker_configfile = options.worker
+        if not os.path.exists(worker_configfile):
+            write(
+                "No worker config found at %r" % (worker_configfile,),
+                stream=sys.stderr,
+            )
+            sys.exit(1)
+        worker_configfiles.append(worker_configfile)
+
+    if options.all_processes:
+        worker_configdir = options.all_processes
+        if not os.path.isdir(worker_configdir):
+            write(
+                "No worker config directory found at %r" % (worker_configdir,),
+                stream=sys.stderr,
+            )
+            sys.exit(1)
+        worker_configfiles.extend(sorted(glob.glob(
+            os.path.join(worker_configdir, "*.yaml")
+        )))
+
+    workers = []
+    for worker_configfile in worker_configfiles:
+        with open(worker_configfile) as stream:
+            worker_config = yaml.load(stream)
+        worker_app = worker_config["worker_app"]
+        worker_pidfile = worker_config["worker_pid_file"]
+        worker_daemonize = worker_config["worker_daemonize"]
+        assert worker_daemonize  # TODO print something more user friendly
+        worker_cache_factor = worker_config.get("synctl_cache_factor")
+        workers.append(Worker(
+            worker_app, worker_configfile, worker_pidfile, worker_cache_factor,
+        ))
+
+    action = options.action
+
+    if action == "stop" or action == "restart":
+        for worker in workers:
+            stop(worker.pidfile, worker.app)
+
+        if start_stop_synapse:
+            stop(pidfile, "synapse.app.homeserver")
+
+        # TODO: Wait for synapse to actually shutdown before starting it again
+
+    if action == "start" or action == "restart":
+        if start_stop_synapse:
+            start(configfile)
+
+        for worker in workers:
+            if worker.cache_factor:
+                os.environ["SYNAPSE_CACHE_FACTOR"] = str(worker.cache_factor)
+
+            start_worker(worker.app, configfile, worker.configfile)
+
+            if cache_factor:
+                os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor)
+            else:
+                os.environ.pop("SYNAPSE_CACHE_FACTOR", None)
 
 
 if __name__ == "__main__":
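When given --all-processes, main() collects every *.yaml file in the directory in sorted order, reads each one, and builds a Worker record from it. A standalone sketch of that discovery step, assuming PyYAML and hypothetical paths:

    # Sketch of synctl's worker discovery; each YAML file is expected to
    # carry worker_app and worker_pid_file, as checked in main() above.
    import glob
    import os
    import yaml

    def discover_workers(worker_configdir):
        workers = []
        for path in sorted(glob.glob(os.path.join(worker_configdir, "*.yaml"))):
            with open(path) as stream:
                cfg = yaml.safe_load(stream)
            workers.append((cfg["worker_app"], path, cfg["worker_pid_file"]))
        return workers

    # discover_workers("/etc/synapse/workers") might return, say,
    # [("synapse.app.synchrotron", ".../synchrotron.yaml", ".../synchrotron.pid")]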
diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py
index 47a4e9f864..9afc8fd754 100644
--- a/synapse/appservice/scheduler.py
+++ b/synapse/appservice/scheduler.py
@@ -56,22 +56,22 @@ import logging
 logger = logging.getLogger(__name__)
 
 
-class AppServiceScheduler(object):
+class ApplicationServiceScheduler(object):
     """ Public facing API for this module. Does the required DI to tie the
     components together. This also serves as the "event_pool", which in this
     case is a simple array.
     """
 
-    def __init__(self, clock, store, as_api):
-        self.clock = clock
-        self.store = store
-        self.as_api = as_api
+    def __init__(self, hs):
+        self.clock = hs.get_clock()
+        self.store = hs.get_datastore()
+        self.as_api = hs.get_application_service_api()
 
         def create_recoverer(service, callback):
-            return _Recoverer(clock, store, as_api, service, callback)
+            return _Recoverer(self.clock, self.store, self.as_api, service, callback)
 
         self.txn_ctrl = _TransactionController(
-            clock, store, as_api, create_recoverer
+            self.clock, self.store, self.as_api, create_recoverer
         )
         self.queuer = _ServiceQueuer(self.txn_ctrl)
 
diff --git a/synapse/config/_base.py b/synapse/config/_base.py
index 7449f36491..af9f17bf7b 100644
--- a/synapse/config/_base.py
+++ b/synapse/config/_base.py
@@ -157,9 +157,40 @@ class Config(object):
         return default_config, config
 
     @classmethod
-    def load_config(cls, description, argv, generate_section=None):
+    def load_config(cls, description, argv):
+        config_parser = argparse.ArgumentParser(
+            description=description,
+        )
+        config_parser.add_argument(
+            "-c", "--config-path",
+            action="append",
+            metavar="CONFIG_FILE",
+            help="Specify config file. Can be given multiple times and"
+                 " may specify directories containing *.yaml files."
+        )
+
+        config_parser.add_argument(
+            "--keys-directory",
+            metavar="DIRECTORY",
+            help="Where files such as certs and signing keys are stored when"
+                 " their location is given explicitly in the config."
+                 " Defaults to the directory containing the last config file",
+        )
+
+        config_args = config_parser.parse_args(argv)
+
+        config_files = find_config_files(search_paths=config_args.config_path)
+
         obj = cls()
+        obj.read_config_files(
+            config_files,
+            keys_directory=config_args.keys_directory,
+            generate_keys=False,
+        )
+        return obj
 
+    @classmethod
+    def load_or_generate_config(cls, description, argv):
         config_parser = argparse.ArgumentParser(add_help=False)
         config_parser.add_argument(
             "-c", "--config-path",
@@ -176,7 +207,7 @@ class Config(object):
         config_parser.add_argument(
             "--report-stats",
             action="store",
-            help="Stuff",
+            help="Whether the generated config reports anonymized usage statistics",
             choices=["yes", "no"]
         )
         config_parser.add_argument(
@@ -197,36 +228,11 @@ class Config(object):
         )
         config_args, remaining_args = config_parser.parse_known_args(argv)
 
+        config_files = find_config_files(search_paths=config_args.config_path)
+
         generate_keys = config_args.generate_keys
 
-        config_files = []
-        if config_args.config_path:
-            for config_path in config_args.config_path:
-                if os.path.isdir(config_path):
-                    # We accept specifying directories as config paths, we search
-                    # inside that directory for all files matching *.yaml, and then
-                    # we apply them in *sorted* order.
-                    files = []
-                    for entry in os.listdir(config_path):
-                        entry_path = os.path.join(config_path, entry)
-                        if not os.path.isfile(entry_path):
-                            print (
-                                "Found subdirectory in config directory: %r. IGNORING."
-                            ) % (entry_path, )
-                            continue
-
-                        if not entry.endswith(".yaml"):
-                            print (
-                                "Found file in config directory that does not"
-                                " end in '.yaml': %r. IGNORING."
-                            ) % (entry_path, )
-                            continue
-
-                        files.append(entry_path)
-
-                    config_files.extend(sorted(files))
-                else:
-                    config_files.append(config_path)
+        obj = cls()
 
         if config_args.generate_config:
             if config_args.report_stats is None:
@@ -299,28 +305,43 @@ class Config(object):
                 " -c CONFIG-FILE\""
             )
 
-        if config_args.keys_directory:
-            config_dir_path = config_args.keys_directory
-        else:
-            config_dir_path = os.path.dirname(config_args.config_path[-1])
-        config_dir_path = os.path.abspath(config_dir_path)
+        obj.read_config_files(
+            config_files,
+            keys_directory=config_args.keys_directory,
+            generate_keys=generate_keys,
+        )
+
+        if generate_keys:
+            return None
+
+        obj.invoke_all("read_arguments", args)
+
+        return obj
+
+    def read_config_files(self, config_files, keys_directory=None,
+                          generate_keys=False):
+        if not keys_directory:
+            keys_directory = os.path.dirname(config_files[-1])
+
+        config_dir_path = os.path.abspath(keys_directory)
 
         specified_config = {}
         for config_file in config_files:
-            yaml_config = cls.read_config_file(config_file)
+            yaml_config = self.read_config_file(config_file)
             specified_config.update(yaml_config)
 
         if "server_name" not in specified_config:
             raise ConfigError(MISSING_SERVER_NAME)
 
         server_name = specified_config["server_name"]
-        _, config = obj.generate_config(
+        _, config = self.generate_config(
             config_dir_path=config_dir_path,
             server_name=server_name,
             is_generating_file=False,
         )
         config.pop("log_config")
         config.update(specified_config)
+
         if "report_stats" not in config:
             raise ConfigError(
                 MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS + "\n" +
@@ -328,11 +349,51 @@ class Config(object):
             )
 
         if generate_keys:
-            obj.invoke_all("generate_files", config)
+            self.invoke_all("generate_files", config)
             return
 
-        obj.invoke_all("read_config", config)
-
-        obj.invoke_all("read_arguments", args)
-
-        return obj
+        self.invoke_all("read_config", config)
+
+
+def find_config_files(search_paths):
+    """Finds config files using a list of search paths. If a path is a file
+    then that file path is added to the list. If a search path is a directory
+    then all the "*.yaml" files in that directory are added to the list in
+    sorted order.
+
+    Args:
+        search_paths(list(str)): A list of paths to search.
+
+    Returns:
+        list(str): A list of file paths.
+    """
+
+    config_files = []
+    if search_paths:
+        for config_path in search_paths:
+            if os.path.isdir(config_path):
+                # We accept specifying directories as config paths, we search
+                # inside that directory for all files matching *.yaml, and then
+                # we apply them in *sorted* order.
+                files = []
+                for entry in os.listdir(config_path):
+                    entry_path = os.path.join(config_path, entry)
+                    if not os.path.isfile(entry_path):
+                        print (
+                            "Found subdirectory in config directory: %r. IGNORING."
+                        ) % (entry_path, )
+                        continue
+
+                    if not entry.endswith(".yaml"):
+                        print (
+                            "Found file in config directory that does not"
+                            " end in '.yaml': %r. IGNORING."
+                        ) % (entry_path, )
+                        continue
+
+                    files.append(entry_path)
+
+                config_files.extend(sorted(files))
+            else:
+                config_files.append(config_path)
+    return config_files
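Since directory contents are applied in sorted order and later files override earlier ones, a conf.d-style layout falls out naturally. For example (paths hypothetical):

    # Given /etc/synapse/conf.d containing 00-base.yaml and 10-site.yaml:
    config_files = find_config_files([
        "/etc/synapse/homeserver.yaml",
        "/etc/synapse/conf.d",
    ])
    # -> ["/etc/synapse/homeserver.yaml",
    #     "/etc/synapse/conf.d/00-base.yaml",
    #     "/etc/synapse/conf.d/10-site.yaml"]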
diff --git a/synapse/config/captcha.py b/synapse/config/captcha.py
index b54dbabbee..7ba0c2de6a 100644
--- a/synapse/config/captcha.py
+++ b/synapse/config/captcha.py
@@ -27,6 +27,7 @@ class CaptchaConfig(Config):
     def default_config(self, **kwargs):
         return """\
         ## Captcha ##
+        # See docs/CAPTCHA_SETUP for full details of configuring this.
 
         # This Home Server's ReCAPTCHA public key.
         recaptcha_public_key: "YOUR_PUBLIC_KEY"
diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py
index 90bdd08f00..a187161272 100644
--- a/synapse/config/emailconfig.py
+++ b/synapse/config/emailconfig.py
@@ -89,7 +89,7 @@ class EmailConfig(Config):
         #   enable_notifs: false
         #   smtp_host: "localhost"
         #   smtp_port: 25
-        #   notif_from: Your Friendly Matrix Home Server <noreply@example.com>
+        #   notif_from: "Your Friendly %(app)s Home Server <noreply@example.com>"
         #   app_name: Matrix
         #   template_dir: res/templates
         #   notif_template_html: notif_mail.html
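Quoting the notif_from example also makes room for the new %(app)s placeholder, which is presumably filled in with the configured app_name via ordinary Python string interpolation, along these lines:

    # Illustration of the %(app)s substitution; "Matrix" is the sample
    # app_name from the config block above.
    notif_from = "Your Friendly %(app)s Home Server <noreply@example.com>"
    notif_from % {"app": "Matrix"}
    # -> 'Your Friendly Matrix Home Server <noreply@example.com>'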
diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py
index fc2445484c..79b0534b3b 100644
--- a/synapse/config/homeserver.py
+++ b/synapse/config/homeserver.py
@@ -32,13 +32,15 @@ from .password import PasswordConfig
 from .jwt import JWTConfig
 from .ldap import LDAPConfig
 from .emailconfig import EmailConfig
+from .workers import WorkerConfig
 
 
 class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,
                        RatelimitConfig, ContentRepositoryConfig, CaptchaConfig,
                        VoipConfig, RegistrationConfig, MetricsConfig, ApiConfig,
                        AppServiceConfig, KeyConfig, SAML2Config, CasConfig,
-                       JWTConfig, LDAPConfig, PasswordConfig, EmailConfig,):
+                       JWTConfig, LDAPConfig, PasswordConfig, EmailConfig,
+                       WorkerConfig,):
     pass
 
 
diff --git a/synapse/config/ldap.py b/synapse/config/ldap.py
index 9c14593a99..d83c2230be 100644
--- a/synapse/config/ldap.py
+++ b/synapse/config/ldap.py
@@ -13,40 +13,88 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import Config
+from ._base import Config, ConfigError
+
+
+MISSING_LDAP3 = (
+    "Missing ldap3 library. This is required for LDAP Authentication."
+)
+
+
+class LDAPMode(object):
+    SIMPLE = "simple",
+    SEARCH = "search",
+
+    LIST = (SIMPLE, SEARCH)
 
 
 class LDAPConfig(Config):
     def read_config(self, config):
-        ldap_config = config.get("ldap_config", None)
-        if ldap_config:
-            self.ldap_enabled = ldap_config.get("enabled", False)
-            self.ldap_server = ldap_config["server"]
-            self.ldap_port = ldap_config["port"]
-            self.ldap_tls = ldap_config.get("tls", False)
-            self.ldap_search_base = ldap_config["search_base"]
-            self.ldap_search_property = ldap_config["search_property"]
-            self.ldap_email_property = ldap_config["email_property"]
-            self.ldap_full_name_property = ldap_config["full_name_property"]
-        else:
-            self.ldap_enabled = False
-            self.ldap_server = None
-            self.ldap_port = None
-            self.ldap_tls = False
-            self.ldap_search_base = None
-            self.ldap_search_property = None
-            self.ldap_email_property = None
-            self.ldap_full_name_property = None
+        ldap_config = config.get("ldap_config", {})
+
+        self.ldap_enabled = ldap_config.get("enabled", False)
+
+        if self.ldap_enabled:
+            # verify dependencies are available
+            try:
+                import ldap3
+                ldap3  # to stop unused lint
+            except ImportError:
+                raise ConfigError(MISSING_LDAP3)
+
+            self.ldap_mode = LDAPMode.SIMPLE
+
+            # verify config sanity
+            self.require_keys(ldap_config, [
+                "uri",
+                "base",
+                "attributes",
+            ])
+
+            self.ldap_uri = ldap_config["uri"]
+            self.ldap_start_tls = ldap_config.get("start_tls", False)
+            self.ldap_base = ldap_config["base"]
+            self.ldap_attributes = ldap_config["attributes"]
+
+            if "bind_dn" in ldap_config:
+                self.ldap_mode = LDAPMode.SEARCH
+                self.require_keys(ldap_config, [
+                    "bind_dn",
+                    "bind_password",
+                ])
+
+                self.ldap_bind_dn = ldap_config["bind_dn"]
+                self.ldap_bind_password = ldap_config["bind_password"]
+                self.ldap_filter = ldap_config.get("filter", None)
+
+            # verify attribute lookup
+            self.require_keys(ldap_config['attributes'], [
+                "uid",
+                "name",
+                "mail",
+            ])
+
+    def require_keys(self, config, required):
+        missing = [key for key in required if key not in config]
+        if missing:
+            raise ConfigError(
+                "LDAP enabled but missing required config values: {}".format(
+                    ", ".join(missing)
+                )
+            )
 
     def default_config(self, **kwargs):
         return """\
         # ldap_config:
         #   enabled: true
-        #   server: "ldap://localhost"
-        #   port: 389
-        #   tls: false
-        #   search_base: "ou=Users,dc=example,dc=com"
-        #   search_property: "cn"
-        #   email_property: "email"
-        #   full_name_property: "givenName"
+        #   uri: "ldap://ldap.example.com:389"
+        #   start_tls: true
+        #   base: "ou=users,dc=example,dc=com"
+        #   attributes:
+        #      uid: "cn"
+        #      mail: "email"
+        #      name: "givenName"
+        #   #bind_dn:
+        #   #bind_password:
+        #   #filter: "(objectClass=posixAccount)"
         """
diff --git a/synapse/config/logger.py b/synapse/config/logger.py
index 5047db898f..dc68683fbc 100644
--- a/synapse/config/logger.py
+++ b/synapse/config/logger.py
@@ -126,54 +126,58 @@ class LoggingConfig(Config):
                 )
 
     def setup_logging(self):
-        log_format = (
-            "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
-            " - %(message)s"
-        )
-        if self.log_config is None:
-
-            level = logging.INFO
-            level_for_storage = logging.INFO
-            if self.verbosity:
-                level = logging.DEBUG
-                if self.verbosity > 1:
-                    level_for_storage = logging.DEBUG
-
-            # FIXME: we need a logging.WARN for a -q quiet option
-            logger = logging.getLogger('')
-            logger.setLevel(level)
-
-            logging.getLogger('synapse.storage').setLevel(level_for_storage)
-
-            formatter = logging.Formatter(log_format)
-            if self.log_file:
-                # TODO: Customisable file size / backup count
-                handler = logging.handlers.RotatingFileHandler(
-                    self.log_file, maxBytes=(1000 * 1000 * 100), backupCount=3
-                )
-
-                def sighup(signum, stack):
-                    logger.info("Closing log file due to SIGHUP")
-                    handler.doRollover()
-                    logger.info("Opened new log file due to SIGHUP")
-
-                # TODO(paul): obviously this is a terrible mechanism for
-                #   stealing SIGHUP, because it means no other part of synapse
-                #   can use it instead. If we want to catch SIGHUP anywhere
-                #   else as well, I'd suggest we find a nicer way to broadcast
-                #   it around.
-                if getattr(signal, "SIGHUP"):
-                    signal.signal(signal.SIGHUP, sighup)
-            else:
-                handler = logging.StreamHandler()
-            handler.setFormatter(formatter)
-
-            handler.addFilter(LoggingContextFilter(request=""))
-
-            logger.addHandler(handler)
+        setup_logging(self.log_config, self.log_file, self.verbosity)
+
+
+def setup_logging(log_config=None, log_file=None, verbosity=None):
+    log_format = (
+        "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
+        " - %(message)s"
+    )
+    if log_config is None:
+
+        level = logging.INFO
+        level_for_storage = logging.INFO
+        if verbosity:
+            level = logging.DEBUG
+            if verbosity > 1:
+                level_for_storage = logging.DEBUG
+
+        # FIXME: we need a logging.WARN for a -q quiet option
+        logger = logging.getLogger('')
+        logger.setLevel(level)
+
+        logging.getLogger('synapse.storage').setLevel(level_for_storage)
+
+        formatter = logging.Formatter(log_format)
+        if log_file:
+            # TODO: Customisable file size / backup count
+            handler = logging.handlers.RotatingFileHandler(
+                log_file, maxBytes=(1000 * 1000 * 100), backupCount=3
+            )
+
+            def sighup(signum, stack):
+                logger.info("Closing log file due to SIGHUP")
+                handler.doRollover()
+                logger.info("Opened new log file due to SIGHUP")
+
+            # TODO(paul): obviously this is a terrible mechanism for
+            #   stealing SIGHUP, because it means no other part of synapse
+            #   can use it instead. If we want to catch SIGHUP anywhere
+            #   else as well, I'd suggest we find a nicer way to broadcast
+            #   it around.
+            if getattr(signal, "SIGHUP", None):
+                signal.signal(signal.SIGHUP, sighup)
         else:
-            with open(self.log_config, 'r') as f:
-                logging.config.dictConfig(yaml.load(f))
+            handler = logging.StreamHandler()
+        handler.setFormatter(formatter)
+
+        handler.addFilter(LoggingContextFilter(request=""))
+
+        logger.addHandler(handler)
+    else:
+        with open(log_config, 'r') as f:
+            logging.config.dictConfig(yaml.load(f))
 
-        observer = PythonLoggingObserver()
-        observer.start()
+    observer = PythonLoggingObserver()
+    observer.start()
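Hoisting the body into a module-level setup_logging() is what lets worker entry points (such as the synchrotron above) configure logging without constructing a LoggingConfig. Both branches can be exercised directly; paths here are hypothetical:

    # File-based logging with DEBUG verbosity and log rotation:
    setup_logging(log_config=None,
                  log_file="/var/log/synapse/worker.log",
                  verbosity=1)

    # Or defer entirely to a YAML dictConfig file:
    setup_logging(log_config="/etc/synapse/log_config.yaml")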
diff --git a/synapse/config/password.py b/synapse/config/password.py
index dec801ef41..a4bd171399 100644
--- a/synapse/config/password.py
+++ b/synapse/config/password.py
@@ -23,10 +23,14 @@ class PasswordConfig(Config):
     def read_config(self, config):
         password_config = config.get("password_config", {})
         self.password_enabled = password_config.get("enabled", True)
+        self.password_pepper = password_config.get("pepper", "")
 
     def default_config(self, config_dir_path, server_name, **kwargs):
         return """
         # Enable password for login.
         password_config:
            enabled: true
+           # Uncomment and change to a secret random string for extra security.
+           # DO NOT CHANGE THIS AFTER INITIAL SETUP!
+           #pepper: ""
         """
diff --git a/synapse/config/server.py b/synapse/config/server.py
index 0b5f462e44..51eaf423ce 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import Config
+from ._base import Config, ConfigError
 
 
 class ServerConfig(Config):
@@ -27,8 +27,9 @@ class ServerConfig(Config):
         self.daemonize = config.get("daemonize")
         self.print_pidfile = config.get("print_pidfile")
         self.user_agent_suffix = config.get("user_agent_suffix")
-        self.use_frozen_dicts = config.get("use_frozen_dicts", True)
+        self.use_frozen_dicts = config.get("use_frozen_dicts", False)
         self.public_baseurl = config.get("public_baseurl")
+        self.secondary_directory_servers = config.get("secondary_directory_servers", [])
 
         if self.public_baseurl is not None:
             if self.public_baseurl[-1] != '/':
@@ -37,6 +38,8 @@ class ServerConfig(Config):
 
         self.listeners = config.get("listeners", [])
 
+        self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None))
+
         bind_port = config.get("bind_port")
         if bind_port:
             self.listeners = []
@@ -104,26 +107,6 @@ class ServerConfig(Config):
                 ]
             })
 
-        # Attempt to guess the content_addr for the v0 content repostitory
-        content_addr = config.get("content_addr")
-        if not content_addr:
-            for listener in self.listeners:
-                if listener["type"] == "http" and not listener.get("tls", False):
-                    unsecure_port = listener["port"]
-                    break
-            else:
-                raise RuntimeError("Could not determine 'content_addr'")
-
-            host = self.server_name
-            if ':' not in host:
-                host = "%s:%d" % (host, unsecure_port)
-            else:
-                host = host.split(':')[0]
-                host = "%s:%d" % (host, unsecure_port)
-            content_addr = "http://%s" % (host,)
-
-        self.content_addr = content_addr
-
     def default_config(self, server_name, **kwargs):
         if ":" in server_name:
             bind_port = int(server_name.split(":")[1])
@@ -156,6 +139,17 @@ class ServerConfig(Config):
         # hard limit.
         soft_file_limit: 0
 
+        # The GC threshold parameters to pass to `gc.set_threshold`, if defined
+        # gc_thresholds: [700, 10, 10]
+
+        # A list of other Home Servers to fetch the public room directory from
+        # and include in the public room directory of this home server.
+        # This is a temporary stopgap solution to populate a new server with a
+        # list of rooms until there exists a good solution for a decentralized
+        # room directory.
+        # secondary_directory_servers:
+        #     - matrix.org
+
         # List of ports that Synapse should listen on, their purpose and their
         # configuration.
         listeners:
@@ -237,3 +231,20 @@ class ServerConfig(Config):
                                   type=int,
                                   help="Turn on the twisted telnet manhole"
                                   " service on the given port.")
+
+
+def read_gc_thresholds(thresholds):
+    """Reads the three integer thresholds for garbage collection. Ensures that
+    the thresholds are integers if thresholds are supplied.
+    """
+    if thresholds is None:
+        return None
+    try:
+        assert len(thresholds) == 3
+        return (
+            int(thresholds[0]), int(thresholds[1]), int(thresholds[2]),
+        )
+    except:
+        raise ConfigError(
+            "Value of `gc_threshold` must be a list of three integers if set"
+        )
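read_gc_thresholds() normalises the config value into the 3-tuple that gc.set_threshold() expects; the server then applies it at startup, as in the synchrotron's run() above. For example:

    import gc

    thresholds = read_gc_thresholds([700, 10, 10])   # -> (700, 10, 10)
    if thresholds:
        gc.set_threshold(*thresholds)

    read_gc_thresholds(None)        # -> None, feature left disabled
    read_gc_thresholds([700, 10])   # raises ConfigError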
diff --git a/synapse/config/workers.py b/synapse/config/workers.py
new file mode 100644
index 0000000000..904789d155
--- /dev/null
+++ b/synapse/config/workers.py
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 matrix.org
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config
+
+
+class WorkerConfig(Config):
+    """The workers are processes run separately to the main synapse process.
+    They have their own pid_file and listener configuration. They use the
+    replication_url to talk to the main synapse process."""
+
+    def read_config(self, config):
+        self.worker_app = config.get("worker_app")
+        self.worker_listeners = config.get("worker_listeners")
+        self.worker_daemonize = config.get("worker_daemonize")
+        self.worker_pid_file = config.get("worker_pid_file")
+        self.worker_log_file = config.get("worker_log_file")
+        self.worker_log_config = config.get("worker_log_config")
+        self.worker_replication_url = config.get("worker_replication_url")
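Each key above maps one-to-one onto a line of a worker's YAML file. Expressed as the dict that read_config() would receive for a synchrotron, a plausible configuration looks like this (ports, paths and the listener resources are hypothetical):

    # The parsed form of a hypothetical synchrotron worker YAML file.
    worker_config = {
        "worker_app": "synapse.app.synchrotron",
        "worker_listeners": [
            {"type": "http", "port": 8083,
             "resources": [{"names": ["client"]}]},
        ],
        "worker_daemonize": True,   # synctl currently requires this
        "worker_pid_file": "/var/run/synapse/synchrotron.pid",
        "worker_log_file": "/var/log/synapse/synchrotron.log",
        "worker_replication_url": "http://127.0.0.1:9092/_synapse/replication",
    }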
diff --git a/synapse/crypto/keyclient.py b/synapse/crypto/keyclient.py
index 54b83da9d8..c2bd64d6c2 100644
--- a/synapse/crypto/keyclient.py
+++ b/synapse/crypto/keyclient.py
@@ -77,10 +77,12 @@ class SynapseKeyClientProtocol(HTTPClient):
     def __init__(self):
         self.remote_key = defer.Deferred()
         self.host = None
+        self._peer = None
 
     def connectionMade(self):
-        self.host = self.transport.getHost()
-        logger.debug("Connected to %s", self.host)
+        self._peer = self.transport.getPeer()
+        logger.debug("Connected to %s", self._peer)
+
         self.sendCommand(b"GET", self.path)
         if self.host:
             self.sendHeader(b"Host", self.host)
@@ -124,7 +126,10 @@ class SynapseKeyClientProtocol(HTTPClient):
         self.timer.cancel()
 
     def on_timeout(self):
-        logger.debug("Timeout waiting for response from %s", self.host)
+        logger.debug(
+            "Timeout waiting for response from %s: %s",
+            self.host, self._peer,
+        )
         self.errback(IOError("Timeout waiting for response"))
         self.transport.abortConnection()
 
@@ -133,4 +138,5 @@ class SynapseKeyClientFactory(Factory):
     def protocol(self):
         protocol = SynapseKeyClientProtocol()
         protocol.path = self.path
+        protocol.host = self.host
         return protocol
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py
index d08ee0aa91..7cd11cfae7 100644
--- a/synapse/crypto/keyring.py
+++ b/synapse/crypto/keyring.py
@@ -44,7 +44,25 @@ import logging
 logger = logging.getLogger(__name__)
 
 
-KeyGroup = namedtuple("KeyGroup", ("server_name", "group_id", "key_ids"))
+VerifyKeyRequest = namedtuple("VerifyKeyRequest", (
+    "server_name", "key_ids", "json_object", "deferred"
+))
+"""
+A request for a verify key to verify a JSON object.
+
+Attributes:
+    server_name(str): The name of the server to verify against.
+    key_ids(set(str)): The set of key_ids that could be used to verify the
+        JSON object.
+    json_object(dict): The JSON object to verify.
+    deferred(twisted.internet.defer.Deferred):
+        A deferred (server_name, key_id, verify_key) tuple that resolves when
+        a verify key has been fetched
+"""
+
+
+class KeyLookupError(ValueError):
+    pass
 
 
 class Keyring(object):
@@ -74,39 +92,32 @@ class Keyring(object):
             list of deferreds indicating success or failure to verify each
             json object's signature for the given server_name.
         """
-        group_id_to_json = {}
-        group_id_to_group = {}
-        group_ids = []
-
-        next_group_id = 0
-        deferreds = {}
+        verify_requests = []
 
         for server_name, json_object in server_and_json:
             logger.debug("Verifying for %s", server_name)
-            group_id = next_group_id
-            next_group_id += 1
-            group_ids.append(group_id)
 
             key_ids = signature_ids(json_object, server_name)
             if not key_ids:
-                deferreds[group_id] = defer.fail(SynapseError(
+                deferred = defer.fail(SynapseError(
                     400,
                     "Not signed with a supported algorithm",
                     Codes.UNAUTHORIZED,
                 ))
             else:
-                deferreds[group_id] = defer.Deferred()
+                deferred = defer.Deferred()
 
-            group = KeyGroup(server_name, group_id, key_ids)
+            verify_request = VerifyKeyRequest(
+                server_name, key_ids, json_object, deferred
+            )
 
-            group_id_to_group[group_id] = group
-            group_id_to_json[group_id] = json_object
+            verify_requests.append(verify_request)
 
         @defer.inlineCallbacks
-        def handle_key_deferred(group, deferred):
-            server_name = group.server_name
+        def handle_key_deferred(verify_request):
+            server_name = verify_request.server_name
             try:
-                _, _, key_id, verify_key = yield deferred
+                _, key_id, verify_key = yield verify_request.deferred
             except IOError as e:
                 logger.warn(
                     "Got IOError when downloading keys for %s: %s %s",
@@ -128,7 +139,7 @@ class Keyring(object):
                     Codes.UNAUTHORIZED,
                 )
 
-            json_object = group_id_to_json[group.group_id]
+            json_object = verify_request.json_object
 
             try:
                 verify_signed_json(json_object, server_name, verify_key)
@@ -157,36 +168,34 @@ class Keyring(object):
 
             # Actually start fetching keys.
             wait_on_deferred.addBoth(
-                lambda _: self.get_server_verify_keys(group_id_to_group, deferreds)
+                lambda _: self.get_server_verify_keys(verify_requests)
             )
 
             # When we've finished fetching all the keys for a given server_name,
             # resolve the deferred passed to `wait_for_previous_lookups` so that
             # any lookups waiting will proceed.
-            server_to_gids = {}
+            server_to_request_ids = {}
 
-            def remove_deferreds(res, server_name, group_id):
-                server_to_gids[server_name].discard(group_id)
-                if not server_to_gids[server_name]:
+            def remove_deferreds(res, server_name, verify_request):
+                request_id = id(verify_request)
+                server_to_request_ids[server_name].discard(request_id)
+                if not server_to_request_ids[server_name]:
                     d = server_to_deferred.pop(server_name, None)
                     if d:
                         d.callback(None)
                 return res
 
-            for g_id, deferred in deferreds.items():
-                server_name = group_id_to_group[g_id].server_name
-                server_to_gids.setdefault(server_name, set()).add(g_id)
-                deferred.addBoth(remove_deferreds, server_name, g_id)
+            for verify_request in verify_requests:
+                server_name = verify_request.server_name
+                request_id = id(verify_request)
+                server_to_request_ids.setdefault(server_name, set()).add(request_id)
+                verify_request.deferred.addBoth(
+                    remove_deferreds, server_name, verify_request,
+                )
 
         # Pass those keys to handle_key_deferred so that the json object
         # signatures can be verified
         return [
-            preserve_context_over_fn(
-                handle_key_deferred,
-                group_id_to_group[g_id],
-                deferreds[g_id],
-            )
-            for g_id in group_ids
+            preserve_context_over_fn(handle_key_deferred, verify_request)
+            for verify_request in verify_requests
         ]
 
     @defer.inlineCallbacks
@@ -220,7 +229,7 @@ class Keyring(object):
 
             d.addBoth(rm, server_name)
 
-    def get_server_verify_keys(self, group_id_to_group, group_id_to_deferred):
+    def get_server_verify_keys(self, verify_requests):
         """Takes a dict of KeyGroups and tries to find at least one key for
         each group.
         """
@@ -237,62 +246,64 @@ class Keyring(object):
             merged_results = {}
 
             missing_keys = {}
-            for group in group_id_to_group.values():
-                missing_keys.setdefault(group.server_name, set()).update(
-                    group.key_ids
+            for verify_request in verify_requests:
+                missing_keys.setdefault(verify_request.server_name, set()).update(
+                    verify_request.key_ids
                 )
 
             for fn in key_fetch_fns:
                 results = yield fn(missing_keys.items())
                 merged_results.update(results)
 
-                # We now need to figure out which groups we have keys for
-                # and which we don't
-                missing_groups = {}
-                for group in group_id_to_group.values():
-                    for key_id in group.key_ids:
-                        if key_id in merged_results[group.server_name]:
+                # We now need to figure out which verify requests we have keys
+                # for and which we don't
+                missing_keys = {}
+                requests_missing_keys = []
+                for verify_request in verify_requests:
+                    server_name = verify_request.server_name
+                    result_keys = merged_results[server_name]
+
+                    if verify_request.deferred.called:
+                        # We've already called this deferred, which probably
+                        # means that we've already found a key for it.
+                        continue
+
+                    for key_id in verify_request.key_ids:
+                        if key_id in result_keys:
                             with PreserveLoggingContext():
-                                group_id_to_deferred[group.group_id].callback((
-                                    group.group_id,
-                                    group.server_name,
+                                verify_request.deferred.callback((
+                                    server_name,
                                     key_id,
-                                    merged_results[group.server_name][key_id],
+                                    result_keys[key_id],
                                 ))
                             break
                     else:
-                        missing_groups.setdefault(
-                            group.server_name, []
-                        ).append(group)
-
-                if not missing_groups:
+                        # The else block is only reached if the loop above
+                        # doesn't break.
+                        missing_keys.setdefault(server_name, set()).update(
+                            verify_request.key_ids
+                        )
+                        requests_missing_keys.append(verify_request)
+
+                if not missing_keys:
                     break
 
-                missing_keys = {
-                    server_name: set(
-                        key_id for group in groups for key_id in group.key_ids
-                    )
-                    for server_name, groups in missing_groups.items()
-                }
-
-            for group in missing_groups.values():
-                group_id_to_deferred[group.group_id].errback(SynapseError(
+            for verify_request in requests_missing_keys:
+                verify_request.deferred.errback(SynapseError(
                     401,
                     "No key for %s with id %s" % (
-                        group.server_name, group.key_ids,
+                        verify_request.server_name, verify_request.key_ids,
                     ),
                     Codes.UNAUTHORIZED,
                 ))
 
         def on_err(err):
-            for deferred in group_id_to_deferred.values():
-                if not deferred.called:
-                    deferred.errback(err)
+            for verify_request in verify_requests:
+                if not verify_request.deferred.called:
+                    verify_request.deferred.errback(err)
 
         do_iterations().addErrback(on_err)
 
-        return group_id_to_deferred
-
     @defer.inlineCallbacks
     def get_keys_from_store(self, server_name_and_key_ids):
         res = yield defer.gatherResults(
@@ -356,7 +367,7 @@ class Keyring(object):
                     )
                 except Exception as e:
                     logger.info(
-                        "Unable to getting key %r for %r directly: %s %s",
+                        "Unable to get key %r for %r directly: %s %s",
                         key_ids, server_name,
                         type(e).__name__, str(e.message),
                     )
@@ -418,7 +429,7 @@ class Keyring(object):
         for response in responses:
             if (u"signatures" not in response
                     or perspective_name not in response[u"signatures"]):
-                raise ValueError(
+                raise KeyLookupError(
                     "Key response not signed by perspective server"
                     " %r" % (perspective_name,)
                 )
@@ -441,13 +452,13 @@ class Keyring(object):
                     list(response[u"signatures"][perspective_name]),
                     list(perspective_keys)
                 )
-                raise ValueError(
+                raise KeyLookupError(
                     "Response not signed with a known key for perspective"
                     " server %r" % (perspective_name,)
                 )
 
             processed_response = yield self.process_v2_response(
-                perspective_name, response
+                perspective_name, response, only_from_server=False
             )
 
             for server_name, response_keys in processed_response.items():
@@ -484,10 +495,10 @@ class Keyring(object):
 
             if (u"signatures" not in response
                     or server_name not in response[u"signatures"]):
-                raise ValueError("Key response not signed by remote server")
+                raise KeyLookupError("Key response not signed by remote server")
 
             if "tls_fingerprints" not in response:
-                raise ValueError("Key response missing TLS fingerprints")
+                raise KeyLookupError("Key response missing TLS fingerprints")
 
             certificate_bytes = crypto.dump_certificate(
                 crypto.FILETYPE_ASN1, tls_certificate
@@ -501,7 +512,7 @@ class Keyring(object):
                     response_sha256_fingerprints.add(fingerprint[u"sha256"])
 
             if sha256_fingerprint_b64 not in response_sha256_fingerprints:
-                raise ValueError("TLS certificate not allowed by fingerprints")
+                raise KeyLookupError("TLS certificate not allowed by fingerprints")
 
             response_keys = yield self.process_v2_response(
                 from_server=server_name,
@@ -527,7 +538,7 @@ class Keyring(object):
 
     @defer.inlineCallbacks
     def process_v2_response(self, from_server, response_json,
-                            requested_ids=[]):
+                            requested_ids=[], only_from_server=True):
         time_now_ms = self.clock.time_msec()
         response_keys = {}
         verify_keys = {}
@@ -551,9 +562,16 @@ class Keyring(object):
 
         results = {}
         server_name = response_json["server_name"]
+        if only_from_server:
+            if server_name != from_server:
+                raise KeyLookupError(
+                    "Expected a response for server %r not %r" % (
+                        from_server, server_name
+                    )
+                )
         for key_id in response_json["signatures"].get(server_name, {}):
             if key_id not in response_json["verify_keys"]:
-                raise ValueError(
+                raise KeyLookupError(
                     "Key response must include verification keys for all"
                     " signatures"
                 )
@@ -621,15 +639,15 @@ class Keyring(object):
 
         if ("signatures" not in response
                 or server_name not in response["signatures"]):
-            raise ValueError("Key response not signed by remote server")
+            raise KeyLookupError("Key response not signed by remote server")
 
         if "tls_certificate" not in response:
-            raise ValueError("Key response missing TLS certificate")
+            raise KeyLookupError("Key response missing TLS certificate")
 
         tls_certificate_b64 = response["tls_certificate"]
 
         if encode_base64(x509_certificate_bytes) != tls_certificate_b64:
-            raise ValueError("TLS certificate doesn't match")
+            raise KeyLookupError("TLS certificate doesn't match")
 
         # Cache the result in the datastore.
 
@@ -645,7 +663,7 @@ class Keyring(object):
 
         for key_id in response["signatures"][server_name]:
             if key_id not in response["verify_keys"]:
-                raise ValueError(
+                raise KeyLookupError(
                     "Key response must include verification keys for all"
                     " signatures"
                 )
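The refactor replaces numeric key groups with one VerifyKeyRequest per JSON object, each owning a deferred that fires with (server_name, key_id, verify_key) as soon as any fetcher yields a usable key; requests still unresolved are retried against the next fetcher. A toy sketch of that resolve-or-carry-forward step using Twisted deferreds:

    # Toy sketch of the per-request deferred pattern; key fetching faked.
    from collections import namedtuple

    from twisted.internet import defer

    VerifyKeyRequest = namedtuple(
        "VerifyKeyRequest", ("server_name", "key_ids", "json_object", "deferred")
    )

    def resolve_requests(verify_requests, fetched_keys):
        """fetched_keys: {server_name: {key_id: verify_key}}"""
        still_missing = []
        for request in verify_requests:
            result_keys = fetched_keys.get(request.server_name, {})
            for key_id in request.key_ids:
                if key_id in result_keys:
                    request.deferred.callback(
                        (request.server_name, key_id, result_keys[key_id])
                    )
                    break
            else:
                still_missing.append(request)  # hand to the next fetcher
        return still_missing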
diff --git a/synapse/events/utils.py b/synapse/events/utils.py
index aab18d7f71..0e9fd902af 100644
--- a/synapse/events/utils.py
+++ b/synapse/events/utils.py
@@ -88,6 +88,8 @@ def prune_event(event):
 
     if "age_ts" in event.unsigned:
         allowed_fields["unsigned"]["age_ts"] = event.unsigned["age_ts"]
+    if "replaces_state" in event.unsigned:
+        allowed_fields["unsigned"]["replaces_state"] = event.unsigned["replaces_state"]
 
     return type(event)(
         allowed_fields,
diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py
index a0b7cb7963..da2f5e8cfd 100644
--- a/synapse/federation/federation_base.py
+++ b/synapse/federation/federation_base.py
@@ -31,6 +31,9 @@ logger = logging.getLogger(__name__)
 
 
 class FederationBase(object):
+    def __init__(self, hs):
+        pass
+
     @defer.inlineCallbacks
     def _check_sigs_and_hash_and_fetch(self, origin, pdus, outlier=False,
                                        include_none=False):
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 37ee469fa2..9ba3151713 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -24,6 +24,7 @@ from synapse.api.errors import (
     CodeMessageException, HttpResponseException, SynapseError,
 )
 from synapse.util import unwrapFirstError
+from synapse.util.async import concurrently_execute
 from synapse.util.caches.expiringcache import ExpiringCache
 from synapse.util.logutils import log_function
 from synapse.events import FrozenEvent
@@ -50,7 +51,33 @@ sent_edus_counter = metrics.register_counter("sent_edus")
 sent_queries_counter = metrics.register_counter("sent_queries", labels=["type"])
 
 
+PDU_RETRY_TIME_MS = 1 * 60 * 1000
+
+
 class FederationClient(FederationBase):
+    def __init__(self, hs):
+        super(FederationClient, self).__init__(hs)
+
+        self.pdu_destination_tried = {}
+        self._clock.looping_call(
+            self._clear_tried_cache, 60 * 1000,
+        )
+
+    def _clear_tried_cache(self):
+        """Clear pdu_destination_tried cache"""
+        now = self._clock.time_msec()
+
+        old_dict = self.pdu_destination_tried
+        self.pdu_destination_tried = {}
+
+        for event_id, destination_dict in old_dict.items():
+            destination_dict = {
+                dest: time
+                for dest, time in destination_dict.items()
+                if time + PDU_RETRY_TIME_MS > now
+            }
+            if destination_dict:
+                self.pdu_destination_tried[event_id] = destination_dict
 
     def start_get_pdu_cache(self):
         self._get_pdu_cache = ExpiringCache(
@@ -233,12 +260,19 @@ class FederationClient(FederationBase):
         # TODO: Rate limit the number of times we try and get the same event.
 
         if self._get_pdu_cache:
-            e = self._get_pdu_cache.get(event_id)
-            if e:
-                defer.returnValue(e)
+            ev = self._get_pdu_cache.get(event_id)
+            if ev:
+                defer.returnValue(ev)
+
+        pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {})
 
         pdu = None
         for destination in destinations:
+            now = self._clock.time_msec()
+            last_attempt = pdu_attempts.get(destination, 0)
+            if last_attempt + PDU_RETRY_TIME_MS > now:
+                continue
+
             try:
                 limiter = yield get_retry_limiter(
                     destination,
@@ -266,25 +300,19 @@ class FederationClient(FederationBase):
 
                         break
 
-            except SynapseError:
-                logger.info(
-                    "Failed to get PDU %s from %s because %s",
-                    event_id, destination, e,
-                )
-                continue
-            except CodeMessageException as e:
-                if 400 <= e.code < 500:
-                    raise
+                pdu_attempts[destination] = now
 
+            except SynapseError as e:
                 logger.info(
                     "Failed to get PDU %s from %s because %s",
                     event_id, destination, e,
                 )
-                continue
             except NotRetryingDestination as e:
                 logger.info(e.message)
                 continue
             except Exception as e:
+                pdu_attempts[destination] = now
+
                 logger.info(
                     "Failed to get PDU %s from %s because %s",
                     event_id, destination, e,
@@ -311,6 +339,42 @@ class FederationClient(FederationBase):
             Deferred: Results in a list of PDUs.
         """
 
+        try:
+            # First we try and ask for just the IDs, as that's far quicker if
+            # we have most of the state and auth_chain already.
+            # However, this may 404 if the other side has an old synapse.
+            result = yield self.transport_layer.get_room_state_ids(
+                destination, room_id, event_id=event_id,
+            )
+
+            state_event_ids = result["pdu_ids"]
+            auth_event_ids = result.get("auth_chain_ids", [])
+
+            fetched_events, failed_to_fetch = yield self.get_events(
+                [destination], room_id, set(state_event_ids + auth_event_ids)
+            )
+
+            if failed_to_fetch:
+                logger.warn("Failed to get %r", failed_to_fetch)
+
+            event_map = {
+                ev.event_id: ev for ev in fetched_events
+            }
+
+            pdus = [event_map[e_id] for e_id in state_event_ids if e_id in event_map]
+            auth_chain = [
+                event_map[e_id] for e_id in auth_event_ids if e_id in event_map
+            ]
+
+            auth_chain.sort(key=lambda e: e.depth)
+
+            defer.returnValue((pdus, auth_chain))
+        except HttpResponseException as e:
+            if e.code == 400 or e.code == 404:
+                logger.info("Failed to use get_room_state_ids API, falling back")
+            else:
+                raise e
+
         result = yield self.transport_layer.get_room_state(
             destination, room_id, event_id=event_id,
         )
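The try/except above implements a two-step fetch: ask the remote for just the event IDs first, and fall back to the full /state response only when it answers 400 or 404 (i.e. it predates the state_ids API). A self-contained sketch of the pattern, with a stand-in exception class and caller-supplied fetch callables:

class HttpResponseException(Exception):  # stand-in for synapse's class
    def __init__(self, code):
        super(HttpResponseException, self).__init__(code)
        self.code = code

def get_state_with_fallback(destination, fetch_state_ids, fetch_full_state):
    # fetch_state_ids / fetch_full_state are hypothetical stand-ins for
    # the transport-layer calls used above.
    try:
        return fetch_state_ids(destination)   # fast path: IDs only
    except HttpResponseException as e:
        if e.code not in (400, 404):
            raise                             # a real error: propagate
    return fetch_full_state(destination)      # old server: full events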
@@ -324,12 +388,26 @@ class FederationClient(FederationBase):
             for p in result.get("auth_chain", [])
         ]
 
+        seen_events = yield self.store.get_events([
+            ev.event_id for ev in itertools.chain(pdus, auth_chain)
+        ])
+
         signed_pdus = yield self._check_sigs_and_hash_and_fetch(
-            destination, pdus, outlier=True
+            destination,
+            [p for p in pdus if p.event_id not in seen_events],
+            outlier=True
+        )
+        signed_pdus.extend(
+            seen_events[p.event_id] for p in pdus if p.event_id in seen_events
         )
 
         signed_auth = yield self._check_sigs_and_hash_and_fetch(
-            destination, auth_chain, outlier=True
+            destination,
+            [p for p in auth_chain if p.event_id not in seen_events],
+            outlier=True
+        )
+        signed_auth.extend(
+            seen_events[p.event_id] for p in auth_chain if p.event_id in seen_events
         )
 
         signed_auth.sort(key=lambda e: e.depth)
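The change above avoids re-verifying signatures for events we already have: only unseen events go through _check_sigs_and_hash_and_fetch, and the stored copies are spliced back in afterwards. The split in isolation:

def split_by_seen(pdus, seen_events):
    # seen_events maps event_id -> the copy already in our database
    to_verify = [p for p in pdus if p.event_id not in seen_events]
    already_have = [
        seen_events[p.event_id] for p in pdus if p.event_id in seen_events
    ]
    return to_verify, already_have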
@@ -337,6 +415,67 @@ class FederationClient(FederationBase):
         defer.returnValue((signed_pdus, signed_auth))
 
     @defer.inlineCallbacks
+    def get_events(self, destinations, room_id, event_ids, return_local=True):
+        """Fetch events from some remote destinations, checking if we already
+        have them.
+
+        Args:
+            destinations (list): The homeservers to fetch from.
+            room_id (str): The room the events belong to.
+            event_ids (list): The IDs of the events to fetch.
+            return_local (bool): Whether to include events we already have in
+                the DB in the returned list of events
+
+        Returns:
+            Deferred: A deferred resolving to a 2-tuple where the first is a list of
+            events and the second is a list of event ids that we failed to fetch.
+        """
+        if return_local:
+            seen_events = yield self.store.get_events(event_ids, allow_rejected=True)
+            signed_events = seen_events.values()
+        else:
+            seen_events = yield self.store.have_events(event_ids)
+            signed_events = []
+
+        failed_to_fetch = set()
+
+        missing_events = set(event_ids)
+        for k in seen_events:
+            missing_events.discard(k)
+
+        if not missing_events:
+            defer.returnValue((signed_events, failed_to_fetch))
+
+        def random_server_list():
+            srvs = list(destinations)
+            random.shuffle(srvs)
+            return srvs
+
+        batch_size = 20
+        missing_events = list(missing_events)
+        for i in xrange(0, len(missing_events), batch_size):
+            batch = set(missing_events[i:i + batch_size])
+
+            deferreds = [
+                self.get_pdu(
+                    destinations=random_server_list(),
+                    event_id=e_id,
+                )
+                for e_id in batch
+            ]
+
+            res = yield defer.DeferredList(deferreds, consumeErrors=True)
+            for success, result in res:
+                if success:
+                    signed_events.append(result)
+                    batch.discard(result.event_id)
+
+            # We removed all events we successfully fetched from `batch`
+            failed_to_fetch.update(batch)
+
+        defer.returnValue((signed_events, failed_to_fetch))
+
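get_events above works through the missing IDs in fixed-size batches, asking the destinations in a fresh random order for each event and recording whatever never arrives. A standalone Twisted sketch of that batching (fetch_one stands in for get_pdu; names are illustrative):

import random
from twisted.internet import defer

@defer.inlineCallbacks
def fetch_in_batches(fetch_one, event_ids, destinations, batch_size=20):
    fetched, failed = [], set()
    for i in range(0, len(event_ids), batch_size):
        batch = set(event_ids[i:i + batch_size])
        deferreds = []
        for e_id in batch:
            servers = list(destinations)
            random.shuffle(servers)
            deferreds.append(fetch_one(servers, e_id))
        results = yield defer.DeferredList(deferreds, consumeErrors=True)
        for success, result in results:
            if success:
                fetched.append(result)
                batch.discard(result.event_id)
        failed.update(batch)  # anything left in the batch was not fetched
    defer.returnValue((fetched, failed))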
+    @defer.inlineCallbacks
     @log_function
     def get_event_auth(self, destination, room_id, event_id):
         res = yield self.transport_layer.get_event_auth(
@@ -411,14 +550,19 @@ class FederationClient(FederationBase):
                     (destination, self.event_from_pdu_json(pdu_dict))
                 )
                 break
-            except CodeMessageException:
-                raise
+            except CodeMessageException as e:
+                if not 500 <= e.code < 600:
+                    raise
+                else:
+                    logger.warn(
+                        "Failed to make_%s via %s: %s",
+                        membership, destination, e.message
+                    )
             except Exception as e:
                 logger.warn(
                     "Failed to make_%s via %s: %s",
                     membership, destination, e.message
                 )
-                raise
 
         raise RuntimeError("Failed to send to any server.")
 
@@ -490,8 +634,14 @@ class FederationClient(FederationBase):
                     "auth_chain": signed_auth,
                     "origin": destination,
                 })
-            except CodeMessageException:
-                raise
+            except CodeMessageException as e:
+                if not 500 <= e.code < 600:
+                    raise
+                else:
+                    logger.exception(
+                        "Failed to send_join via %s: %s",
+                        destination, e.message
+                    )
             except Exception as e:
                 logger.exception(
                     "Failed to send_join via %s: %s",
@@ -551,6 +701,25 @@ class FederationClient(FederationBase):
         raise RuntimeError("Failed to send to any server.")
 
     @defer.inlineCallbacks
+    def get_public_rooms(self, destinations):
+        results_by_server = {}
+
+        @defer.inlineCallbacks
+        def _get_result(s):
+            if s == self.server_name:
+                defer.returnValue(None)
+
+            try:
+                result = yield self.transport_layer.get_public_rooms(s)
+                results_by_server[s] = result
+            except:
+                logger.exception("Error getting room list from server %r", s)
+
+        yield concurrently_execute(_get_result, destinations, 3)
+
+        defer.returnValue(results_by_server)
+
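concurrently_execute above fans the room-list fetch out across all the destinations while keeping at most three requests in flight. One way such a helper can be built (the shared-iterator semantics are an assumption about the utility, not taken from this diff):

from twisted.internet import defer

def concurrently_execute_sketch(func, args, limit):
    it = iter(args)

    @defer.inlineCallbacks
    def worker():
        for arg in it:        # workers share one iterator, so each arg
            yield func(arg)   # is processed exactly once

    return defer.gatherResults(
        [worker() for _ in range(limit)], consumeErrors=True,
    )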
+    @defer.inlineCallbacks
     def query_auth(self, destination, room_id, event_id, local_auth):
         """
         Params:
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index f1d231b9d8..aba19639c7 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -19,11 +19,13 @@ from twisted.internet import defer
 from .federation_base import FederationBase
 from .units import Transaction, Edu
 
+from synapse.util.async import Linearizer
 from synapse.util.logutils import log_function
+from synapse.util.caches.response_cache import ResponseCache
 from synapse.events import FrozenEvent
 import synapse.metrics
 
-from synapse.api.errors import FederationError, SynapseError
+from synapse.api.errors import AuthError, FederationError, SynapseError
 
 from synapse.crypto.event_signing import compute_event_signature
 
@@ -44,6 +46,18 @@ received_queries_counter = metrics.register_counter("received_queries", labels=[
 
 
 class FederationServer(FederationBase):
+    def __init__(self, hs):
+        super(FederationServer, self).__init__(hs)
+
+        self.auth = hs.get_auth()
+
+        self._room_pdu_linearizer = Linearizer()
+        self._server_linearizer = Linearizer()
+
+        # We cache responses to state queries, as they take a while and often
+        # come in waves.
+        self._state_resp_cache = ResponseCache(hs, timeout_ms=30000)
+
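The Linearizers added above serialise work per key: two requests queued under the same (origin, room) pair run one after the other, while requests for different keys proceed concurrently. A hedged usage sketch, matching the context-manager idiom used throughout this diff:

from twisted.internet import defer

@defer.inlineCallbacks
def on_some_request(self, origin, room_id):
    # queue() yields a context manager; while it is held, other callers
    # queued under the same (origin, room_id) key wait their turn.
    with (yield self._server_linearizer.queue((origin, room_id))):
        result = yield self._compute(room_id)  # hypothetical helper
    defer.returnValue(result)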
     def set_handler(self, handler):
         """Sets the handler that the replication layer will use to communicate
         receipt of new PDUs from other home servers. The required methods are
@@ -83,11 +97,14 @@ class FederationServer(FederationBase):
     @defer.inlineCallbacks
     @log_function
     def on_backfill_request(self, origin, room_id, versions, limit):
-        pdus = yield self.handler.on_backfill_request(
-            origin, room_id, versions, limit
-        )
+        with (yield self._server_linearizer.queue((origin, room_id))):
+            pdus = yield self.handler.on_backfill_request(
+                origin, room_id, versions, limit
+            )
+
+            res = self._transaction_from_pdus(pdus).get_dict()
 
-        defer.returnValue((200, self._transaction_from_pdus(pdus).get_dict()))
+        defer.returnValue((200, res))
 
     @defer.inlineCallbacks
     @log_function
@@ -178,15 +195,59 @@ class FederationServer(FederationBase):
     @defer.inlineCallbacks
     @log_function
     def on_context_state_request(self, origin, room_id, event_id):
-        if event_id:
-            pdus = yield self.handler.get_state_for_pdu(
-                origin, room_id, event_id,
-            )
-            auth_chain = yield self.store.get_auth_chain(
-                [pdu.event_id for pdu in pdus]
-            )
+        if not event_id:
+            raise NotImplementedError("Specify an event")
+
+        in_room = yield self.auth.check_host_in_room(room_id, origin)
+        if not in_room:
+            raise AuthError(403, "Host not in room.")
+
+        result = self._state_resp_cache.get((room_id, event_id))
+        if not result:
+            with (yield self._server_linearizer.queue((origin, room_id))):
+                resp = yield self._state_resp_cache.set(
+                    (room_id, event_id),
+                    self._on_context_state_request_compute(room_id, event_id)
+                )
+        else:
+            resp = yield result
+
+        defer.returnValue((200, resp))
+
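The ResponseCache lookup above lets concurrent state requests for the same (room, event) share a single computation. The general get/set idiom, sketched from how it is used here (set() stores a deferred that later callers can also wait on; compute is hypothetical):

result = self._state_resp_cache.get(key)
if not result:
    # First caller: start the computation once and cache its deferred.
    resp = yield self._state_resp_cache.set(key, compute(key))
else:
    # Later callers: wait on the deferred the first caller created.
    resp = yield result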
+    @defer.inlineCallbacks
+    def on_state_ids_request(self, origin, room_id, event_id):
+        if not event_id:
+            raise NotImplementedError("Specify an event")
+
+        in_room = yield self.auth.check_host_in_room(room_id, origin)
+        if not in_room:
+            raise AuthError(403, "Host not in room.")
 
-            for event in auth_chain:
+        pdus = yield self.handler.get_state_for_pdu(
+            room_id, event_id,
+        )
+        auth_chain = yield self.store.get_auth_chain(
+            [pdu.event_id for pdu in pdus]
+        )
+
+        defer.returnValue((200, {
+            "pdu_ids": [pdu.event_id for pdu in pdus],
+            "auth_chain_ids": [pdu.event_id for pdu in auth_chain],
+        }))
+
+    @defer.inlineCallbacks
+    def _on_context_state_request_compute(self, room_id, event_id):
+        pdus = yield self.handler.get_state_for_pdu(
+            room_id, event_id,
+        )
+        auth_chain = yield self.store.get_auth_chain(
+            [pdu.event_id for pdu in pdus]
+        )
+
+        for event in auth_chain:
+            # We sign these again because there was a bug where we
+            # incorrectly signed things the first time round
+            if self.hs.is_mine_id(event.event_id):
                 event.signatures.update(
                     compute_event_signature(
                         event,
@@ -194,13 +255,11 @@ class FederationServer(FederationBase):
                         self.hs.config.signing_key[0]
                     )
                 )
-        else:
-            raise NotImplementedError("Specify an event")
 
-        defer.returnValue((200, {
+        defer.returnValue({
             "pdus": [pdu.get_pdu_json() for pdu in pdus],
             "auth_chain": [pdu.get_pdu_json() for pdu in auth_chain],
-        }))
+        })
 
     @defer.inlineCallbacks
     @log_function
@@ -274,14 +333,16 @@ class FederationServer(FederationBase):
 
     @defer.inlineCallbacks
     def on_event_auth(self, origin, room_id, event_id):
-        time_now = self._clock.time_msec()
-        auth_pdus = yield self.handler.on_event_auth(event_id)
-        defer.returnValue((200, {
-            "auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus],
-        }))
+        with (yield self._server_linearizer.queue((origin, room_id))):
+            time_now = self._clock.time_msec()
+            auth_pdus = yield self.handler.on_event_auth(event_id)
+            res = {
+                "auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus],
+            }
+        defer.returnValue((200, res))
 
     @defer.inlineCallbacks
-    def on_query_auth_request(self, origin, content, event_id):
+    def on_query_auth_request(self, origin, content, room_id, event_id):
         """
         Content is a dict with keys::
             auth_chain (list): A list of events that give the auth chain.
@@ -300,58 +361,41 @@ class FederationServer(FederationBase):
         Returns:
             Deferred: Results in `dict` with the same format as `content`
         """
-        auth_chain = [
-            self.event_from_pdu_json(e)
-            for e in content["auth_chain"]
-        ]
-
-        signed_auth = yield self._check_sigs_and_hash_and_fetch(
-            origin, auth_chain, outlier=True
-        )
+        with (yield self._server_linearizer.queue((origin, room_id))):
+            auth_chain = [
+                self.event_from_pdu_json(e)
+                for e in content["auth_chain"]
+            ]
+
+            signed_auth = yield self._check_sigs_and_hash_and_fetch(
+                origin, auth_chain, outlier=True
+            )
 
-        ret = yield self.handler.on_query_auth(
-            origin,
-            event_id,
-            signed_auth,
-            content.get("rejects", []),
-            content.get("missing", []),
-        )
+            ret = yield self.handler.on_query_auth(
+                origin,
+                event_id,
+                signed_auth,
+                content.get("rejects", []),
+                content.get("missing", []),
+            )
 
-        time_now = self._clock.time_msec()
-        send_content = {
-            "auth_chain": [
-                e.get_pdu_json(time_now)
-                for e in ret["auth_chain"]
-            ],
-            "rejects": ret.get("rejects", []),
-            "missing": ret.get("missing", []),
-        }
+            time_now = self._clock.time_msec()
+            send_content = {
+                "auth_chain": [
+                    e.get_pdu_json(time_now)
+                    for e in ret["auth_chain"]
+                ],
+                "rejects": ret.get("rejects", []),
+                "missing": ret.get("missing", []),
+            }
 
         defer.returnValue(
             (200, send_content)
         )
 
-    @defer.inlineCallbacks
     @log_function
     def on_query_client_keys(self, origin, content):
-        query = []
-        for user_id, device_ids in content.get("device_keys", {}).items():
-            if not device_ids:
-                query.append((user_id, None))
-            else:
-                for device_id in device_ids:
-                    query.append((user_id, device_id))
-
-        results = yield self.store.get_e2e_device_keys(query)
-
-        json_result = {}
-        for user_id, device_keys in results.items():
-            for device_id, json_bytes in device_keys.items():
-                json_result.setdefault(user_id, {})[device_id] = json.loads(
-                    json_bytes
-                )
-
-        defer.returnValue({"device_keys": json_result})
+        return self.on_query_request("client_keys", content)
 
     @defer.inlineCallbacks
     @log_function
@@ -377,11 +421,24 @@ class FederationServer(FederationBase):
     @log_function
     def on_get_missing_events(self, origin, room_id, earliest_events,
                               latest_events, limit, min_depth):
-        missing_events = yield self.handler.on_get_missing_events(
-            origin, room_id, earliest_events, latest_events, limit, min_depth
-        )
+        with (yield self._server_linearizer.queue((origin, room_id))):
+            logger.info(
+                "on_get_missing_events: earliest_events: %r, latest_events: %r,"
+                " limit: %d, min_depth: %d",
+                earliest_events, latest_events, limit, min_depth
+            )
+            missing_events = yield self.handler.on_get_missing_events(
+                origin, room_id, earliest_events, latest_events, limit, min_depth
+            )
 
-        time_now = self._clock.time_msec()
+            if len(missing_events) < 5:
+                logger.info(
+                    "Returning %d events: %r", len(missing_events), missing_events
+                )
+            else:
+                logger.info("Returning %d events", len(missing_events))
+
+            time_now = self._clock.time_msec()
 
         defer.returnValue({
             "events": [ev.get_pdu_json(time_now) for ev in missing_events],
@@ -481,42 +538,59 @@ class FederationServer(FederationBase):
                 pdu.internal_metadata.outlier = True
             elif min_depth and pdu.depth > min_depth:
                 if get_missing and prevs - seen:
-                    latest = yield self.store.get_latest_event_ids_in_room(
-                        pdu.room_id
-                    )
-
-                    # We add the prev events that we have seen to the latest
-                    # list to ensure the remote server doesn't give them to us
-                    latest = set(latest)
-                    latest |= seen
-
-                    missing_events = yield self.get_missing_events(
-                        origin,
-                        pdu.room_id,
-                        earliest_events_ids=list(latest),
-                        latest_events=[pdu],
-                        limit=10,
-                        min_depth=min_depth,
-                    )
-
-                    # We want to sort these by depth so we process them and
-                    # tell clients about them in order.
-                    missing_events.sort(key=lambda x: x.depth)
-
-                    for e in missing_events:
-                        yield self._handle_new_pdu(
-                            origin,
-                            e,
-                            get_missing=False
-                        )
-
-                    have_seen = yield self.store.have_events(
-                        [ev for ev, _ in pdu.prev_events]
-                    )
+                    # If we're missing events, make sure we only fetch them
+                    # for one PDU at a time per room.
+                    with (yield self._room_pdu_linearizer.queue(pdu.room_id)):
+                        # We recalculate seen, since it may have changed.
+                        have_seen = yield self.store.have_events(prevs)
+                        seen = set(have_seen.keys())
+
+                        if prevs - seen:
+                            latest = yield self.store.get_latest_event_ids_in_room(
+                                pdu.room_id
+                            )
+
+                            # We add the prev events that we have seen to the latest
+                            # list to ensure the remote server doesn't give them to us
+                            latest = set(latest)
+                            latest |= seen
+
+                            logger.info(
+                                "Missing %d events for room %r: %r...",
+                                len(prevs - seen), pdu.room_id, list(prevs - seen)[:5]
+                            )
+
+                            missing_events = yield self.get_missing_events(
+                                origin,
+                                pdu.room_id,
+                                earliest_events_ids=list(latest),
+                                latest_events=[pdu],
+                                limit=10,
+                                min_depth=min_depth,
+                            )
+
+                            # We want to sort these by depth so we process them and
+                            # tell clients about them in order.
+                            missing_events.sort(key=lambda x: x.depth)
+
+                            for e in missing_events:
+                                yield self._handle_new_pdu(
+                                    origin,
+                                    e,
+                                    get_missing=False
+                                )
+
+                            have_seen = yield self.store.have_events(
+                                [ev for ev, _ in pdu.prev_events]
+                            )
 
             prevs = {e_id for e_id, _ in pdu.prev_events}
             seen = set(have_seen.keys())
             if prevs - seen:
+                logger.info(
+                    "Still missing %d events for room %r: %r...",
+                    len(prevs - seen), pdu.room_id, list(prevs - seen)[:5]
+                )
                 fetch_state = True
 
         if fetch_state:
@@ -531,7 +605,7 @@ class FederationServer(FederationBase):
                     origin, pdu.room_id, pdu.event_id,
                 )
             except:
-                logger.warn("Failed to get state for event: %s", pdu.event_id)
+                logger.exception("Failed to get state for event: %s", pdu.event_id)
 
         yield self.handler.on_receive_pdu(
             origin,
diff --git a/synapse/federation/replication.py b/synapse/federation/replication.py
index 3e062a5eab..ea66a5dcbc 100644
--- a/synapse/federation/replication.py
+++ b/synapse/federation/replication.py
@@ -72,5 +72,7 @@ class ReplicationLayer(FederationClient, FederationServer):
 
         self.hs = hs
 
+        super(ReplicationLayer, self).__init__(hs)
+
     def __str__(self):
         return "<ReplicationLayer(%s)>" % self.server_name
diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py
index 5787f854d4..cb2ef0210c 100644
--- a/synapse/federation/transaction_queue.py
+++ b/synapse/federation/transaction_queue.py
@@ -21,11 +21,11 @@ from .units import Transaction
 
 from synapse.api.errors import HttpResponseException
 from synapse.util.async import run_on_reactor
-from synapse.util.logutils import log_function
-from synapse.util.logcontext import PreserveLoggingContext
+from synapse.util.logcontext import preserve_context_over_fn
 from synapse.util.retryutils import (
     get_retry_limiter, NotRetryingDestination,
 )
+from synapse.util.metrics import measure_func
 import synapse.metrics
 
 import logging
@@ -51,7 +51,7 @@ class TransactionQueue(object):
 
         self.transport_layer = transport_layer
 
-        self._clock = hs.get_clock()
+        self.clock = hs.get_clock()
 
         # Is a mapping from destinations -> deferreds. Used to keep track
         # of which destinations have transactions in flight and when they are
@@ -82,7 +82,7 @@ class TransactionQueue(object):
         self.pending_failures_by_dest = {}
 
         # HACK to get unique tx id
-        self._next_txn_id = int(self._clock.time_msec())
+        self._next_txn_id = int(self.clock.time_msec())
 
     def can_send_to(self, destination):
         """Can we send messages to the given server?
@@ -119,266 +119,215 @@ class TransactionQueue(object):
         if not destinations:
             return
 
-        deferreds = []
-
         for destination in destinations:
-            deferred = defer.Deferred()
             self.pending_pdus_by_dest.setdefault(destination, []).append(
-                (pdu, deferred, order)
+                (pdu, order)
             )
 
-            def chain(failure):
-                if not deferred.called:
-                    deferred.errback(failure)
-
-            def log_failure(f):
-                logger.warn("Failed to send pdu to %s: %s", destination, f.value)
-
-            deferred.addErrback(log_failure)
-
-            with PreserveLoggingContext():
-                self._attempt_new_transaction(destination).addErrback(chain)
-
-            deferreds.append(deferred)
+            preserve_context_over_fn(
+                self._attempt_new_transaction, destination
+            )
 
-    # NO inlineCallbacks
     def enqueue_edu(self, edu):
         destination = edu.destination
 
         if not self.can_send_to(destination):
             return
 
-        deferred = defer.Deferred()
-        self.pending_edus_by_dest.setdefault(destination, []).append(
-            (edu, deferred)
-        )
+        self.pending_edus_by_dest.setdefault(destination, []).append(edu)
 
-        def chain(failure):
-            if not deferred.called:
-                deferred.errback(failure)
-
-        def log_failure(f):
-            logger.warn("Failed to send edu to %s: %s", destination, f.value)
-
-        deferred.addErrback(log_failure)
-
-        with PreserveLoggingContext():
-            self._attempt_new_transaction(destination).addErrback(chain)
-
-        return deferred
+        preserve_context_over_fn(
+            self._attempt_new_transaction, destination
+        )
 
-    @defer.inlineCallbacks
     def enqueue_failure(self, failure, destination):
         if destination == self.server_name or destination == "localhost":
             return
 
-        deferred = defer.Deferred()
-
         if not self.can_send_to(destination):
             return
 
         self.pending_failures_by_dest.setdefault(
             destination, []
-        ).append(
-            (failure, deferred)
-        )
-
-        def chain(f):
-            if not deferred.called:
-                deferred.errback(f)
-
-        def log_failure(f):
-            logger.warn("Failed to send failure to %s: %s", destination, f.value)
-
-        deferred.addErrback(log_failure)
-
-        with PreserveLoggingContext():
-            self._attempt_new_transaction(destination).addErrback(chain)
+        ).append(failure)
 
-        yield deferred
+        preserve_context_over_fn(
+            self._attempt_new_transaction, destination
+        )
 
     @defer.inlineCallbacks
-    @log_function
     def _attempt_new_transaction(self, destination):
         yield run_on_reactor()
+        while True:
+            # list of (pending_pdu, order)
+            if destination in self.pending_transactions:
+                # XXX: pending_transactions can get stuck on by a never-ending
+                # request at which point pending_pdus_by_dest just keeps growing.
+                # we need application-layer timeouts of some flavour of these
+                # requests
+                logger.debug(
+                    "TX [%s] Transaction already in progress",
+                    destination
+                )
+                return
 
-        # list of (pending_pdu, deferred, order)
-        if destination in self.pending_transactions:
-            # XXX: pending_transactions can get stuck on by a never-ending
-            # request at which point pending_pdus_by_dest just keeps growing.
-            # we need application-layer timeouts of some flavour of these
-            # requests
-            logger.debug(
-                "TX [%s] Transaction already in progress",
-                destination
-            )
-            return
-
-        pending_pdus = self.pending_pdus_by_dest.pop(destination, [])
-        pending_edus = self.pending_edus_by_dest.pop(destination, [])
-        pending_failures = self.pending_failures_by_dest.pop(destination, [])
+            pending_pdus = self.pending_pdus_by_dest.pop(destination, [])
+            pending_edus = self.pending_edus_by_dest.pop(destination, [])
+            pending_failures = self.pending_failures_by_dest.pop(destination, [])
 
-        if pending_pdus:
-            logger.debug("TX [%s] len(pending_pdus_by_dest[dest]) = %d",
-                         destination, len(pending_pdus))
+            if pending_pdus:
+                logger.debug("TX [%s] len(pending_pdus_by_dest[dest]) = %d",
+                             destination, len(pending_pdus))
 
-        if not pending_pdus and not pending_edus and not pending_failures:
-            logger.debug("TX [%s] Nothing to send", destination)
-            return
+            if not pending_pdus and not pending_edus and not pending_failures:
+                logger.debug("TX [%s] Nothing to send", destination)
+                return
 
-        try:
-            self.pending_transactions[destination] = 1
+            yield self._send_new_transaction(
+                destination, pending_pdus, pending_edus, pending_failures
+            )
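_attempt_new_transaction is now a drain loop: a single sender per destination keeps popping the pending queues until nothing is left, instead of each enqueue chaining deferreds. The shape of that loop as a standalone sketch (in the real code the in-flight marker is pending_transactions, managed inside _send_new_transaction):

from twisted.internet import defer

@defer.inlineCallbacks
def sender_loop(destination, pending_by_dest, in_flight, send):
    if destination in in_flight:
        return  # another loop is already draining this destination
    in_flight.add(destination)
    try:
        while True:
            pending = pending_by_dest.pop(destination, [])
            if not pending:
                return
            yield send(destination, pending)
    finally:
        in_flight.discard(destination)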
 
-            logger.debug("TX [%s] _attempt_new_transaction", destination)
+    @measure_func("_send_new_transaction")
+    @defer.inlineCallbacks
+    def _send_new_transaction(self, destination, pending_pdus, pending_edus,
+                              pending_failures):
 
             # Sort based on the order field
-            pending_pdus.sort(key=lambda t: t[2])
-
+            pending_pdus.sort(key=lambda t: t[1])
             pdus = [x[0] for x in pending_pdus]
-            edus = [x[0] for x in pending_edus]
-            failures = [x[0].get_dict() for x in pending_failures]
-            deferreds = [
-                x[1]
-                for x in pending_pdus + pending_edus + pending_failures
-            ]
-
-            txn_id = str(self._next_txn_id)
-
-            limiter = yield get_retry_limiter(
-                destination,
-                self._clock,
-                self.store,
-            )
+            edus = pending_edus
+            failures = [x.get_dict() for x in pending_failures]
 
-            logger.debug(
-                "TX [%s] {%s} Attempting new transaction"
-                " (pdus: %d, edus: %d, failures: %d)",
-                destination, txn_id,
-                len(pending_pdus),
-                len(pending_edus),
-                len(pending_failures)
-            )
+            try:
+                self.pending_transactions[destination] = 1
 
-            logger.debug("TX [%s] Persisting transaction...", destination)
+                logger.debug("TX [%s] _attempt_new_transaction", destination)
 
-            transaction = Transaction.create_new(
-                origin_server_ts=int(self._clock.time_msec()),
-                transaction_id=txn_id,
-                origin=self.server_name,
-                destination=destination,
-                pdus=pdus,
-                edus=edus,
-                pdu_failures=failures,
-            )
+                txn_id = str(self._next_txn_id)
 
-            self._next_txn_id += 1
+                limiter = yield get_retry_limiter(
+                    destination,
+                    self.clock,
+                    self.store,
+                )
 
-            yield self.transaction_actions.prepare_to_send(transaction)
+                logger.debug(
+                    "TX [%s] {%s} Attempting new transaction"
+                    " (pdus: %d, edus: %d, failures: %d)",
+                    destination, txn_id,
+                    len(pending_pdus),
+                    len(pending_edus),
+                    len(pending_failures)
+                )
 
-            logger.debug("TX [%s] Persisted transaction", destination)
-            logger.info(
-                "TX [%s] {%s} Sending transaction [%s],"
-                " (PDUs: %d, EDUs: %d, failures: %d)",
-                destination, txn_id,
-                transaction.transaction_id,
-                len(pending_pdus),
-                len(pending_edus),
-                len(pending_failures),
-            )
+                logger.debug("TX [%s] Persisting transaction...", destination)
 
-            with limiter:
-                # Actually send the transaction
-
-                # FIXME (erikj): This is a bit of a hack to make the Pdu age
-                # keys work
-                def json_data_cb():
-                    data = transaction.get_dict()
-                    now = int(self._clock.time_msec())
-                    if "pdus" in data:
-                        for p in data["pdus"]:
-                            if "age_ts" in p:
-                                unsigned = p.setdefault("unsigned", {})
-                                unsigned["age"] = now - int(p["age_ts"])
-                                del p["age_ts"]
-                    return data
-
-                try:
-                    response = yield self.transport_layer.send_transaction(
-                        transaction, json_data_cb
-                    )
-                    code = 200
-
-                    if response:
-                        for e_id, r in response.get("pdus", {}).items():
-                            if "error" in r:
-                                logger.warn(
-                                    "Transaction returned error for %s: %s",
-                                    e_id, r,
-                                )
-                except HttpResponseException as e:
-                    code = e.code
-                    response = e.response
+                transaction = Transaction.create_new(
+                    origin_server_ts=int(self.clock.time_msec()),
+                    transaction_id=txn_id,
+                    origin=self.server_name,
+                    destination=destination,
+                    pdus=pdus,
+                    edus=edus,
+                    pdu_failures=failures,
+                )
+
+                self._next_txn_id += 1
+
+                yield self.transaction_actions.prepare_to_send(transaction)
 
+                logger.debug("TX [%s] Persisted transaction", destination)
                 logger.info(
-                    "TX [%s] {%s} got %d response",
-                    destination, txn_id, code
+                    "TX [%s] {%s} Sending transaction [%s],"
+                    " (PDUs: %d, EDUs: %d, failures: %d)",
+                    destination, txn_id,
+                    transaction.transaction_id,
+                    len(pending_pdus),
+                    len(pending_edus),
+                    len(pending_failures),
                 )
 
-                logger.debug("TX [%s] Sent transaction", destination)
-                logger.debug("TX [%s] Marking as delivered...", destination)
+                with limiter:
+                    # Actually send the transaction
+
+                    # FIXME (erikj): This is a bit of a hack to make the Pdu age
+                    # keys work
+                    def json_data_cb():
+                        data = transaction.get_dict()
+                        now = int(self.clock.time_msec())
+                        if "pdus" in data:
+                            for p in data["pdus"]:
+                                if "age_ts" in p:
+                                    unsigned = p.setdefault("unsigned", {})
+                                    unsigned["age"] = now - int(p["age_ts"])
+                                    del p["age_ts"]
+                        return data
+
+                    try:
+                        response = yield self.transport_layer.send_transaction(
+                            transaction, json_data_cb
+                        )
+                        code = 200
+
+                        if response:
+                            for e_id, r in response.get("pdus", {}).items():
+                                if "error" in r:
+                                    logger.warn(
+                                        "Transaction returned error for %s: %s",
+                                        e_id, r,
+                                    )
+                    except HttpResponseException as e:
+                        code = e.code
+                        response = e.response
+
+                    logger.info(
+                        "TX [%s] {%s} got %d response",
+                        destination, txn_id, code
+                    )
 
-            yield self.transaction_actions.delivered(
-                transaction, code, response
-            )
+                    logger.debug("TX [%s] Sent transaction", destination)
+                    logger.debug("TX [%s] Marking as delivered...", destination)
 
-            logger.debug("TX [%s] Marked as delivered", destination)
-
-            logger.debug("TX [%s] Yielding to callbacks...", destination)
-
-            for deferred in deferreds:
-                if code == 200:
-                    deferred.callback(None)
-                else:
-                    deferred.errback(RuntimeError("Got status %d" % code))
-
-                # Ensures we don't continue until all callbacks on that
-                # deferred have fired
-                try:
-                    yield deferred
-                except:
-                    pass
-
-            logger.debug("TX [%s] Yielded to callbacks", destination)
-        except NotRetryingDestination:
-            logger.info(
-                "TX [%s] not ready for retry yet - "
-                "dropping transaction for now",
-                destination,
-            )
-        except RuntimeError as e:
-            # We capture this here as there as nothing actually listens
-            # for this finishing functions deferred.
-            logger.warn(
-                "TX [%s] Problem in _attempt_transaction: %s",
-                destination,
-                e,
-            )
-        except Exception as e:
-            # We capture this here as there as nothing actually listens
-            # for this finishing functions deferred.
-            logger.warn(
-                "TX [%s] Problem in _attempt_transaction: %s",
-                destination,
-                e,
-            )
+                yield self.transaction_actions.delivered(
+                    transaction, code, response
+                )
 
-            for deferred in deferreds:
-                if not deferred.called:
-                    deferred.errback(e)
+                logger.debug("TX [%s] Marked as delivered", destination)
+
+                if code != 200:
+                    for p in pdus:
+                        logger.info(
+                            "Failed to send event %s to %s", p.event_id, destination
+                        )
+            except NotRetryingDestination:
+                logger.info(
+                    "TX [%s] not ready for retry yet - "
+                    "dropping transaction for now",
+                    destination,
+                )
+            except RuntimeError as e:
+                # We capture this here as there is nothing actually listening
+                # for this function's deferred to finish.
+                logger.warn(
+                    "TX [%s] Problem in _attempt_transaction: %s",
+                    destination,
+                    e,
+                )
+
+                for p in pdus:
+                    logger.info("Failed to send event %s to %s", p.event_id, destination)
+            except Exception as e:
+                # We capture this here as there is nothing actually listening
+                # for this function's deferred to finish.
+                logger.warn(
+                    "TX [%s] Problem in _attempt_transaction: %s",
+                    destination,
+                    e,
+                )
 
-        finally:
-            # We want to be *very* sure we delete this after we stop processing
-            self.pending_transactions.pop(destination, None)
+                for p in pdus:
+                    logger.info("Failed to send event %s to %s", p.event_id, destination)
 
-            # Check to see if there is anything else to send.
-            self._attempt_new_transaction(destination)
+            finally:
+                # We want to be *very* sure we delete this after we stop processing
+                self.pending_transactions.pop(destination, None)
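The json_data_cb defined above rewrites each PDU's stored absolute age_ts into a relative age at the moment the transaction is serialised. The transformation in isolation:

def apply_age(pdu_dict, now_ms):
    # Convert the absolute "age_ts" recorded at receipt time into the
    # relative "age" the wire format expects, as json_data_cb does above.
    if "age_ts" in pdu_dict:
        unsigned = pdu_dict.setdefault("unsigned", {})
        unsigned["age"] = now_ms - int(pdu_dict.pop("age_ts"))
    return pdu_dict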
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index cd2841c4db..3d088e43cb 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -55,6 +55,28 @@ class TransportLayerClient(object):
         )
 
     @log_function
+    def get_room_state_ids(self, destination, room_id, event_id):
+        """ Requests all state for a given room from the given server at the
+        given event. Returns the state's event_id's
+
+        Args:
+            destination (str): The host name of the remote home server we want
+                to get the state from.
+            context (str): The name of the context we want the state of
+            event_id (str): The event we want the context at.
+
+        Returns:
+            Deferred: Results in a dict received from the remote homeserver.
+        """
+        logger.debug("get_room_state_ids dest=%s, room=%s",
+                     destination, room_id)
+
+        path = PREFIX + "/state_ids/%s/" % room_id
+        return self.client.get_json(
+            destination, path=path, args={"event_id": event_id},
+        )
+
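A hedged usage sketch of the new method (the server, room and event IDs are placeholders; the response keys match those read by FederationClient above):

d = transport_layer.get_room_state_ids(
    "remote.example.org", "!room:example.org",
    event_id="$someevent:example.org",
)
# Resolves to a dict like {"pdu_ids": [...], "auth_chain_ids": [...]}
d.addCallback(lambda result: result["pdu_ids"])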
+    @log_function
     def get_event(self, destination, event_id, timeout=None):
         """ Requests the pdu with give id and origin from the given server.
 
@@ -226,6 +248,18 @@ class TransportLayerClient(object):
 
     @defer.inlineCallbacks
     @log_function
+    def get_public_rooms(self, remote_server):
+        path = PREFIX + "/publicRooms"
+
+        response = yield self.client.get_json(
+            destination=remote_server,
+            path=path,
+        )
+
+        defer.returnValue(response)
+
+    @defer.inlineCallbacks
+    @log_function
     def exchange_third_party_invite(self, destination, room_id, event_dict):
         path = PREFIX + "/exchange_third_party_invite/%s" % (room_id,)
 
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index 5b6c7d11dd..37c0d4fbc4 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -18,13 +18,14 @@ from twisted.internet import defer
 from synapse.api.urls import FEDERATION_PREFIX as PREFIX
 from synapse.api.errors import Codes, SynapseError
 from synapse.http.server import JsonResource
-from synapse.http.servlet import parse_json_object_from_request, parse_string
+from synapse.http.servlet import parse_json_object_from_request
 from synapse.util.ratelimitutils import FederationRateLimiter
+from synapse.util.versionstring import get_version_string
 
 import functools
 import logging
-import simplejson as json
 import re
+import synapse
 
 
 logger = logging.getLogger(__name__)
@@ -37,7 +38,7 @@ class TransportLayerServer(JsonResource):
         self.hs = hs
         self.clock = hs.get_clock()
 
-        super(TransportLayerServer, self).__init__(hs)
+        super(TransportLayerServer, self).__init__(hs, canonical_json=False)
 
         self.authenticator = Authenticator(hs)
         self.ratelimiter = FederationRateLimiter(
@@ -60,6 +61,16 @@ class TransportLayerServer(JsonResource):
         )
 
 
+class AuthenticationError(SynapseError):
+    """There was a problem authenticating the request"""
+    pass
+
+
+class NoAuthenticationError(AuthenticationError):
+    """The request had no authentication information"""
+    pass
+
+
 class Authenticator(object):
     def __init__(self, hs):
         self.keyring = hs.get_keyring()
@@ -67,7 +78,7 @@ class Authenticator(object):
 
     # A method just so we can pass 'self' as the authenticator to the Servlets
     @defer.inlineCallbacks
-    def authenticate_request(self, request):
+    def authenticate_request(self, request, content):
         json_request = {
             "method": request.method,
             "uri": request.uri,
@@ -75,17 +86,10 @@ class Authenticator(object):
             "signatures": {},
         }
 
-        content = None
-        origin = None
+        if content is not None:
+            json_request["content"] = content
 
-        if request.method in ["PUT", "POST"]:
-            # TODO: Handle other method types? other content types?
-            try:
-                content_bytes = request.content.read()
-                content = json.loads(content_bytes)
-                json_request["content"] = content
-            except:
-                raise SynapseError(400, "Unable to parse JSON", Codes.BAD_JSON)
+        origin = None
 
         def parse_auth_header(header_str):
             try:
@@ -103,14 +107,14 @@ class Authenticator(object):
                 sig = strip_quotes(param_dict["sig"])
                 return (origin, key, sig)
             except:
-                raise SynapseError(
+                raise AuthenticationError(
                     400, "Malformed Authorization header", Codes.UNAUTHORIZED
                 )
 
         auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
 
         if not auth_headers:
-            raise SynapseError(
+            raise NoAuthenticationError(
                 401, "Missing Authorization headers", Codes.UNAUTHORIZED,
             )
 
@@ -121,7 +125,7 @@ class Authenticator(object):
                 json_request["signatures"].setdefault(origin, {})[key] = sig
 
         if not json_request["signatures"]:
-            raise SynapseError(
+            raise NoAuthenticationError(
                 401, "Missing Authorization headers", Codes.UNAUTHORIZED,
             )
 
@@ -130,38 +134,59 @@ class Authenticator(object):
         logger.info("Request from %s", origin)
         request.authenticated_entity = origin
 
-        defer.returnValue((origin, content))
+        defer.returnValue(origin)
 
 
 class BaseFederationServlet(object):
-    def __init__(self, handler, authenticator, ratelimiter, server_name):
+    REQUIRE_AUTH = True
+
+    def __init__(self, handler, authenticator, ratelimiter, server_name,
+                 room_list_handler):
         self.handler = handler
         self.authenticator = authenticator
         self.ratelimiter = ratelimiter
+        self.room_list_handler = room_list_handler
 
-    def _wrap(self, code):
+    def _wrap(self, func):
         authenticator = self.authenticator
         ratelimiter = self.ratelimiter
 
         @defer.inlineCallbacks
-        @functools.wraps(code)
-        def new_code(request, *args, **kwargs):
+        @functools.wraps(func)
+        def new_func(request, *args, **kwargs):
+            content = None
+            if request.method in ["PUT", "POST"]:
+                # TODO: Handle other method types? other content types?
+                content = parse_json_object_from_request(request)
+
             try:
-                (origin, content) = yield authenticator.authenticate_request(request)
+                origin = yield authenticator.authenticate_request(request, content)
+            except NoAuthenticationError:
+                origin = None
+                if self.REQUIRE_AUTH:
+                    logger.exception("authenticate_request failed")
+                    raise
+            except:
+                logger.exception("authenticate_request failed")
+                raise
+
+            if origin:
                 with ratelimiter.ratelimit(origin) as d:
                     yield d
-                    response = yield code(
+                    response = yield func(
                         origin, content, request.args, *args, **kwargs
                     )
-            except:
-                logger.exception("authenticate_request failed")
-                raise
+            else:
+                response = yield func(
+                    origin, content, request.args, *args, **kwargs
+                )
+
             defer.returnValue(response)
 
         # Extra logic that functools.wraps() doesn't finish
-        new_code.__self__ = code.__self__
+        new_func.__self__ = func.__self__
 
-        return new_code
+        return new_func
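With _wrap honouring REQUIRE_AUTH, a servlet opts out of federation auth simply by overriding the class attribute, as On3pidBindServlet and OpenIdUserInfo do below. A minimal hypothetical example:

class ExampleOpenServlet(BaseFederationServlet):
    PATH = "/example"       # hypothetical endpoint, for illustration only
    REQUIRE_AUTH = False    # served even when the request is unsigned

    def on_GET(self, origin, content, query):
        # origin is None for unauthenticated callers
        return defer.succeed((200, {}))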
 
     def register(self, server):
         pattern = re.compile("^" + PREFIX + self.PATH + "$")
@@ -269,6 +294,17 @@ class FederationStateServlet(BaseFederationServlet):
         )
 
 
+class FederationStateIdsServlet(BaseFederationServlet):
+    PATH = "/state_ids/(?P<room_id>[^/]*)/"
+
+    def on_GET(self, origin, content, query, room_id):
+        return self.handler.on_state_ids_request(
+            origin,
+            room_id,
+            query.get("event_id", [None])[0],
+        )
+
+
 class FederationBackfillServlet(BaseFederationServlet):
     PATH = "/backfill/(?P<context>[^/]*)/"
 
@@ -365,10 +401,8 @@ class FederationThirdPartyInviteExchangeServlet(BaseFederationServlet):
 class FederationClientKeysQueryServlet(BaseFederationServlet):
     PATH = "/user/keys/query"
 
-    @defer.inlineCallbacks
     def on_POST(self, origin, content, query):
-        response = yield self.handler.on_query_client_keys(origin, content)
-        defer.returnValue((200, response))
+        return self.handler.on_query_client_keys(origin, content)
 
 
 class FederationClientKeysClaimServlet(BaseFederationServlet):
@@ -386,7 +420,7 @@ class FederationQueryAuthServlet(BaseFederationServlet):
     @defer.inlineCallbacks
     def on_POST(self, origin, content, query, context, event_id):
         new_content = yield self.handler.on_query_auth_request(
-            origin, content, event_id
+            origin, content, context, event_id
         )
 
         defer.returnValue((200, new_content))
@@ -418,9 +452,10 @@ class FederationGetMissingEventsServlet(BaseFederationServlet):
 class On3pidBindServlet(BaseFederationServlet):
     PATH = "/3pid/onbind"
 
+    REQUIRE_AUTH = False
+
     @defer.inlineCallbacks
-    def on_POST(self, request):
-        content = parse_json_object_from_request(request)
+    def on_POST(self, origin, content, query):
         if "invites" in content:
             last_exception = None
             for invite in content["invites"]:
@@ -442,11 +477,6 @@ class On3pidBindServlet(BaseFederationServlet):
                 raise last_exception
         defer.returnValue((200, {}))
 
-    # Avoid doing remote HS authorization checks which are done by default by
-    # BaseFederationServlet.
-    def _wrap(self, code):
-        return code
-
 
 class OpenIdUserInfo(BaseFederationServlet):
     """
@@ -467,9 +497,11 @@ class OpenIdUserInfo(BaseFederationServlet):
 
     PATH = "/openid/userinfo"
 
+    REQUIRE_AUTH = False
+
     @defer.inlineCallbacks
-    def on_GET(self, request):
-        token = parse_string(request, "access_token")
+    def on_GET(self, origin, content, query):
+        token = query.get("access_token", [None])[0]
         if token is None:
             defer.returnValue((401, {
                 "errcode": "M_MISSING_TOKEN", "error": "Access Token required"
@@ -486,10 +518,58 @@ class OpenIdUserInfo(BaseFederationServlet):
 
         defer.returnValue((200, {"sub": user_id}))
 
-    # Avoid doing remote HS authorization checks which are done by default by
-    # BaseFederationServlet.
-    def _wrap(self, code):
-        return code
+
+class PublicRoomList(BaseFederationServlet):
+    """
+    Fetch the public room list for this server.
+
+    This API returns information in the same format as /publicRooms on the
+    client API, but will only ever include local public rooms and hence is
+    intended for consumption by other home servers.
+
+    GET /publicRooms HTTP/1.1
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "chunk": [
+            {
+                "aliases": [
+                    "#test:localhost"
+                ],
+                "guest_can_join": false,
+                "name": "test room",
+                "num_joined_members": 3,
+                "room_id": "!whkydVegtvatLfXmPN:localhost",
+                "world_readable": false
+            }
+        ],
+        "end": "END",
+        "start": "START"
+    }
+    """
+
+    PATH = "/publicRooms"
+
+    @defer.inlineCallbacks
+    def on_GET(self, origin, content, query):
+        data = yield self.room_list_handler.get_local_public_room_list()
+        defer.returnValue((200, data))
+
+
+class FederationVersionServlet(BaseFederationServlet):
+    PATH = "/version"
+
+    REQUIRE_AUTH = False
+
+    def on_GET(self, origin, content, query):
+        return defer.succeed((200, {
+            "server": {
+                "name": "Synapse",
+                "version": get_version_string(synapse)
+            },
+        }))
 
 
 SERVLET_CLASSES = (
@@ -497,6 +577,7 @@ SERVLET_CLASSES = (
     FederationPullServlet,
     FederationEventServlet,
     FederationStateServlet,
+    FederationStateIdsServlet,
     FederationBackfillServlet,
     FederationQueryServlet,
     FederationMakeJoinServlet,
@@ -513,6 +594,8 @@ SERVLET_CLASSES = (
     FederationThirdPartyInviteExchangeServlet,
     On3pidBindServlet,
     OpenIdUserInfo,
+    PublicRoomList,
+    FederationVersionServlet,
 )
 
 
@@ -523,4 +606,5 @@ def register_servlets(hs, resource, authenticator, ratelimiter):
             authenticator=authenticator,
             ratelimiter=ratelimiter,
             server_name=hs.hostname,
+            room_list_handler=hs.get_room_list_handler(),
         ).register(resource)
diff --git a/synapse/handlers/__init__.py b/synapse/handlers/__init__.py
index 9442ae6f1d..1a50a2ec98 100644
--- a/synapse/handlers/__init__.py
+++ b/synapse/handlers/__init__.py
@@ -13,11 +13,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.appservice.scheduler import AppServiceScheduler
-from synapse.appservice.api import ApplicationServiceApi
 from .register import RegistrationHandler
 from .room import (
-    RoomCreationHandler, RoomListHandler, RoomContextHandler,
+    RoomCreationHandler, RoomContextHandler,
 )
 from .room_member import RoomMemberHandler
 from .message import MessageHandler
@@ -26,8 +24,6 @@ from .federation import FederationHandler
 from .profile import ProfileHandler
 from .directory import DirectoryHandler
 from .admin import AdminHandler
-from .appservice import ApplicationServicesHandler
-from .auth import AuthHandler
 from .identity import IdentityHandler
 from .receipts import ReceiptsHandler
 from .search import SearchHandler
@@ -35,10 +31,21 @@ from .search import SearchHandler
 
 class Handlers(object):
 
-    """ A collection of all the event handlers.
+    """ Deprecated. A collection of handlers.
 
-    There's no need to lazily create these; we'll just make them all eagerly
-    at construction time.
+    At some point most of the classes whose names ended in "Handler" were
+    accessed through this class.
+
+    However, this makes it painful to unit test the handlers and to run
+    cut-down versions of synapse that only use specific handlers, because
+    using any single handler required creating all of them. So some of the
+    handlers have been lifted out of the Handlers object and are now accessed
+    directly through the homeserver object itself.
+
+    Any new handlers should follow the new pattern of being accessed through
+    the homeserver object and should not be added to the Handlers object.
+
+    The remaining handlers should be moved out of the handlers object.
     """
 
     def __init__(self, hs):
@@ -50,19 +57,9 @@ class Handlers(object):
         self.event_handler = EventHandler(hs)
         self.federation_handler = FederationHandler(hs)
         self.profile_handler = ProfileHandler(hs)
-        self.room_list_handler = RoomListHandler(hs)
         self.directory_handler = DirectoryHandler(hs)
         self.admin_handler = AdminHandler(hs)
         self.receipts_handler = ReceiptsHandler(hs)
-        asapi = ApplicationServiceApi(hs)
-        self.appservice_handler = ApplicationServicesHandler(
-            hs, asapi, AppServiceScheduler(
-                clock=hs.get_clock(),
-                store=hs.get_datastore(),
-                as_api=asapi
-            )
-        )
-        self.auth_handler = AuthHandler(hs)
         self.identity_handler = IdentityHandler(hs)
         self.search_handler = SearchHandler(hs)
         self.room_context_handler = RoomContextHandler(hs)
diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py
index c904c6c500..11081a0cd5 100644
--- a/synapse/handlers/_base.py
+++ b/synapse/handlers/_base.py
@@ -13,14 +13,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import logging
+
 from twisted.internet import defer
 
-from synapse.api.errors import LimitExceededError
+import synapse.types
 from synapse.api.constants import Membership, EventTypes
-from synapse.types import UserID, Requester
-
-
-import logging
+from synapse.api.errors import LimitExceededError
+from synapse.types import UserID
 
 
 logger = logging.getLogger(__name__)
@@ -31,11 +31,15 @@ class BaseHandler(object):
     Common base class for the event handlers.
 
     Attributes:
-        store (synapse.storage.events.StateStore):
+        store (synapse.storage.DataStore):
         state_handler (synapse.state.StateHandler):
     """
 
     def __init__(self, hs):
+        """
+        Args:
+            hs (synapse.server.HomeServer):
+        """
         self.store = hs.get_datastore()
         self.auth = hs.get_auth()
         self.notifier = hs.get_notifier()
@@ -120,7 +124,8 @@ class BaseHandler(object):
                 # and having homeservers have their own users leave keeps more
                 # of that decision-making and control local to the guest-having
                 # homeserver.
-                requester = Requester(target_user, "", True)
+                requester = synapse.types.create_requester(
+                    target_user, is_guest=True)
                 handler = self.hs.get_handlers().room_member_handler
                 yield handler.update_membership(
                     requester,
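
The switch from constructing Requester tuples positionally to calling
synapse.types.create_requester(...) is the usual keyword-factory move: call
sites name only the fields they care about. A hedged sketch of such a factory;
the field names below are assumptions, not copied from synapse.types:

    import collections

    Requester = collections.namedtuple(
        "Requester", ["user", "access_token_id", "is_guest"]
    )


    def create_requester(user, access_token_id=None, is_guest=False):
        # Keyword defaults mean call sites such as
        # create_requester(user, is_guest=True) keep working even if more
        # fields are added to Requester later.
        return Requester(user, access_token_id, is_guest)
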
diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py
index 75fc74c797..051ccdb380 100644
--- a/synapse/handlers/appservice.py
+++ b/synapse/handlers/appservice.py
@@ -17,7 +17,6 @@ from twisted.internet import defer
 
 from synapse.api.constants import EventTypes
 from synapse.appservice import ApplicationService
-from synapse.types import UserID
 
 import logging
 
@@ -35,16 +34,13 @@ def log_failure(failure):
     )
 
 
-# NB: Purposefully not inheriting BaseHandler since that contains way too much
-# setup code which this handler does not need or use. This makes testing a lot
-# easier.
 class ApplicationServicesHandler(object):
 
-    def __init__(self, hs, appservice_api, appservice_scheduler):
+    def __init__(self, hs):
         self.store = hs.get_datastore()
-        self.hs = hs
-        self.appservice_api = appservice_api
-        self.scheduler = appservice_scheduler
+        self.is_mine_id = hs.is_mine_id
+        self.appservice_api = hs.get_application_service_api()
+        self.scheduler = hs.get_application_service_scheduler()
         self.started_scheduler = False
 
     @defer.inlineCallbacks
@@ -169,8 +165,7 @@ class ApplicationServicesHandler(object):
 
     @defer.inlineCallbacks
     def _is_unknown_user(self, user_id):
-        user = UserID.from_string(user_id)
-        if not self.hs.is_mine(user):
+        if not self.is_mine_id(user_id):
             # we don't know if they are unknown or not since it isn't one of our
             # users. We can't poke ASes.
             defer.returnValue(False)
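
Taking only `hs` in the constructor, and capturing is_mine_id, the AS API and
the scheduler as attributes, is what makes this handler straightforward to
unit test: a test can hand it a stub homeserver exposing just those getters.
The stub below is hypothetical, not synapse test code:

    class StubHomeServer(object):
        """Hypothetical test double exposing only what the handler needs."""

        def __init__(self, store, api, scheduler):
            self._store, self._api, self._scheduler = store, api, scheduler
            self.is_mine_id = lambda user_id: user_id.endswith(":test")

        def get_datastore(self):
            return self._store

        def get_application_service_api(self):
            return self._api

        def get_application_service_scheduler(self):
            return self._scheduler

    # handler = ApplicationServicesHandler(StubHomeServer(store, api, sched))
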
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index 68d0d78fc6..82998a81ce 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -18,8 +18,9 @@ from twisted.internet import defer
 from ._base import BaseHandler
 from synapse.api.constants import LoginType
 from synapse.types import UserID
-from synapse.api.errors import AuthError, LoginError, Codes
+from synapse.api.errors import AuthError, LoginError, Codes, StoreError, SynapseError
 from synapse.util.async import run_on_reactor
+from synapse.config.ldap import LDAPMode
 
 from twisted.web.client import PartialDownloadError
 
@@ -28,6 +29,12 @@ import bcrypt
 import pymacaroons
 import simplejson
 
+try:
+    import ldap3
+except ImportError:
+    ldap3 = None
+
 import synapse.util.stringutils as stringutils
 
 
@@ -38,6 +45,10 @@ class AuthHandler(BaseHandler):
     SESSION_EXPIRE_MS = 48 * 60 * 60 * 1000
 
     def __init__(self, hs):
+        """
+        Args:
+            hs (synapse.server.HomeServer):
+        """
         super(AuthHandler, self).__init__(hs)
         self.checkers = {
             LoginType.PASSWORD: self._check_password_auth,
@@ -50,19 +61,23 @@ class AuthHandler(BaseHandler):
         self.INVALID_TOKEN_HTTP_STATUS = 401
 
         self.ldap_enabled = hs.config.ldap_enabled
-        self.ldap_server = hs.config.ldap_server
-        self.ldap_port = hs.config.ldap_port
-        self.ldap_tls = hs.config.ldap_tls
-        self.ldap_search_base = hs.config.ldap_search_base
-        self.ldap_search_property = hs.config.ldap_search_property
-        self.ldap_email_property = hs.config.ldap_email_property
-        self.ldap_full_name_property = hs.config.ldap_full_name_property
-
-        if self.ldap_enabled is True:
-            import ldap
-            logger.info("Import ldap version: %s", ldap.__version__)
+        if self.ldap_enabled:
+            if not ldap3:
+                raise RuntimeError(
+                    'Missing ldap3 library. This is required for LDAP Authentication.'
+                )
+            self.ldap_mode = hs.config.ldap_mode
+            self.ldap_uri = hs.config.ldap_uri
+            self.ldap_start_tls = hs.config.ldap_start_tls
+            self.ldap_base = hs.config.ldap_base
+            self.ldap_filter = hs.config.ldap_filter
+            self.ldap_attributes = hs.config.ldap_attributes
+            if self.ldap_mode == LDAPMode.SEARCH:
+                self.ldap_bind_dn = hs.config.ldap_bind_dn
+                self.ldap_bind_password = hs.config.ldap_bind_password
 
         self.hs = hs  # FIXME better possibility to access registrationHandler later?
+        self.device_handler = hs.get_device_handler()
 
     @defer.inlineCallbacks
     def check_auth(self, flows, clientdict, clientip):
@@ -220,7 +235,6 @@ class AuthHandler(BaseHandler):
         sess = self._get_session_info(session_id)
         return sess.setdefault('serverdict', {}).get(key, default)
 
-    @defer.inlineCallbacks
     def _check_password_auth(self, authdict, _):
         if "user" not in authdict or "password" not in authdict:
             raise LoginError(400, "", Codes.MISSING_PARAM)
@@ -230,11 +244,7 @@ class AuthHandler(BaseHandler):
         if not user_id.startswith('@'):
             user_id = UserID.create(user_id, self.hs.hostname).to_string()
 
-        if not (yield self._check_password(user_id, password)):
-            logger.warn("Failed password login for user %s", user_id)
-            raise LoginError(403, "", errcode=Codes.FORBIDDEN)
-
-        defer.returnValue(user_id)
+        return self._check_password(user_id, password)
 
     @defer.inlineCallbacks
     def _check_recaptcha(self, authdict, clientip):
@@ -270,8 +280,17 @@ class AuthHandler(BaseHandler):
             data = pde.response
             resp_body = simplejson.loads(data)
 
-        if 'success' in resp_body and resp_body['success']:
-            defer.returnValue(True)
+        if 'success' in resp_body:
+            # Note that we do NOT check the hostname here: we explicitly
+            # intend the CAPTCHA to be presented by whatever client the
+            # user is using, we just care that they have completed a CAPTCHA.
+            logger.info(
+                "%s reCAPTCHA from hostname %s",
+                "Successful" if resp_body['success'] else "Failed",
+                resp_body.get('hostname')
+            )
+            if resp_body['success']:
+                defer.returnValue(True)
         raise LoginError(401, "", errcode=Codes.UNAUTHORIZED)
 
     @defer.inlineCallbacks
@@ -338,67 +357,84 @@ class AuthHandler(BaseHandler):
 
         return self.sessions[session_id]
 
-    @defer.inlineCallbacks
-    def login_with_password(self, user_id, password):
+    def validate_password_login(self, user_id, password):
         """
         Authenticates the user with their username and password.
 
         Used only by the v1 login API.
 
         Args:
-            user_id (str): User ID
+            user_id (str): complete @user:id
             password (str): Password
         Returns:
-            A tuple of:
-              The user's ID.
-              The access token for the user's session.
-              The refresh token for the user's session.
+            defer.Deferred: (str) canonical user id
         Raises:
-            StoreError if there was a problem storing the token.
+            StoreError if there was a problem accessing the database
             LoginError if there was an authentication problem.
         """
-
-        if not (yield self._check_password(user_id, password)):
-            logger.warn("Failed password login for user %s", user_id)
-            raise LoginError(403, "", errcode=Codes.FORBIDDEN)
-
-        logger.info("Logging in user %s", user_id)
-        access_token = yield self.issue_access_token(user_id)
-        refresh_token = yield self.issue_refresh_token(user_id)
-        defer.returnValue((user_id, access_token, refresh_token))
+        return self._check_password(user_id, password)
 
     @defer.inlineCallbacks
-    def get_login_tuple_for_user_id(self, user_id):
+    def get_login_tuple_for_user_id(self, user_id, device_id=None,
+                                    initial_display_name=None):
         """
         Gets login tuple for the user with the given user ID.
+
+        Creates a new access/refresh token for the user.
+
         The user is assumed to have been authenticated by some other
-        machanism (e.g. CAS)
+        mechanism (e.g. CAS), and the user_id converted to the canonical case.
+
+        The device will be recorded in the table if it is not there already.
 
         Args:
-            user_id (str): User ID
+            user_id (str): canonical User ID
+            device_id (str|None): the device ID to associate with the tokens.
+               None to leave the tokens unassociated with a device (deprecated:
+               we should always have a device ID)
+            initial_display_name (str): display name to associate with the
+               device if it needs re-registering
         Returns:
             A tuple of:
-              The user's ID.
               The access token for the user's session.
               The refresh token for the user's session.
         Raises:
             StoreError if there was a problem storing the token.
             LoginError if there was an authentication problem.
         """
-        user_id, ignored = yield self._find_user_id_and_pwd_hash(user_id)
+        logger.info("Logging in user %s on device %s", user_id, device_id)
+        access_token = yield self.issue_access_token(user_id, device_id)
+        refresh_token = yield self.issue_refresh_token(user_id, device_id)
+
+        # the device *should* have been registered before we got here; however,
+        # it's possible we raced against a DELETE operation. The thing we
+        # really don't want is active access_tokens without a record of the
+        # device, so we double-check it here.
+        if device_id is not None:
+            yield self.device_handler.check_device_registered(
+                user_id, device_id, initial_display_name
+            )
 
-        logger.info("Logging in user %s", user_id)
-        access_token = yield self.issue_access_token(user_id)
-        refresh_token = yield self.issue_refresh_token(user_id)
-        defer.returnValue((user_id, access_token, refresh_token))
+        defer.returnValue((access_token, refresh_token))
 
     @defer.inlineCallbacks
-    def does_user_exist(self, user_id):
+    def check_user_exists(self, user_id):
+        """
+        Checks to see if a user with the given id exists. Will check case
+        insensitively, but return None if there are multiple inexact matches.
+
+        Args:
+            user_id (str): complete @user:id
+
+        Returns:
+            defer.Deferred: (str) canonical_user_id, or None if zero or
+            multiple matches
+        """
         try:
-            yield self._find_user_id_and_pwd_hash(user_id)
-            defer.returnValue(True)
+            res = yield self._find_user_id_and_pwd_hash(user_id)
+            defer.returnValue(res[0])
         except LoginError:
-            defer.returnValue(False)
+            defer.returnValue(None)
 
     @defer.inlineCallbacks
     def _find_user_id_and_pwd_hash(self, user_id):
@@ -428,84 +464,232 @@ class AuthHandler(BaseHandler):
 
     @defer.inlineCallbacks
     def _check_password(self, user_id, password):
-        """
+        """Authenticate a user against the LDAP and local databases.
+
+        user_id is checked case insensitively against the local database, but
+        will throw if there are multiple inexact matches.
+
+        Args:
+            user_id (str): complete @user:id
         Returns:
-            True if the user_id successfully authenticated
+            (str) the canonical_user_id
+        Raises:
+            LoginError if the password was incorrect
         """
         valid_ldap = yield self._check_ldap_password(user_id, password)
         if valid_ldap:
-            defer.returnValue(True)
-
-        valid_local_password = yield self._check_local_password(user_id, password)
-        if valid_local_password:
-            defer.returnValue(True)
+            defer.returnValue(user_id)
 
-        defer.returnValue(False)
+        result = yield self._check_local_password(user_id, password)
+        defer.returnValue(result)
 
     @defer.inlineCallbacks
     def _check_local_password(self, user_id, password):
-        try:
-            user_id, password_hash = yield self._find_user_id_and_pwd_hash(user_id)
-            defer.returnValue(self.validate_hash(password, password_hash))
-        except LoginError:
-            defer.returnValue(False)
+        """Authenticate a user against the local password database.
+
+        user_id is checked case insensitively, but will throw if there are
+        multiple inexact matches.
+
+        Args:
+            user_id (str): complete @user:id
+        Returns:
+            (str) the canonical_user_id
+        Raises:
+            LoginError if the password was incorrect
+        """
+        user_id, password_hash = yield self._find_user_id_and_pwd_hash(user_id)
+        result = self.validate_hash(password, password_hash)
+        if not result:
+            logger.warn("Failed password login for user %s", user_id)
+            raise LoginError(403, "", errcode=Codes.FORBIDDEN)
+        defer.returnValue(user_id)
 
     @defer.inlineCallbacks
     def _check_ldap_password(self, user_id, password):
-        if not self.ldap_enabled:
-            logger.debug("LDAP not configured")
+        """ Attempt to authenticate a user against an LDAP Server
+            and register an account if none exists.
+
+            Returns:
+                True if authentication against LDAP was successful
+        """
+
+        if not ldap3 or not self.ldap_enabled:
             defer.returnValue(False)
 
-        import ldap
+        if self.ldap_mode not in LDAPMode.LIST:
+            raise RuntimeError(
+                'Invalid ldap mode specified: {mode}'.format(
+                    mode=self.ldap_mode
+                )
+            )
 
-        logger.info("Authenticating %s with LDAP" % user_id)
         try:
-            ldap_url = "%s:%s" % (self.ldap_server, self.ldap_port)
-            logger.debug("Connecting LDAP server at %s" % ldap_url)
-            l = ldap.initialize(ldap_url)
-            if self.ldap_tls:
-                logger.debug("Initiating TLS")
-                self._connection.start_tls_s()
-
-            local_name = UserID.from_string(user_id).localpart
-
-            dn = "%s=%s, %s" % (
-                self.ldap_search_property,
-                local_name,
-                self.ldap_search_base)
-            logger.debug("DN for LDAP authentication: %s" % dn)
-
-            l.simple_bind_s(dn.encode('utf-8'), password.encode('utf-8'))
-
-            if not (yield self.does_user_exist(user_id)):
-                handler = self.hs.get_handlers().registration_handler
-                user_id, access_token = (
-                    yield handler.register(localpart=local_name)
+            server = ldap3.Server(self.ldap_uri)
+            logger.debug(
+                "Attempting ldap connection with %s",
+                self.ldap_uri
+            )
+
+            localpart = UserID.from_string(user_id).localpart
+            if self.ldap_mode == LDAPMode.SIMPLE:
+                # bind with the local user's ldap credentials
+                bind_dn = "{prop}={value},{base}".format(
+                    prop=self.ldap_attributes['uid'],
+                    value=localpart,
+                    base=self.ldap_base
+                )
+                conn = ldap3.Connection(server, bind_dn, password)
+                logger.debug(
+                    "Established ldap connection in simple mode: %s",
+                    conn
                 )
 
+                if self.ldap_start_tls:
+                    conn.start_tls()
+                    logger.debug(
+                        "Upgraded ldap connection in simple mode through StartTLS: %s",
+                        conn
+                    )
+
+                conn.bind()
+
+            elif self.ldap_mode == LDAPMode.SEARCH:
+                # connect with preconfigured credentials and search for local user
+                conn = ldap3.Connection(
+                    server,
+                    self.ldap_bind_dn,
+                    self.ldap_bind_password
+                )
+                logger.debug(
+                    "Established ldap connection in search mode: %s",
+                    conn
+                )
+
+                if self.ldap_start_tls:
+                    conn.start_tls()
+                    logger.debug(
+                        "Upgraded ldap connection in search mode through StartTLS: %s",
+                        conn
+                    )
+
+                conn.bind()
+
+                # find matching dn
+                query = "({prop}={value})".format(
+                    prop=self.ldap_attributes['uid'],
+                    value=localpart
+                )
+                if self.ldap_filter:
+                    query = "(&{query}{filter})".format(
+                        query=query,
+                        filter=self.ldap_filter
+                    )
+                logger.debug("ldap search filter: %s", query)
+                result = conn.search(self.ldap_base, query)
+
+                if result and len(conn.response) == 1:
+                    # found exactly one result
+                    user_dn = conn.response[0]['dn']
+                    logger.debug('ldap search found dn: %s', user_dn)
+
+                    # unbind and reconnect, rebind with found dn
+                    conn.unbind()
+                    conn = ldap3.Connection(
+                        server,
+                        user_dn,
+                        password,
+                        auto_bind=True
+                    )
+                else:
+                    # found 0 or > 1 results, abort!
+                    logger.warn(
+                        "ldap search returned unexpected (%d!=1) amount of results",
+                        len(conn.response)
+                    )
+                    defer.returnValue(False)
+
+            logger.info(
+                "User authenticated against ldap server: %s",
+                conn
+            )
+
+            # check for existing account, if none exists, create one
+            if not (yield self.check_user_exists(user_id)):
+                # query user metadata for account creation
+                query = "({prop}={value})".format(
+                    prop=self.ldap_attributes['uid'],
+                    value=localpart
+                )
+
+                if self.ldap_mode == LDAPMode.SEARCH and self.ldap_filter:
+                    query = "(&{filter}{user_filter})".format(
+                        filter=query,
+                        user_filter=self.ldap_filter
+                    )
+                logger.debug("ldap registration filter: %s", query)
+
+                result = conn.search(
+                    search_base=self.ldap_base,
+                    search_filter=query,
+                    attributes=[
+                        self.ldap_attributes['name'],
+                        self.ldap_attributes['mail']
+                    ]
+                )
+
+                if len(conn.response) == 1:
+                    attrs = conn.response[0]['attributes']
+                    mail = attrs[self.ldap_attributes['mail']][0]
+                    name = attrs[self.ldap_attributes['name']][0]
+
+                    # create account
+                    registration_handler = self.hs.get_handlers().registration_handler
+                    user_id, access_token = (
+                        yield registration_handler.register(localpart=localpart)
+                    )
+
+                    # TODO: bind email, set displayname with data from ldap directory
+
+                    logger.info(
+                        "ldap registration successful: %d: %s (%s, %)",
+                        user_id,
+                        localpart,
+                        name,
+                        mail
+                    )
+                else:
+                    logger.warn(
+                        "ldap registration failed: unexpected (%d!=1) amount of results",
+                        len(result)
+                    )
+                    defer.returnValue(False)
+
             defer.returnValue(True)
-        except ldap.LDAPError, e:
-            logger.warn("LDAP error: %s", e)
+        except ldap3.core.exceptions.LDAPException as e:
+            logger.warn("Error during ldap authentication: %s", e)
             defer.returnValue(False)
 
     @defer.inlineCallbacks
-    def issue_access_token(self, user_id):
+    def issue_access_token(self, user_id, device_id=None):
         access_token = self.generate_access_token(user_id)
-        yield self.store.add_access_token_to_user(user_id, access_token)
+        yield self.store.add_access_token_to_user(user_id, access_token,
+                                                  device_id)
         defer.returnValue(access_token)
 
     @defer.inlineCallbacks
-    def issue_refresh_token(self, user_id):
+    def issue_refresh_token(self, user_id, device_id=None):
         refresh_token = self.generate_refresh_token(user_id)
-        yield self.store.add_refresh_token_to_user(user_id, refresh_token)
+        yield self.store.add_refresh_token_to_user(user_id, refresh_token,
+                                                   device_id)
         defer.returnValue(refresh_token)
 
-    def generate_access_token(self, user_id, extra_caveats=None):
+    def generate_access_token(self, user_id, extra_caveats=None,
+                              duration_in_ms=(60 * 60 * 1000)):
         extra_caveats = extra_caveats or []
         macaroon = self._generate_base_macaroon(user_id)
         macaroon.add_first_party_caveat("type = access")
         now = self.hs.get_clock().time_msec()
-        expiry = now + (60 * 60 * 1000)
+        expiry = now + duration_in_ms
         macaroon.add_first_party_caveat("time < %d" % (expiry,))
         for caveat in extra_caveats:
             macaroon.add_first_party_caveat(caveat)
@@ -529,14 +713,20 @@ class AuthHandler(BaseHandler):
         macaroon.add_first_party_caveat("time < %d" % (expiry,))
         return macaroon.serialize()
 
+    def generate_delete_pusher_token(self, user_id):
+        macaroon = self._generate_base_macaroon(user_id)
+        macaroon.add_first_party_caveat("type = delete_pusher")
+        return macaroon.serialize()
+
     def validate_short_term_login_token_and_get_user_id(self, login_token):
+        auth_api = self.hs.get_auth()
         try:
             macaroon = pymacaroons.Macaroon.deserialize(login_token)
-            auth_api = self.hs.get_auth()
-            auth_api.validate_macaroon(macaroon, "login", True)
-            return self.get_user_from_macaroon(macaroon)
-        except (pymacaroons.exceptions.MacaroonException, TypeError, ValueError):
-            raise AuthError(401, "Invalid token", errcode=Codes.UNKNOWN_TOKEN)
+            user_id = auth_api.get_user_id_from_macaroon(macaroon)
+            auth_api.validate_macaroon(macaroon, "login", True, user_id)
+            return user_id
+        except Exception:
+            raise AuthError(403, "Invalid token", errcode=Codes.FORBIDDEN)
 
     def _generate_base_macaroon(self, user_id):
         macaroon = pymacaroons.Macaroon(
@@ -547,23 +737,18 @@ class AuthHandler(BaseHandler):
         macaroon.add_first_party_caveat("user_id = %s" % (user_id,))
         return macaroon
 
-    def get_user_from_macaroon(self, macaroon):
-        user_prefix = "user_id = "
-        for caveat in macaroon.caveats:
-            if caveat.caveat_id.startswith(user_prefix):
-                return caveat.caveat_id[len(user_prefix):]
-        raise AuthError(
-            self.INVALID_TOKEN_HTTP_STATUS, "No user_id found in token",
-            errcode=Codes.UNKNOWN_TOKEN
-        )
-
     @defer.inlineCallbacks
     def set_password(self, user_id, newpassword, requester=None):
         password_hash = self.hash(newpassword)
 
         except_access_token_ids = [requester.access_token_id] if requester else []
 
-        yield self.store.user_set_password_hash(user_id, password_hash)
+        try:
+            yield self.store.user_set_password_hash(user_id, password_hash)
+        except StoreError as e:
+            if e.code == 404:
+                raise SynapseError(404, "Unknown user", Codes.NOT_FOUND)
+            raise e
         yield self.store.user_delete_access_tokens(
             user_id, except_access_token_ids
         )
@@ -603,7 +788,8 @@ class AuthHandler(BaseHandler):
         Returns:
             Hashed password (str).
         """
-        return bcrypt.hashpw(password, bcrypt.gensalt(self.bcrypt_rounds))
+        return bcrypt.hashpw(password + self.hs.config.password_pepper,
+                             bcrypt.gensalt(self.bcrypt_rounds))
 
     def validate_hash(self, password, stored_hash):
         """Validates that self.hash(password) == stored_hash.
@@ -616,6 +802,7 @@ class AuthHandler(BaseHandler):
             Whether self.hash(password) == stored_hash (bool).
         """
         if stored_hash:
-            return bcrypt.hashpw(password, stored_hash) == stored_hash
+            return bcrypt.hashpw(password + self.hs.config.password_pepper,
+                                 stored_hash.encode('utf-8')) == stored_hash
         else:
             return False
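
Stripped of the synapse plumbing, the "simple" LDAP mode above amounts to
templating the user's localpart into a bind DN and letting the directory
server check the password. A standalone sketch using the ldap3 API; the URI,
attribute name and search base are placeholder values:

    import ldap3


    def ldap_simple_bind(localpart, password,
                         uri="ldap://localhost:389",
                         uid_attr="uid",
                         base="ou=users,dc=example,dc=com",
                         start_tls=False):
        server = ldap3.Server(uri)
        bind_dn = "%s=%s,%s" % (uid_attr, localpart, base)
        conn = ldap3.Connection(server, bind_dn, password)
        if start_tls:
            # Upgrade the connection before sending credentials.
            conn.start_tls()
        # bind() returns True on success, False on bad credentials.
        return conn.bind()
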
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
new file mode 100644
index 0000000000..8d630c6b1a
--- /dev/null
+++ b/synapse/handlers/device.py
@@ -0,0 +1,181 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.api import errors
+from synapse.util import stringutils
+from twisted.internet import defer
+from ._base import BaseHandler
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class DeviceHandler(BaseHandler):
+    def __init__(self, hs):
+        super(DeviceHandler, self).__init__(hs)
+
+    @defer.inlineCallbacks
+    def check_device_registered(self, user_id, device_id,
+                                initial_device_display_name=None):
+        """
+        If the given device has not been registered, register it with the
+        supplied display name.
+
+        If no device_id is supplied, we make one up.
+
+        Args:
+            user_id (str):  @user:id
+            device_id (str | None): device id supplied by client
+            initial_device_display_name (str | None): device display name from
+                 client
+        Returns:
+            str: device id (generated if none was supplied)
+        """
+        if device_id is not None:
+            yield self.store.store_device(
+                user_id=user_id,
+                device_id=device_id,
+                initial_device_display_name=initial_device_display_name,
+                ignore_if_known=True,
+            )
+            defer.returnValue(device_id)
+
+        # if the device id is not specified, we'll autogen one, but loop a few
+        # times in case of a clash.
+        attempts = 0
+        while attempts < 5:
+            try:
+                device_id = stringutils.random_string_with_symbols(16)
+                yield self.store.store_device(
+                    user_id=user_id,
+                    device_id=device_id,
+                    initial_device_display_name=initial_device_display_name,
+                    ignore_if_known=False,
+                )
+                defer.returnValue(device_id)
+            except errors.StoreError:
+                attempts += 1
+
+        raise errors.StoreError(500, "Couldn't generate a device ID.")
+
+    @defer.inlineCallbacks
+    def get_devices_by_user(self, user_id):
+        """
+        Retrieve the given user's devices
+
+        Args:
+            user_id (str):
+        Returns:
+            defer.Deferred: list[dict[str, X]]: info on each device
+        """
+
+        device_map = yield self.store.get_devices_by_user(user_id)
+
+        ips = yield self.store.get_last_client_ip_by_device(
+            devices=((user_id, device_id) for device_id in device_map.keys())
+        )
+
+        devices = device_map.values()
+        for device in devices:
+            _update_device_from_client_ips(device, ips)
+
+        defer.returnValue(devices)
+
+    @defer.inlineCallbacks
+    def get_device(self, user_id, device_id):
+        """ Retrieve the given device
+
+        Args:
+            user_id (str):
+            device_id (str):
+
+        Returns:
+            defer.Deferred: dict[str, X]: info on the device
+        Raises:
+            errors.NotFoundError: if the device was not found
+        """
+        try:
+            device = yield self.store.get_device(user_id, device_id)
+        except errors.StoreError:
+            raise errors.NotFoundError
+        ips = yield self.store.get_last_client_ip_by_device(
+            devices=((user_id, device_id),)
+        )
+        _update_device_from_client_ips(device, ips)
+        defer.returnValue(device)
+
+    @defer.inlineCallbacks
+    def delete_device(self, user_id, device_id):
+        """ Delete the given device
+
+        Args:
+            user_id (str):
+            device_id (str):
+
+        Returns:
+            defer.Deferred:
+        """
+
+        try:
+            yield self.store.delete_device(user_id, device_id)
+        except errors.StoreError as e:
+            if e.code == 404:
+                # no match
+                pass
+            else:
+                raise
+
+        yield self.store.user_delete_access_tokens(
+            user_id, device_id=device_id,
+            delete_refresh_tokens=True,
+        )
+
+        yield self.store.delete_e2e_keys_by_device(
+            user_id=user_id, device_id=device_id
+        )
+
+    @defer.inlineCallbacks
+    def update_device(self, user_id, device_id, content):
+        """ Update the given device
+
+        Args:
+            user_id (str):
+            device_id (str):
+            content (dict): body of update request
+
+        Returns:
+            defer.Deferred:
+        """
+
+        try:
+            yield self.store.update_device(
+                user_id,
+                device_id,
+                new_display_name=content.get("display_name")
+            )
+        except errors.StoreError as e:
+            if e.code == 404:
+                raise errors.NotFoundError()
+            else:
+                raise
+
+
+def _update_device_from_client_ips(device, client_ips):
+    ip = client_ips.get((device["user_id"], device["device_id"]), {})
+    device.update({
+        "last_seen_ts": ip.get("last_seen"),
+        "last_seen_ip": ip.get("ip"),
+    })
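
A hypothetical call site for check_device_registered above: passing
device_id=None triggers the generate-and-retry loop, whose five attempts guard
against the unlikely clash of two random 16-character device IDs.

    from twisted.internet import defer


    @defer.inlineCallbacks
    def login_and_record_device(device_handler, user_id, requested_device_id):
        # Passing device_id=None asks the handler to generate one for us.
        device_id = yield device_handler.check_device_registered(
            user_id=user_id,
            device_id=requested_device_id,
            initial_device_display_name="Alice's phone",
        )
        defer.returnValue(device_id)
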
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index 8eeb225811..4bea7f2b19 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -33,6 +33,7 @@ class DirectoryHandler(BaseHandler):
         super(DirectoryHandler, self).__init__(hs)
 
         self.state = hs.get_state_handler()
+        self.appservice_handler = hs.get_application_service_handler()
 
         self.federation = hs.get_replication_layer()
         self.federation.register_query_handler(
@@ -281,7 +282,7 @@ class DirectoryHandler(BaseHandler):
         )
         if not result:
             # Query AS to see if it exists
-            as_handler = self.hs.get_handlers().appservice_handler
+            as_handler = self.appservice_handler
             result = yield as_handler.query_room_alias_exists(room_alias)
         defer.returnValue(result)
 
diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py
new file mode 100644
index 0000000000..2c7bfd91ed
--- /dev/null
+++ b/synapse/handlers/e2e_keys.py
@@ -0,0 +1,139 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+import json
+import logging
+
+from twisted.internet import defer
+
+from synapse.api import errors
+import synapse.types
+
+logger = logging.getLogger(__name__)
+
+
+class E2eKeysHandler(object):
+    def __init__(self, hs):
+        self.store = hs.get_datastore()
+        self.federation = hs.get_replication_layer()
+        self.is_mine_id = hs.is_mine_id
+        self.server_name = hs.hostname
+
+        # doesn't really work as part of the generic query API, because the
+        # query request requires an object POST, but we abuse the
+        # "query handler" interface.
+        self.federation.register_query_handler(
+            "client_keys", self.on_federation_query_client_keys
+        )
+
+    @defer.inlineCallbacks
+    def query_devices(self, query_body):
+        """ Handle a device key query from a client
+
+        {
+            "device_keys": {
+                "<user_id>": ["<device_id>"]
+            }
+        }
+        ->
+        {
+            "device_keys": {
+                "<user_id>": {
+                    "<device_id>": {
+                        ...
+                    }
+                }
+            }
+        }
+        """
+        device_keys_query = query_body.get("device_keys", {})
+
+        # separate users by domain.
+        # make a map from domain to user_id to device_ids
+        queries_by_domain = collections.defaultdict(dict)
+        for user_id, device_ids in device_keys_query.items():
+            user = synapse.types.UserID.from_string(user_id)
+            queries_by_domain[user.domain][user_id] = device_ids
+
+        # do the queries
+        # TODO: do these in parallel
+        results = {}
+        for destination, destination_query in queries_by_domain.items():
+            if destination == self.server_name:
+                res = yield self.query_local_devices(destination_query)
+            else:
+                res = yield self.federation.query_client_keys(
+                    destination, {"device_keys": destination_query}
+                )
+                res = res["device_keys"]
+            for user_id, keys in res.items():
+                if user_id in destination_query:
+                    results[user_id] = keys
+
+        defer.returnValue((200, {"device_keys": results}))
+
+    @defer.inlineCallbacks
+    def query_local_devices(self, query):
+        """Get E2E device keys for local users
+
+        Args:
+            query (dict[string, list[string]|None]): map from user_id to a list
+                 of devices to query (None for all devices)
+
+        Returns:
+            defer.Deferred: (resolves to dict[string, dict[string, dict]]):
+                 map from user_id -> device_id -> device details
+        """
+        local_query = []
+
+        result_dict = {}
+        for user_id, device_ids in query.items():
+            if not self.is_mine_id(user_id):
+                logger.warning("Request for keys for non-local user %s",
+                               user_id)
+                raise errors.SynapseError(400, "Not a user here")
+
+            if not device_ids:
+                local_query.append((user_id, None))
+            else:
+                for device_id in device_ids:
+                    local_query.append((user_id, device_id))
+
+            # make sure that each queried user appears in the result dict
+            result_dict[user_id] = {}
+
+        results = yield self.store.get_e2e_device_keys(local_query)
+
+        # Build the result structure, un-jsonify the results, and add the
+        # "unsigned" section
+        for user_id, device_keys in results.items():
+            for device_id, device_info in device_keys.items():
+                r = json.loads(device_info["key_json"])
+                r["unsigned"] = {}
+                display_name = device_info["device_display_name"]
+                if display_name is not None:
+                    r["unsigned"]["device_display_name"] = display_name
+                result_dict[user_id][device_id] = r
+
+        defer.returnValue(result_dict)
+
+    @defer.inlineCallbacks
+    def on_federation_query_client_keys(self, query_body):
+        """ Handle a device key query from a federated server
+        """
+        device_keys_query = query_body.get("device_keys", {})
+        res = yield self.query_local_devices(device_keys_query)
+        defer.returnValue({"device_keys": res})
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 648a505e65..ff6bb475b5 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -66,10 +66,6 @@ class FederationHandler(BaseHandler):
 
         self.hs = hs
 
-        self.distributor.observe("user_joined_room", self.user_joined_room)
-
-        self.waiting_for_join_list = {}
-
         self.store = hs.get_datastore()
         self.replication_layer = hs.get_replication_layer()
         self.state_handler = hs.get_state_handler()
@@ -128,7 +124,7 @@ class FederationHandler(BaseHandler):
 
             try:
                 event_stream_id, max_stream_id = yield self._persist_auth_tree(
-                    auth_chain, state, event
+                    origin, auth_chain, state, event
                 )
             except AuthError as e:
                 raise FederationError(
@@ -253,7 +249,7 @@ class FederationHandler(BaseHandler):
                         if ev.type != EventTypes.Member:
                             continue
                         try:
-                            domain = UserID.from_string(ev.state_key).domain
+                            domain = get_domain_from_id(ev.state_key)
                         except:
                             continue
 
@@ -339,29 +335,58 @@ class FederationHandler(BaseHandler):
             state_events.update({s.event_id: s for s in state})
             events_to_state[e_id] = state
 
-        seen_events = yield self.store.have_events(
-            set(auth_events.keys()) | set(state_events.keys())
-        )
-
-        all_events = events + state_events.values() + auth_events.values()
         required_auth = set(
-            a_id for event in all_events for a_id, _ in event.auth_events
+            a_id
+            for event in events + state_events.values() + auth_events.values()
+            for a_id, _ in event.auth_events
         )
-
+        auth_events.update({
+            e_id: event_map[e_id] for e_id in required_auth if e_id in event_map
+        })
         missing_auth = required_auth - set(auth_events)
-        results = yield defer.gatherResults(
-            [
-                self.replication_layer.get_pdu(
-                    [dest],
-                    event_id,
-                    outlier=True,
-                    timeout=10000,
+        failed_to_fetch = set()
+
+        # Try and fetch any missing auth events from both DB and remote servers.
+        # We repeatedly do this until we stop finding new auth events.
+        while missing_auth - failed_to_fetch:
+            logger.info("Missing auth for backfill: %r", missing_auth)
+            ret_events = yield self.store.get_events(missing_auth - failed_to_fetch)
+            auth_events.update(ret_events)
+
+            required_auth.update(
+                a_id for event in ret_events.values() for a_id, _ in event.auth_events
+            )
+            missing_auth = required_auth - set(auth_events)
+
+            if missing_auth - failed_to_fetch:
+                logger.info(
+                    "Fetching missing auth for backfill: %r",
+                    missing_auth - failed_to_fetch
                 )
-                for event_id in missing_auth
-            ],
-            consumeErrors=True
-        ).addErrback(unwrapFirstError)
-        auth_events.update({a.event_id: a for a in results})
+
+                results = yield defer.gatherResults(
+                    [
+                        self.replication_layer.get_pdu(
+                            [dest],
+                            event_id,
+                            outlier=True,
+                            timeout=10000,
+                        )
+                        for event_id in missing_auth - failed_to_fetch
+                    ],
+                    consumeErrors=True
+                ).addErrback(unwrapFirstError)
+                auth_events.update({a.event_id: a for a in results})
+                required_auth.update(
+                    a_id for event in results for a_id, _ in event.auth_events
+                )
+                missing_auth = required_auth - set(auth_events)
+
+                failed_to_fetch = missing_auth - set(auth_events)
+
+        seen_events = yield self.store.have_events(
+            set(auth_events.keys()) | set(state_events.keys())
+        )
 
         ev_infos = []
         for a in auth_events.values():
@@ -374,6 +399,7 @@ class FederationHandler(BaseHandler):
                     (auth_events[a_id].type, auth_events[a_id].state_key):
                     auth_events[a_id]
                     for a_id, _ in a.auth_events
+                    if a_id in auth_events
                 }
             })
 
@@ -385,6 +411,7 @@ class FederationHandler(BaseHandler):
                     (auth_events[a_id].type, auth_events[a_id].state_key):
                     auth_events[a_id]
                     for a_id, _ in event_map[e_id].auth_events
+                    if a_id in auth_events
                 }
             })
 
@@ -403,7 +430,7 @@ class FederationHandler(BaseHandler):
             # previous to work out the state.
             # TODO: We can probably do something more clever here.
             yield self._handle_new_event(
-                dest, event
+                dest, event, backfilled=True,
             )
 
         defer.returnValue(events)
@@ -639,7 +666,7 @@ class FederationHandler(BaseHandler):
                 pass
 
             event_stream_id, max_stream_id = yield self._persist_auth_tree(
-                auth_chain, state, event
+                origin, auth_chain, state, event
             )
 
             with PreserveLoggingContext():
@@ -690,7 +717,9 @@ class FederationHandler(BaseHandler):
             logger.warn("Failed to create join %r because %s", event, e)
             raise e
 
-        self.auth.check(event, auth_events=context.current_state)
+        # The remote hasn't signed it yet, obviously. We'll do the full checks
+        # when we get the event back in `on_send_join_request`
+        self.auth.check(event, auth_events=context.current_state, do_sig_check=False)
 
         defer.returnValue(event)
 
@@ -920,7 +949,9 @@ class FederationHandler(BaseHandler):
         )
 
         try:
-            self.auth.check(event, auth_events=context.current_state)
+            # The remote hasn't signed it yet, obviously. We'll do the full checks
+            # when we get the event back in `on_send_leave_request`
+            self.auth.check(event, auth_events=context.current_state, do_sig_check=False)
         except AuthError as e:
             logger.warn("Failed to create new leave %r because %s", event, e)
             raise e
@@ -989,14 +1020,9 @@ class FederationHandler(BaseHandler):
         defer.returnValue(None)
 
     @defer.inlineCallbacks
-    def get_state_for_pdu(self, origin, room_id, event_id, do_auth=True):
+    def get_state_for_pdu(self, room_id, event_id):
         yield run_on_reactor()
 
-        if do_auth:
-            in_room = yield self.auth.check_host_in_room(room_id, origin)
-            if not in_room:
-                raise AuthError(403, "Host not in room.")
-
         state_groups = yield self.store.get_state_groups(
             room_id, [event_id]
         )
@@ -1020,13 +1046,16 @@ class FederationHandler(BaseHandler):
 
             res = results.values()
             for event in res:
-                event.signatures.update(
-                    compute_event_signature(
-                        event,
-                        self.hs.hostname,
-                        self.hs.config.signing_key[0]
+                # We sign these again because there was a bug where we
+                # incorrectly signed things the first time round
+                if self.hs.is_mine_id(event.event_id):
+                    event.signatures.update(
+                        compute_event_signature(
+                            event,
+                            self.hs.hostname,
+                            self.hs.config.signing_key[0]
+                        )
                     )
-                )
 
             defer.returnValue(res)
         else:
@@ -1064,16 +1093,17 @@ class FederationHandler(BaseHandler):
         )
 
         if event:
-            # FIXME: This is a temporary work around where we occasionally
-            # return events slightly differently than when they were
-            # originally signed
-            event.signatures.update(
-                compute_event_signature(
-                    event,
-                    self.hs.hostname,
-                    self.hs.config.signing_key[0]
+            if self.hs.is_mine_id(event.event_id):
+                # FIXME: This is a temporary work around where we occasionally
+                # return events slightly differently than when they were
+                # originally signed
+                event.signatures.update(
+                    compute_event_signature(
+                        event,
+                        self.hs.hostname,
+                        self.hs.config.signing_key[0]
+                    )
                 )
-            )
 
             if do_auth:
                 in_room = yield self.auth.check_host_in_room(
@@ -1083,6 +1113,12 @@ class FederationHandler(BaseHandler):
                 if not in_room:
                     raise AuthError(403, "Host not in room.")
 
+                events = yield self._filter_events_for_server(
+                    origin, event.room_id, [event]
+                )
+
+                event = events[0]
+
             defer.returnValue(event)
         else:
             defer.returnValue(None)
@@ -1091,15 +1127,6 @@ class FederationHandler(BaseHandler):
     def get_min_depth_for_context(self, context):
         return self.store.get_min_depth(context)
 
-    @log_function
-    def user_joined_room(self, user, room_id):
-        waiters = self.waiting_for_join_list.get(
-            (user.to_string(), room_id),
-            []
-        )
-        while waiters:
-            waiters.pop().callback(None)
-
     @defer.inlineCallbacks
     @log_function
     def _handle_new_event(self, origin, event, state=None, auth_events=None,
@@ -1122,11 +1149,12 @@ class FederationHandler(BaseHandler):
             backfilled=backfilled,
         )
 
-        # this intentionally does not yield: we don't care about the result
-        # and don't need to wait for it.
-        preserve_fn(self.hs.get_pusherpool().on_new_notifications)(
-            event_stream_id, max_stream_id
-        )
+        if not backfilled:
+            # this intentionally does not yield: we don't care about the result
+            # and don't need to wait for it.
+            preserve_fn(self.hs.get_pusherpool().on_new_notifications)(
+                event_stream_id, max_stream_id
+            )
 
         defer.returnValue((context, event_stream_id, max_stream_id))
 
@@ -1158,11 +1186,19 @@ class FederationHandler(BaseHandler):
         )
 
     @defer.inlineCallbacks
-    def _persist_auth_tree(self, auth_events, state, event):
+    def _persist_auth_tree(self, origin, auth_events, state, event):
         """Checks the auth chain is valid (and passes auth checks) for the
         state and event. Then persists the auth chain and state atomically.
         Persists the event separately.
 
+        Will attempt to fetch missing auth events.
+
+        Args:
+            origin (str): Where the events came from
+            auth_events (list)
+            state (list)
+            event (Event)
+
         Returns:
             2-tuple of (event_stream_id, max_stream_id) from the persist_event
             call for `event`
@@ -1175,7 +1211,7 @@ class FederationHandler(BaseHandler):
 
         event_map = {
             e.event_id: e
-            for e in auth_events
+            for e in itertools.chain(auth_events, state, [event])
         }
 
         create_event = None
@@ -1184,10 +1220,29 @@ class FederationHandler(BaseHandler):
                 create_event = e
                 break
 
+        missing_auth_events = set()
+        for e in itertools.chain(auth_events, state, [event]):
+            for e_id, _ in e.auth_events:
+                if e_id not in event_map:
+                    missing_auth_events.add(e_id)
+
+        for e_id in missing_auth_events:
+            m_ev = yield self.replication_layer.get_pdu(
+                [origin],
+                e_id,
+                outlier=True,
+                timeout=10000,
+            )
+            if m_ev and m_ev.event_id == e_id:
+                event_map[e_id] = m_ev
+            else:
+                logger.info("Failed to find auth event %r", e_id)
+
         for e in itertools.chain(auth_events, state, [event]):
             auth_for_e = {
                 (event_map[e_id].type, event_map[e_id].state_key): event_map[e_id]
                 for e_id, _ in e.auth_events
+                if e_id in event_map
             }
             if create_event:
                 auth_for_e[(EventTypes.Create, "")] = create_event
@@ -1421,7 +1476,7 @@ class FederationHandler(BaseHandler):
                 local_view = dict(auth_events)
                 remote_view = dict(auth_events)
                 remote_view.update({
-                    (d.type, d.state_key): d for d in different_events
+                    (d.type, d.state_key): d for d in different_events if d
                 })
 
                 new_state, prev_state = self.state_handler.resolve_events(
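
The backfill change above replaces a single gatherResults call with a
fixed-point loop: keep fetching missing auth events (database first, then the
remote server) until no new requirements appear, remembering permanent
failures so the loop terminates. Abstracted from the Twisted machinery, and
assuming events expose auth_events as (event_id, hash) pairs as in the diff:

    def fetch_auth_closure(required, have, fetch_batch):
        """Sketch only. `required` is a set of event IDs, `have` maps
        event_id -> event, and fetch_batch(ids) returns whatever subset it
        could retrieve as the same kind of dict."""
        failed = set()
        missing = required - set(have)
        while missing - failed:
            wanted = missing - failed
            fetched = fetch_batch(wanted)
            have.update(fetched)
            failed |= wanted - set(fetched)
            # Newly fetched events may themselves need further auth events.
            for ev in fetched.values():
                required.update(a_id for a_id, _ in ev.auth_events)
            missing = required - set(have)
        return failed
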
diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py
index 656ce124f9..559e5d5a71 100644
--- a/synapse/handlers/identity.py
+++ b/synapse/handlers/identity.py
@@ -21,7 +21,7 @@ from synapse.api.errors import (
 )
 from ._base import BaseHandler
 from synapse.util.async import run_on_reactor
-from synapse.api.errors import SynapseError
+from synapse.api.errors import SynapseError, Codes
 
 import json
 import logging
@@ -41,6 +41,20 @@ class IdentityHandler(BaseHandler):
             hs.config.use_insecure_ssl_client_just_for_testing_do_not_use
         )
 
+    def _should_trust_id_server(self, id_server):
+        if id_server not in self.trusted_id_servers:
+            if self.trust_any_id_server_just_for_testing_do_not_use:
+                logger.warn(
+                    "Trusting untrustworthy ID server %r even though it isn't"
+                    " in the trusted id list for testing because"
+                    " 'use_insecure_ssl_client_just_for_testing_do_not_use'"
+                    " is set in the config",
+                    id_server,
+                )
+            else:
+                return False
+        return True
+
     @defer.inlineCallbacks
     def threepid_from_creds(self, creds):
         yield run_on_reactor()
@@ -59,19 +73,12 @@ class IdentityHandler(BaseHandler):
         else:
             raise SynapseError(400, "No client_secret in creds")
 
-        if id_server not in self.trusted_id_servers:
-            if self.trust_any_id_server_just_for_testing_do_not_use:
-                logger.warn(
-                    "Trusting untrustworthy ID server %r even though it isn't"
-                    " in the trusted id list for testing because"
-                    " 'use_insecure_ssl_client_just_for_testing_do_not_use'"
-                    " is set in the config",
-                    id_server,
-                )
-            else:
-                logger.warn('%s is not a trusted ID server: rejecting 3pid ' +
-                            'credentials', id_server)
-                defer.returnValue(None)
+        if not self._should_trust_id_server(id_server):
+            logger.warn(
+                '%s is not a trusted ID server: rejecting 3pid ' +
+                'credentials', id_server
+            )
+            defer.returnValue(None)
 
         data = {}
         try:
@@ -129,6 +136,12 @@ class IdentityHandler(BaseHandler):
     def requestEmailToken(self, id_server, email, client_secret, send_attempt, **kwargs):
         yield run_on_reactor()
 
+        if not self._should_trust_id_server(id_server):
+            raise SynapseError(
+                400, "Untrusted ID server '%s'" % id_server,
+                Codes.SERVER_NOT_TRUSTED
+            )
+
         params = {
             'email': email,
             'client_secret': client_secret,
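
Factoring the check into _should_trust_id_server lets both call sites above
share one trust policy, with the testing-only override isolated in a single
place. Reduced to a standalone predicate (argument names invented):

    import logging

    logger = logging.getLogger(__name__)


    def should_trust_id_server(id_server, trusted, allow_any_for_testing=False):
        if id_server in trusted:
            return True
        if allow_any_for_testing:
            # Mirrors the loud warning in the patch: never set in production.
            logger.warn("Trusting untrusted ID server %r for testing", id_server)
            return True
        return False


    assert should_trust_id_server("matrix.org", {"matrix.org", "vector.im"})
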
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index c41dafdef5..dc76d34a52 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -26,9 +26,9 @@ from synapse.types import (
     UserID, RoomAlias, RoomStreamToken, StreamToken, get_domain_from_id
 )
 from synapse.util import unwrapFirstError
-from synapse.util.async import concurrently_execute
+from synapse.util.async import concurrently_execute, run_on_reactor, ReadWriteLock
 from synapse.util.caches.snapshot_cache import SnapshotCache
-from synapse.util.logcontext import PreserveLoggingContext, preserve_fn
+from synapse.util.logcontext import preserve_fn
 from synapse.visibility import filter_events_for_client
 
 from ._base import BaseHandler
@@ -50,9 +50,23 @@ class MessageHandler(BaseHandler):
         self.validator = EventValidator()
         self.snapshot_cache = SnapshotCache()
 
+        self.pagination_lock = ReadWriteLock()
+
+    @defer.inlineCallbacks
+    def purge_history(self, room_id, event_id):
+        event = yield self.store.get_event(event_id)
+
+        if event.room_id != room_id:
+            raise SynapseError(400, "Event is for wrong room.")
+
+        depth = event.depth
+
+        with (yield self.pagination_lock.write(room_id)):
+            yield self.store.delete_old_state(room_id, depth)
+
     @defer.inlineCallbacks
     def get_messages(self, requester, room_id=None, pagin_config=None,
-                     as_client_event=True):
+                     as_client_event=True, event_filter=None):
         """Get messages in a room.
 
         Args:
@@ -61,11 +75,11 @@ class MessageHandler(BaseHandler):
             pagin_config (synapse.api.streams.PaginationConfig): The pagination
                 config rules to apply, if any.
             as_client_event (bool): True to get events in client-server format.
+            event_filter (Filter): Filter to apply to results or None
         Returns:
             dict: Pagination API results
         """
         user_id = requester.user.to_string()
-        data_source = self.hs.get_event_sources().sources["room"]
 
         if pagin_config.from_token:
             room_token = pagin_config.from_token.room_key
@@ -85,42 +99,48 @@ class MessageHandler(BaseHandler):
 
         source_config = pagin_config.get_source_config("room")
 
-        membership, member_event_id = yield self._check_in_room_or_world_readable(
-            room_id, user_id
-        )
+        with (yield self.pagination_lock.read(room_id)):
+            membership, member_event_id = yield self._check_in_room_or_world_readable(
+                room_id, user_id
+            )
 
-        if source_config.direction == 'b':
-            # if we're going backwards, we might need to backfill. This
-            # requires that we have a topo token.
-            if room_token.topological:
-                max_topo = room_token.topological
-            else:
-                max_topo = yield self.store.get_max_topological_token_for_stream_and_room(
-                    room_id, room_token.stream
-                )
+            if source_config.direction == 'b':
+                # if we're going backwards, we might need to backfill. This
+                # requires that we have a topo token.
+                if room_token.topological:
+                    max_topo = room_token.topological
+                else:
+                    max_topo = yield self.store.get_max_topological_token(
+                        room_id, room_token.stream
+                    )
+
+                if membership == Membership.LEAVE:
+                    # If they have left the room then clamp the token to be before
+                    # they left the room, to save the effort of loading from the
+                    # database.
+                    leave_token = yield self.store.get_topological_token_for_event(
+                        member_event_id
+                    )
+                    leave_token = RoomStreamToken.parse(leave_token)
+                    if leave_token.topological < max_topo:
+                        source_config.from_key = str(leave_token)
 
-            if membership == Membership.LEAVE:
-                # If they have left the room then clamp the token to be before
-                # they left the room, to save the effort of loading from the
-                # database.
-                leave_token = yield self.store.get_topological_token_for_event(
-                    member_event_id
+                yield self.hs.get_handlers().federation_handler.maybe_backfill(
+                    room_id, max_topo
                 )
-                leave_token = RoomStreamToken.parse(leave_token)
-                if leave_token.topological < max_topo:
-                    source_config.from_key = str(leave_token)
 
-            yield self.hs.get_handlers().federation_handler.maybe_backfill(
-                room_id, max_topo
+            events, next_key = yield self.store.paginate_room_events(
+                room_id=room_id,
+                from_key=source_config.from_key,
+                to_key=source_config.to_key,
+                direction=source_config.direction,
+                limit=source_config.limit,
+                event_filter=event_filter,
             )
 
-        events, next_key = yield data_source.get_pagination_rows(
-            requester.user, source_config, room_id
-        )
-
-        next_token = pagin_config.from_token.copy_and_replace(
-            "room_key", next_key
-        )
+            next_token = pagin_config.from_token.copy_and_replace(
+                "room_key", next_key
+            )
 
         if not events:
             defer.returnValue({
@@ -129,6 +149,9 @@ class MessageHandler(BaseHandler):
                 "end": next_token.to_string(),
             })
 
+        if event_filter:
+            events = event_filter.filter(events)
+
         events = yield filter_events_for_client(
             self.store,
             user_id,
@@ -908,13 +931,16 @@ class MessageHandler(BaseHandler):
                     "Failed to get destination from event %s", s.event_id
                 )
 
-        with PreserveLoggingContext():
-            # Don't block waiting on waking up all the listeners.
+        @defer.inlineCallbacks
+        def _notify():
+            yield run_on_reactor()
             self.notifier.on_new_room_event(
                 event, event_stream_id, max_stream_id,
                 extra_users=extra_users
             )
 
+        preserve_fn(_notify)()
+
         # If invite, remove room_state from unsigned before sending.
         event.unsigned.pop("invite_room_state", None)
 
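`purge_history` takes the write side of a per-room `ReadWriteLock` while `get_messages` paginates under the read side, so any number of paginations may overlap but a purge excludes them all (and vice versa). Synapse's `ReadWriteLock` is Deferred-based; the sketch below is only a conceptual thread-based analogue of the same readers-writer discipline, with no writer-priority handling:

```python
import threading
from collections import defaultdict


class SimpleRWLock(object):
    """Many concurrent readers, or one exclusive writer."""

    def __init__(self):
        self._readers = 0
        self._lock = threading.Lock()
        self._no_readers = threading.Condition(self._lock)

    def acquire_read(self):
        with self._lock:
            self._readers += 1

    def release_read(self):
        with self._lock:
            self._readers -= 1
            if self._readers == 0:
                self._no_readers.notify_all()

    def acquire_write(self):
        self._lock.acquire()
        while self._readers:
            self._no_readers.wait()

    def release_write(self):
        self._lock.release()


room_locks = defaultdict(SimpleRWLock)
lock = room_locks["!room:example.com"]

# get_messages-style path: shared access, many paginations can overlap.
lock.acquire_read()
try:
    pass  # paginate events here
finally:
    lock.release_read()

# purge_history-style path: exclusive access while old state is deleted.
lock.acquire_write()
try:
    pass  # delete old events/state here
finally:
    lock.release_write()
```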
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index 37f57301fb..6b70fa3817 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -50,6 +50,8 @@ timers_fired_counter = metrics.register_counter("timers_fired")
 federation_presence_counter = metrics.register_counter("federation_presence")
 bump_active_time_counter = metrics.register_counter("bump_active_time")
 
+get_updates_counter = metrics.register_counter("get_updates", labels=["type"])
+
 
 # If a user was last active in the last LAST_ACTIVE_GRANULARITY, consider them
 # "currently_active"
@@ -68,6 +70,10 @@ FEDERATION_TIMEOUT = 30 * 60 * 1000
 # How often to resend presence to remote servers
 FEDERATION_PING_INTERVAL = 25 * 60 * 1000
 
+# How long we will wait before assuming that the syncs from an external process
+# are dead.
+EXTERNAL_PROCESS_EXPIRY = 5 * 60 * 1000
+
 assert LAST_ACTIVE_GRANULARITY < IDLE_TIMER
 
 
@@ -158,15 +164,26 @@ class PresenceHandler(object):
         self.serial_to_user = {}
         self._next_serial = 1
 
-        # Keeps track of the number of *ongoing* syncs. While this is non zero
-        # a user will never go offline.
+        # Keeps track of the number of *ongoing* syncs on this process. While
+        # this is non-zero a user will never go offline.
         self.user_to_num_current_syncs = {}
 
+        # Keeps track of the number of *ongoing* syncs on other processes.
+        # While any sync is ongoing on another process the user will never
+        # go offline.
+        # Each process has a unique identifier and an update frequency. If
+        # no update is received from that process within the update period then
+        # we assume that all the sync requests on that process have stopped.
+        # Stored as a dict from process_id to set of user_id, and a dict of
+        # process_id to millisecond timestamp last updated.
+        self.external_process_to_current_syncs = {}
+        self.external_process_last_updated_ms = {}
+
         # Start a LoopingCall in 30s that fires every 5s.
         # The initial delay is to allow disconnected clients a chance to
         # reconnect before we treat them as offline.
         self.clock.call_later(
-            30 * 1000,
+            30,
             self.clock.looping_call,
             self._handle_timeouts,
             5000,
@@ -266,31 +283,48 @@ class PresenceHandler(object):
         """Checks the presence of users that have timed out and updates as
         appropriate.
         """
+        logger.info("Handling presence timeouts")
         now = self.clock.time_msec()
 
-        with Measure(self.clock, "presence_handle_timeouts"):
-            # Fetch the list of users that *may* have timed out. Things may have
-            # changed since the timeout was set, so we won't necessarily have to
-            # take any action.
-            users_to_check = self.wheel_timer.fetch(now)
+        try:
+            with Measure(self.clock, "presence_handle_timeouts"):
+                # Fetch the list of users that *may* have timed out. Things may have
+                # changed since the timeout was set, so we won't necessarily have to
+                # take any action.
+                users_to_check = set(self.wheel_timer.fetch(now))
+
+                # Check whether the lists of syncing users from external
+                # processes have expired.
+                expired_process_ids = [
+                    process_id for process_id, last_update
+                    in self.external_process_last_updated_ms.items()
+                    if now - last_update > EXTERNAL_PROCESS_EXPIRY
+                ]
+                for process_id in expired_process_ids:
+                    users_to_check.update(
+                        self.external_process_to_current_syncs.pop(process_id, ())
+                    )
+                    self.external_process_last_updated_ms.pop(process_id)
 
-            states = [
-                self.user_to_current_state.get(
-                    user_id, UserPresenceState.default(user_id)
-                )
-                for user_id in set(users_to_check)
-            ]
+                states = [
+                    self.user_to_current_state.get(
+                        user_id, UserPresenceState.default(user_id)
+                    )
+                    for user_id in users_to_check
+                ]
 
-            timers_fired_counter.inc_by(len(states))
+                timers_fired_counter.inc_by(len(states))
 
-            changes = handle_timeouts(
-                states,
-                is_mine_fn=self.is_mine_id,
-                user_to_num_current_syncs=self.user_to_num_current_syncs,
-                now=now,
-            )
+                changes = handle_timeouts(
+                    states,
+                    is_mine_fn=self.is_mine_id,
+                    syncing_user_ids=self.get_currently_syncing_users(),
+                    now=now,
+                )
 
-        preserve_fn(self._update_states)(changes)
+            preserve_fn(self._update_states)(changes)
+        except Exception:
+            logger.exception("Exception in _handle_timeouts loop")
 
     @defer.inlineCallbacks
     def bump_presence_active_time(self, user):
@@ -363,6 +397,74 @@ class PresenceHandler(object):
 
         defer.returnValue(_user_syncing())
 
+    def get_currently_syncing_users(self):
+        """Get the set of user ids that are currently syncing on this HS.
+        Returns:
+            set(str): A set of user_id strings.
+        """
+        syncing_user_ids = {
+            user_id for user_id, count in self.user_to_num_current_syncs.items()
+            if count
+        }
+        for user_ids in self.external_process_to_current_syncs.values():
+            syncing_user_ids.update(user_ids)
+        return syncing_user_ids
+
+    @defer.inlineCallbacks
+    def update_external_syncs(self, process_id, syncing_user_ids):
+        """Update the syncing users for an external process
+
+        Args:
+            process_id(str): An identifier for the process the users are
+                syncing against. This allows synapse to process updates
+                as users start and stop syncing against a given process.
+            syncing_user_ids(set(str)): The set of user_ids that are
+                currently syncing on that server.
+        """
+
+        # Grab the previous list of user_ids that were syncing on that process
+        prev_syncing_user_ids = (
+            self.external_process_to_current_syncs.get(process_id, set())
+        )
+        # Grab the current presence state for both the users that are syncing
+        # now and the users that were syncing before this update.
+        prev_states = yield self.current_state_for_users(
+            syncing_user_ids | prev_syncing_user_ids
+        )
+        updates = []
+        time_now_ms = self.clock.time_msec()
+
+        # For each new user that is syncing check if we need to mark them as
+        # being online.
+        for new_user_id in syncing_user_ids - prev_syncing_user_ids:
+            prev_state = prev_states[new_user_id]
+            if prev_state.state == PresenceState.OFFLINE:
+                updates.append(prev_state.copy_and_replace(
+                    state=PresenceState.ONLINE,
+                    last_active_ts=time_now_ms,
+                    last_user_sync_ts=time_now_ms,
+                ))
+            else:
+                updates.append(prev_state.copy_and_replace(
+                    last_user_sync_ts=time_now_ms,
+                ))
+
+        # For each user that is still syncing or stopped syncing update the
+        # last sync time so that we will correctly apply the grace period when
+        # they stop syncing.
+        for old_user_id in prev_syncing_user_ids:
+            prev_state = prev_states[old_user_id]
+            updates.append(prev_state.copy_and_replace(
+                last_user_sync_ts=time_now_ms,
+            ))
+
+        yield self._update_states(updates)
+
+        # Update the last updated time for the process. We expire the entries
+        # if we don't receive an update in the given timeframe.
+        self.external_process_last_updated_ms[process_id] = self.clock.time_msec()
+        self.external_process_to_current_syncs[process_id] = syncing_user_ids
+
     @defer.inlineCallbacks
     def current_state_for_user(self, user_id):
         """Get the current presence state for a user.
@@ -879,13 +981,13 @@ class PresenceEventSource(object):
 
             user_ids_changed = set()
             changed = None
-            if from_key and max_token - from_key < 100:
-                # For small deltas, its quicker to get all changes and then
-                # work out if we share a room or they're in our presence list
+            if from_key:
                 changed = stream_change_cache.get_all_entities_changed(from_key)
 
-            # get_all_entities_changed can return None
-            if changed is not None:
+            if changed is not None and len(changed) < 500:
+                # For small deltas, it's quicker to get all changes and then
+                # work out if we share a room or they're in our presence list
+                get_updates_counter.inc("stream")
                 for other_user_id in changed:
                     if other_user_id in friends:
                         user_ids_changed.add(other_user_id)
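The event source now always asks the stream change cache for everything changed since `from_key`, and only walks that delta when it is small, falling back to a full scan of visible users otherwise; the two paths are counted separately via `get_updates_counter`. A toy sketch of the decision (threshold as in the new code, everything else illustrative):

```python
def pick_strategy(changed, threshold=500):
    """`changed` is the list from the stream change cache, or None when the
    cache cannot answer (e.g. from_key is too old)."""
    if changed is not None and len(changed) < threshold:
        return "stream"   # iterate just the changed users
    return "full"         # too many (or unknown): check everyone visible


assert pick_strategy(["@a:hs"]) == "stream"
assert pick_strategy(None) == "full"
assert pick_strategy(["@u%d:hs" % i for i in range(1000)]) == "full"
```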
@@ -897,6 +999,8 @@ class PresenceEventSource(object):
             else:
                 # Too many possible updates. Find all users we can see and check
                 # if any of them have changed.
+                get_updates_counter.inc("full")
+
                 user_ids_to_check = set()
                 for room_id in room_ids:
                     users = yield self.store.get_users_in_room(room_id)
@@ -935,15 +1039,14 @@ class PresenceEventSource(object):
         return self.get_new_events(user, from_key=None, include_offline=False)
 
 
-def handle_timeouts(user_states, is_mine_fn, user_to_num_current_syncs, now):
+def handle_timeouts(user_states, is_mine_fn, syncing_user_ids, now):
     """Checks the presence of users that have timed out and updates as
     appropriate.
 
     Args:
         user_states(list): List of UserPresenceState's to check.
         is_mine_fn (fn): Function that returns if a user_id is ours
-        user_to_num_current_syncs (dict): Mapping of user_id to number of currently
-            active syncs.
+        syncing_user_ids (set): Set of user_ids with active syncs.
         now (int): Current time in ms.
 
     Returns:
@@ -954,21 +1057,20 @@ def handle_timeouts(user_states, is_mine_fn, user_to_num_current_syncs, now):
     for state in user_states:
         is_mine = is_mine_fn(state.user_id)
 
-        new_state = handle_timeout(state, is_mine, user_to_num_current_syncs, now)
+        new_state = handle_timeout(state, is_mine, syncing_user_ids, now)
         if new_state:
             changes[state.user_id] = new_state
 
     return changes.values()
 
 
-def handle_timeout(state, is_mine, user_to_num_current_syncs, now):
+def handle_timeout(state, is_mine, syncing_user_ids, now):
     """Checks the presence of the user to see if any of the timers have elapsed
 
     Args:
         state (UserPresenceState)
         is_mine (bool): Whether the user is ours
-        user_to_num_current_syncs (dict): Mapping of user_id to number of currently
-            active syncs.
+        syncing_user_ids (set): Set of user_ids with active syncs.
         now (int): Current time in ms.
 
     Returns:
@@ -1002,7 +1104,7 @@ def handle_timeout(state, is_mine, user_to_num_current_syncs, now):
 
         # If there have been no syncs for a while (and none ongoing),
         # set presence to offline
-        if not user_to_num_current_syncs.get(user_id, 0):
+        if user_id not in syncing_user_ids:
             if now - state.last_user_sync_ts > SYNC_ONLINE_TIMEOUT:
                 state = state.copy_and_replace(
                     state=PresenceState.OFFLINE,
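`update_external_syncs` records, per external process, the set of syncing users plus a last-seen timestamp, and the timeout sweep drops any process that has gone quiet for `EXTERNAL_PROCESS_EXPIRY`, re-checking its users so they can time out to offline. The sweep has to pop the user set from one map and the timestamp from the other. A self-contained sketch of that bookkeeping (class name hypothetical):

```python
EXTERNAL_PROCESS_EXPIRY = 5 * 60 * 1000  # ms


class ExternalSyncTracker(object):
    def __init__(self):
        self.external_process_to_current_syncs = {}   # process_id -> set(user_id)
        self.external_process_last_updated_ms = {}    # process_id -> last update ts

    def update(self, process_id, syncing_user_ids, now_ms):
        self.external_process_to_current_syncs[process_id] = set(syncing_user_ids)
        self.external_process_last_updated_ms[process_id] = now_ms

    def expire(self, now_ms):
        """Drop processes we haven't heard from; return their users so the
        timeout handler can re-check whether they should now go offline."""
        users_to_check = set()
        expired = [
            process_id
            for process_id, last_update in self.external_process_last_updated_ms.items()
            if now_ms - last_update > EXTERNAL_PROCESS_EXPIRY
        ]
        for process_id in expired:
            users_to_check.update(
                self.external_process_to_current_syncs.pop(process_id, ())
            )
            self.external_process_last_updated_ms.pop(process_id, None)
        return users_to_check

    def currently_syncing(self):
        syncing = set()
        for users in self.external_process_to_current_syncs.values():
            syncing.update(users)
        return syncing


tracker = ExternalSyncTracker()
tracker.update("synchrotron-1", {"@alice:hs", "@bob:hs"}, now_ms=0)
assert tracker.expire(now_ms=EXTERNAL_PROCESS_EXPIRY + 1) == {"@alice:hs", "@bob:hs"}
assert tracker.currently_syncing() == set()
```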
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index e37409170d..d9ac09078d 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -13,15 +13,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import logging
+
 from twisted.internet import defer
 
+import synapse.types
 from synapse.api.errors import SynapseError, AuthError, CodeMessageException
-from synapse.types import UserID, Requester
-
+from synapse.types import UserID
 from ._base import BaseHandler
 
-import logging
-
 
 logger = logging.getLogger(__name__)
 
@@ -36,13 +36,6 @@ class ProfileHandler(BaseHandler):
             "profile", self.on_profile_query
         )
 
-        distributor = hs.get_distributor()
-
-        distributor.observe("registered_user", self.registered_user)
-
-    def registered_user(self, user):
-        return self.store.create_profile(user.localpart)
-
     @defer.inlineCallbacks
     def get_displayname(self, target_user):
         if self.hs.is_mine(target_user):
@@ -172,7 +165,9 @@ class ProfileHandler(BaseHandler):
             try:
                 # Assume the user isn't a guest because we don't let guests set
                 # profile or avatar data.
-                requester = Requester(user, "", False)
+                # XXX why are we recreating `requester` here for each room?
+                # what was wrong with the `requester` we were passed?
+                requester = synapse.types.create_requester(user)
                 yield handler.update_membership(
                     requester,
                     user,
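Requester construction moves behind `synapse.types.create_requester`, so call sites no longer pass positional placeholders like `Requester(user, "", False)`. A hypothetical re-implementation just to show the shape of such a factory (the real `Requester` carries more fields):

```python
from collections import namedtuple

Requester = namedtuple("Requester", ["user", "access_token_id", "is_guest"])


def create_requester(user, access_token_id=None, is_guest=False):
    # Defaults replace the opaque positional placeholders at call sites.
    return Requester(user, access_token_id, is_guest)


requester = create_requester("@alice:example.com")
assert requester.is_guest is False
```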
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index 5883b9111e..dd75c4fecf 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -14,19 +14,19 @@
 # limitations under the License.
 
 """Contains functions for registering clients."""
+import logging
+import urllib
+
 from twisted.internet import defer
 
-from synapse.types import UserID
+import synapse.types
 from synapse.api.errors import (
     AuthError, Codes, SynapseError, RegistrationError, InvalidCaptchaError
 )
-from ._base import BaseHandler
-from synapse.util.async import run_on_reactor
 from synapse.http.client import CaptchaServerHttpClient
-from synapse.util.distributor import registered_user
-
-import logging
-import urllib
+from synapse.types import UserID
+from synapse.util.async import run_on_reactor
+from ._base import BaseHandler
 
 logger = logging.getLogger(__name__)
 
@@ -37,8 +37,6 @@ class RegistrationHandler(BaseHandler):
         super(RegistrationHandler, self).__init__(hs)
 
         self.auth = hs.get_auth()
-        self.distributor = hs.get_distributor()
-        self.distributor.declare("registered_user")
         self.captcha_client = CaptchaServerHttpClient(hs)
 
         self._next_generated_user_id = None
@@ -55,6 +53,13 @@ class RegistrationHandler(BaseHandler):
                 Codes.INVALID_USERNAME
             )
 
+        if localpart.startswith('_'):
+            raise SynapseError(
+                400,
+                "User ID may not begin with _",
+                Codes.INVALID_USERNAME
+            )
+
         user = UserID(localpart, self.hs.hostname)
         user_id = user.to_string()
 
@@ -93,7 +98,8 @@ class RegistrationHandler(BaseHandler):
         password=None,
         generate_token=True,
         guest_access_token=None,
-        make_guest=False
+        make_guest=False,
+        admin=False,
     ):
         """Registers a new client on the server.
 
@@ -101,8 +107,13 @@ class RegistrationHandler(BaseHandler):
             localpart : The local part of the user ID to register. If None,
               one will be generated.
             password (str) : The password to assign to this user so they can
-            login again. This can be None which means they cannot login again
-            via a password (e.g. the user is an application service user).
+              login again. This can be None which means they cannot login again
+              via a password (e.g. the user is an application service user).
+            generate_token (bool): Whether a new access token should be
+              generated. Having this be True should be considered deprecated,
+              since it offers no means of associating a device_id with the
+              access_token. Instead you should call auth_handler.issue_access_token
+              after registration.
         Returns:
             A tuple of (user_id, access_token).
         Raises:
@@ -140,9 +151,12 @@ class RegistrationHandler(BaseHandler):
                 password_hash=password_hash,
                 was_guest=was_guest,
                 make_guest=make_guest,
+                create_profile_with_localpart=(
+                    # If the user was a guest then they already have a profile
+                    None if was_guest else user.localpart
+                ),
+                admin=admin,
             )
-
-            yield registered_user(self.distributor, user)
         else:
             # autogen a sequential user ID
             attempts = 0
@@ -160,7 +174,8 @@ class RegistrationHandler(BaseHandler):
                         user_id=user_id,
                         token=token,
                         password_hash=password_hash,
-                        make_guest=make_guest
+                        make_guest=make_guest,
+                        create_profile_with_localpart=user.localpart,
                     )
                 except SynapseError:
                     # if user id is taken, just generate another
@@ -168,7 +183,6 @@ class RegistrationHandler(BaseHandler):
                     user_id = None
                     token = None
                     attempts += 1
-            yield registered_user(self.distributor, user)
 
         # We used to generate default identicons here, but nowadays
         # we want clients to generate their own as part of their branding
@@ -195,15 +209,13 @@ class RegistrationHandler(BaseHandler):
             user_id, allowed_appservice=service
         )
 
-        token = self.auth_handler().generate_access_token(user_id)
         yield self.store.register(
             user_id=user_id,
-            token=token,
             password_hash="",
             appservice_id=service_id,
+            create_profile_with_localpart=user.localpart,
         )
-        yield registered_user(self.distributor, user)
-        defer.returnValue((user_id, token))
+        defer.returnValue(user_id)
 
     @defer.inlineCallbacks
     def check_recaptcha(self, ip, private_key, challenge, response):
@@ -248,9 +260,9 @@ class RegistrationHandler(BaseHandler):
             yield self.store.register(
                 user_id=user_id,
                 token=token,
-                password_hash=None
+                password_hash=None,
+                create_profile_with_localpart=user.localpart,
             )
-            yield registered_user(self.distributor, user)
         except Exception as e:
             yield self.store.add_access_token_to_user(user_id, token)
             # Ignore Registration errors
@@ -359,8 +371,10 @@ class RegistrationHandler(BaseHandler):
         defer.returnValue(data)
 
     @defer.inlineCallbacks
-    def get_or_create_user(self, localpart, displayname, duration_seconds):
-        """Creates a new user or returns an access token for an existing one
+    def get_or_create_user(self, localpart, displayname, duration_in_ms,
+                           password_hash=None):
+        """Creates a new user if the user does not exist,
+        else revokes all previous access tokens and generates a new one.
 
         Args:
             localpart : The local part of the user ID to register. If None,
@@ -387,32 +401,32 @@ class RegistrationHandler(BaseHandler):
 
         user = UserID(localpart, self.hs.hostname)
         user_id = user.to_string()
-        auth_handler = self.hs.get_handlers().auth_handler
-        token = auth_handler.generate_short_term_login_token(user_id, duration_seconds)
+        token = self.auth_handler().generate_access_token(
+            user_id, None, duration_in_ms)
 
         if need_register:
             yield self.store.register(
                 user_id=user_id,
                 token=token,
-                password_hash=None
+                password_hash=password_hash,
+                create_profile_with_localpart=user.localpart,
             )
-
-            yield registered_user(self.distributor, user)
         else:
-            yield self.store.flush_user(user_id=user_id)
+            yield self.store.user_delete_access_tokens(user_id=user_id)
             yield self.store.add_access_token_to_user(user_id=user_id, token=token)
 
         if displayname is not None:
             logger.info("setting user display name: %s -> %s", user_id, displayname)
             profile_handler = self.hs.get_handlers().profile_handler
+            requester = synapse.types.create_requester(user)
             yield profile_handler.set_displayname(
-                user, user, displayname
+                user, requester, displayname
             )
 
         defer.returnValue((user_id, token))
 
     def auth_handler(self):
-        return self.hs.get_handlers().auth_handler
+        return self.hs.get_auth_handler()
 
     @defer.inlineCallbacks
     def guest_access_token_for(self, medium, address, inviter_user_id):
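Registration now also rejects localparts beginning with an underscore, presumably to reserve that prefix (the diff itself gives no rationale). A self-contained sketch of the check, with `SynapseError` stubbed for illustration:

```python
class SynapseError(Exception):
    def __init__(self, code, msg, errcode=None):
        super(SynapseError, self).__init__(msg)
        self.code = code
        self.errcode = errcode


def check_username(localpart):
    # startswith also handles the empty-localpart case safely.
    if localpart.startswith("_"):
        raise SynapseError(400, "User ID may not begin with _", "M_INVALID_USERNAME")


check_username("alice")  # fine
try:
    check_username("_bridge_bot")
except SynapseError as e:
    assert e.code == 400
```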
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 3d63b3c513..bf6b1c1535 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -20,7 +20,7 @@ from ._base import BaseHandler
 
 from synapse.types import UserID, RoomAlias, RoomID, RoomStreamToken
 from synapse.api.constants import (
-    EventTypes, JoinRules, RoomCreationPreset,
+    EventTypes, JoinRules, RoomCreationPreset, Membership,
 )
 from synapse.api.errors import AuthError, StoreError, SynapseError
 from synapse.util import stringutils
@@ -36,6 +36,8 @@ import string
 
 logger = logging.getLogger(__name__)
 
+REMOTE_ROOM_LIST_POLL_INTERVAL = 60 * 1000
+
 id_server_scheme = "https://"
 
 
@@ -343,9 +345,15 @@ class RoomCreationHandler(BaseHandler):
 class RoomListHandler(BaseHandler):
     def __init__(self, hs):
         super(RoomListHandler, self).__init__(hs)
-        self.response_cache = ResponseCache()
+        self.response_cache = ResponseCache(hs)
+        self.remote_list_request_cache = ResponseCache(hs)
+        self.remote_list_cache = {}
+        self.fetch_looping_call = hs.get_clock().looping_call(
+            self.fetch_all_remote_lists, REMOTE_ROOM_LIST_POLL_INTERVAL
+        )
+        self.fetch_all_remote_lists()
 
-    def get_public_room_list(self):
+    def get_local_public_room_list(self):
         result = self.response_cache.get(())
         if not result:
             result = self.response_cache.set((), self._get_public_room_list())
@@ -359,14 +367,10 @@ class RoomListHandler(BaseHandler):
 
         @defer.inlineCallbacks
         def handle_room(room_id):
-            # We pull each bit of state out indvidually to avoid pulling the
-            # full state into memory. Due to how the caching works this should
-            # be fairly quick, even if not originally in the cache.
-            def get_state(etype, state_key):
-                return self.state_handler.get_current_state(room_id, etype, state_key)
+            current_state = yield self.state_handler.get_current_state(room_id)
 
             # Double check that this is actually a public room.
-            join_rules_event = yield get_state(EventTypes.JoinRules, "")
+            join_rules_event = current_state.get((EventTypes.JoinRules, ""))
             if join_rules_event:
                 join_rule = join_rules_event.content.get("join_rule", None)
                 if join_rule and join_rule != JoinRules.PUBLIC:
@@ -374,47 +378,51 @@ class RoomListHandler(BaseHandler):
 
             result = {"room_id": room_id}
 
-            joined_users = yield self.store.get_users_in_room(room_id)
-            if len(joined_users) == 0:
+            num_joined_users = len([
+                1 for _, event in current_state.items()
+                if event.type == EventTypes.Member
+                and event.membership == Membership.JOIN
+            ])
+            if num_joined_users == 0:
                 return
 
-            result["num_joined_members"] = len(joined_users)
+            result["num_joined_members"] = num_joined_users
 
             aliases = yield self.store.get_aliases_for_room(room_id)
             if aliases:
                 result["aliases"] = aliases
 
-            name_event = yield get_state(EventTypes.Name, "")
+            name_event = current_state.get((EventTypes.Name, ""))
             if name_event:
                 name = name_event.content.get("name", None)
                 if name:
                     result["name"] = name
 
-            topic_event = yield get_state(EventTypes.Topic, "")
+            topic_event = current_state.get((EventTypes.Topic, ""))
             if topic_event:
                 topic = topic_event.content.get("topic", None)
                 if topic:
                     result["topic"] = topic
 
-            canonical_event = yield get_state(EventTypes.CanonicalAlias, "")
+            canonical_event = current_state.get((EventTypes.CanonicalAlias, ""))
             if canonical_event:
                 canonical_alias = canonical_event.content.get("alias", None)
                 if canonical_alias:
                     result["canonical_alias"] = canonical_alias
 
-            visibility_event = yield get_state(EventTypes.RoomHistoryVisibility, "")
+            visibility_event = current_state.get((EventTypes.RoomHistoryVisibility, ""))
             visibility = None
             if visibility_event:
                 visibility = visibility_event.content.get("history_visibility", None)
             result["world_readable"] = visibility == "world_readable"
 
-            guest_event = yield get_state(EventTypes.GuestAccess, "")
+            guest_event = current_state.get((EventTypes.GuestAccess, ""))
             guest = None
             if guest_event:
                 guest = guest_event.content.get("guest_access", None)
             result["guest_can_join"] = guest == "can_join"
 
-            avatar_event = yield get_state("m.room.avatar", "")
+            avatar_event = current_state.get(("m.room.avatar", ""))
             if avatar_event:
                 avatar_url = avatar_event.content.get("url", None)
                 if avatar_url:
@@ -427,6 +435,55 @@ class RoomListHandler(BaseHandler):
         # FIXME (erikj): START is no longer a valid value
         defer.returnValue({"start": "START", "end": "END", "chunk": results})
 
+    @defer.inlineCallbacks
+    def fetch_all_remote_lists(self):
+        deferred = self.hs.get_replication_layer().get_public_rooms(
+            self.hs.config.secondary_directory_servers
+        )
+        self.remote_list_request_cache.set((), deferred)
+        self.remote_list_cache = yield deferred
+
+    @defer.inlineCallbacks
+    def get_aggregated_public_room_list(self):
+        """
+        Get the public room list from this server and the servers
+        specified in the secondary_directory_servers config option.
+        XXX: Pagination...
+        """
+        # We return the results from our cache, which is updated by a looping call,
+        # unless we're missing a cache entry, in which case wait for the result
+        # of the fetch if there's one in progress. If not, omit that server.
+        wait = False
+        for s in self.hs.config.secondary_directory_servers:
+            if s not in self.remote_list_cache:
+                logger.warn("No cached room list from %s: waiting for fetch", s)
+                wait = True
+                break
+
+        if wait and self.remote_list_request_cache.get(()):
+            yield self.remote_list_request_cache.get(())
+
+        public_rooms = yield self.get_local_public_room_list()
+
+        # keep track of which room IDs we've seen so we can de-dup
+        room_ids = set()
+
+        # tag all the ones in our list with our server name.
+        # Also add them to the de-duping set
+        for room in public_rooms['chunk']:
+            room["server_name"] = self.hs.hostname
+            room_ids.add(room["room_id"])
+
+        # Now add the results from federation
+        for server_name, server_result in self.remote_list_cache.items():
+            for room in server_result["chunk"]:
+                if room["room_id"] not in room_ids:
+                    room["server_name"] = server_name
+                    public_rooms["chunk"].append(room)
+                    room_ids.add(room["room_id"])
+
+        defer.returnValue(public_rooms)
+
 
 class RoomContextHandler(BaseHandler):
     @defer.inlineCallbacks
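`get_aggregated_public_room_list` overlays the federated lists on the local one: every local room is tagged with our own `server_name`, remote entries are tagged with their origin, and `room_id`s already seen are skipped. A standalone sketch of that merge (function name hypothetical, dict shapes as in the handler):

```python
def merge_room_lists(local_server, local_rooms, remote_lists):
    room_ids = set()
    merged = []
    # Local rooms win and get tagged with our own server name.
    for room in local_rooms:
        room["server_name"] = local_server
        room_ids.add(room["room_id"])
        merged.append(room)
    # Remote entries are tagged with their origin and de-duped by room_id.
    for server_name, result in remote_lists.items():
        for room in result["chunk"]:
            if room["room_id"] not in room_ids:
                room["server_name"] = server_name
                room_ids.add(room["room_id"])
                merged.append(room)
    return {"chunk": merged}


out = merge_room_lists(
    "example.com",
    [{"room_id": "!a:example.com"}],
    {"other.org": {"chunk": [
        {"room_id": "!a:example.com"},
        {"room_id": "!b:other.org"},
    ]}},
)
assert [r["room_id"] for r in out["chunk"]] == ["!a:example.com", "!b:other.org"]
```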
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 7e616f44fd..8cec8fc4ed 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -14,24 +14,22 @@
 # limitations under the License.
 
 
-from twisted.internet import defer
+import logging
 
-from ._base import BaseHandler
+from signedjson.key import decode_verify_key_bytes
+from signedjson.sign import verify_signed_json
+from twisted.internet import defer
+from unpaddedbase64 import decode_base64
 
-from synapse.types import UserID, RoomID, Requester
+import synapse.types
 from synapse.api.constants import (
     EventTypes, Membership,
 )
 from synapse.api.errors import AuthError, SynapseError, Codes
+from synapse.types import UserID, RoomID
 from synapse.util.async import Linearizer
 from synapse.util.distributor import user_left_room, user_joined_room
-
-from signedjson.sign import verify_signed_json
-from signedjson.key import decode_verify_key_bytes
-
-from unpaddedbase64 import decode_base64
-
-import logging
+from ._base import BaseHandler
 
 logger = logging.getLogger(__name__)
 
@@ -315,7 +313,7 @@ class RoomMemberHandler(BaseHandler):
             )
             assert self.hs.is_mine(sender), "Sender must be our own: %s" % (sender,)
         else:
-            requester = Requester(target_user, None, False)
+            requester = synapse.types.create_requester(target_user)
 
         message_handler = self.hs.get_handlers().message_handler
         prev_event = message_handler.deduplicate_state_event(event, context)
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 9ebfccc8bf..0ee4ebe504 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -13,7 +13,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.streams.config import PaginationConfig
 from synapse.api.constants import Membership, EventTypes
 from synapse.util.async import concurrently_execute
 from synapse.util.logcontext import LoggingContext
@@ -139,7 +138,7 @@ class SyncHandler(object):
         self.presence_handler = hs.get_presence_handler()
         self.event_sources = hs.get_event_sources()
         self.clock = hs.get_clock()
-        self.response_cache = ResponseCache()
+        self.response_cache = ResponseCache(hs)
 
     def wait_for_sync_for_user(self, sync_config, since_token=None, timeout=0,
                                full_state=False):
@@ -194,195 +193,15 @@ class SyncHandler(object):
         Returns:
             A Deferred SyncResult.
         """
-        if since_token is None or full_state:
-            return self.full_state_sync(sync_config, since_token)
-        else:
-            return self.incremental_sync_with_gap(sync_config, since_token)
-
-    @defer.inlineCallbacks
-    def full_state_sync(self, sync_config, timeline_since_token):
-        """Get a sync for a client which is starting without any state.
-
-        If a 'message_since_token' is given, only timeline events which have
-        happened since that token will be returned.
-
-        Returns:
-            A Deferred SyncResult.
-        """
-        now_token = yield self.event_sources.get_current_token()
-
-        now_token, ephemeral_by_room = yield self.ephemeral_by_room(
-            sync_config, now_token
-        )
-
-        presence_stream = self.event_sources.sources["presence"]
-        # TODO (mjark): This looks wrong, shouldn't we be getting the presence
-        # UP to the present rather than after the present?
-        pagination_config = PaginationConfig(from_token=now_token)
-        presence, _ = yield presence_stream.get_pagination_rows(
-            user=sync_config.user,
-            pagination_config=pagination_config.get_source_config("presence"),
-            key=None
-        )
-
-        membership_list = (
-            Membership.INVITE, Membership.JOIN, Membership.LEAVE, Membership.BAN
-        )
-
-        room_list = yield self.store.get_rooms_for_user_where_membership_is(
-            user_id=sync_config.user.to_string(),
-            membership_list=membership_list
-        )
-
-        account_data, account_data_by_room = (
-            yield self.store.get_account_data_for_user(
-                sync_config.user.to_string()
-            )
-        )
-
-        account_data['m.push_rules'] = yield self.push_rules_for_user(
-            sync_config.user
-        )
-
-        tags_by_room = yield self.store.get_tags_for_user(
-            sync_config.user.to_string()
-        )
-
-        ignored_users = account_data.get(
-            "m.ignored_user_list", {}
-        ).get("ignored_users", {}).keys()
-
-        joined = []
-        invited = []
-        archived = []
-
-        user_id = sync_config.user.to_string()
-
-        @defer.inlineCallbacks
-        def _generate_room_entry(event):
-            if event.membership == Membership.JOIN:
-                room_result = yield self.full_state_sync_for_joined_room(
-                    room_id=event.room_id,
-                    sync_config=sync_config,
-                    now_token=now_token,
-                    timeline_since_token=timeline_since_token,
-                    ephemeral_by_room=ephemeral_by_room,
-                    tags_by_room=tags_by_room,
-                    account_data_by_room=account_data_by_room,
-                )
-                joined.append(room_result)
-            elif event.membership == Membership.INVITE:
-                if event.sender in ignored_users:
-                    return
-                invite = yield self.store.get_event(event.event_id)
-                invited.append(InvitedSyncResult(
-                    room_id=event.room_id,
-                    invite=invite,
-                ))
-            elif event.membership in (Membership.LEAVE, Membership.BAN):
-                # Always send down rooms we were banned or kicked from.
-                if not sync_config.filter_collection.include_leave:
-                    if event.membership == Membership.LEAVE:
-                        if user_id == event.sender:
-                            return
-
-                leave_token = now_token.copy_and_replace(
-                    "room_key", "s%d" % (event.stream_ordering,)
-                )
-                room_result = yield self.full_state_sync_for_archived_room(
-                    sync_config=sync_config,
-                    room_id=event.room_id,
-                    leave_event_id=event.event_id,
-                    leave_token=leave_token,
-                    timeline_since_token=timeline_since_token,
-                    tags_by_room=tags_by_room,
-                    account_data_by_room=account_data_by_room,
-                )
-                archived.append(room_result)
-
-        yield concurrently_execute(_generate_room_entry, room_list, 10)
-
-        account_data_for_user = sync_config.filter_collection.filter_account_data(
-            self.account_data_for_user(account_data)
-        )
-
-        presence = sync_config.filter_collection.filter_presence(
-            presence
-        )
-
-        defer.returnValue(SyncResult(
-            presence=presence,
-            account_data=account_data_for_user,
-            joined=joined,
-            invited=invited,
-            archived=archived,
-            next_batch=now_token,
-        ))
-
-    @defer.inlineCallbacks
-    def full_state_sync_for_joined_room(self, room_id, sync_config,
-                                        now_token, timeline_since_token,
-                                        ephemeral_by_room, tags_by_room,
-                                        account_data_by_room):
-        """Sync a room for a client which is starting without any state
-        Returns:
-            A Deferred JoinedSyncResult.
-        """
-
-        batch = yield self.load_filtered_recents(
-            room_id, sync_config, now_token, since_token=timeline_since_token
-        )
-
-        room_sync = yield self.incremental_sync_with_gap_for_room(
-            room_id, sync_config,
-            now_token=now_token,
-            since_token=timeline_since_token,
-            ephemeral_by_room=ephemeral_by_room,
-            tags_by_room=tags_by_room,
-            account_data_by_room=account_data_by_room,
-            batch=batch,
-            full_state=True,
-        )
-
-        defer.returnValue(room_sync)
+        return self.generate_sync_result(sync_config, since_token, full_state)
 
     @defer.inlineCallbacks
     def push_rules_for_user(self, user):
         user_id = user.to_string()
-        rawrules = yield self.store.get_push_rules_for_user(user_id)
-        enabled_map = yield self.store.get_push_rules_enabled_for_user(user_id)
-        rules = format_push_rules_for_user(user, rawrules, enabled_map)
+        rules = yield self.store.get_push_rules_for_user(user_id)
+        rules = format_push_rules_for_user(user, rules)
         defer.returnValue(rules)
 
-    def account_data_for_user(self, account_data):
-        account_data_events = []
-
-        for account_data_type, content in account_data.items():
-            account_data_events.append({
-                "type": account_data_type,
-                "content": content,
-            })
-
-        return account_data_events
-
-    def account_data_for_room(self, room_id, tags_by_room, account_data_by_room):
-        account_data_events = []
-        tags = tags_by_room.get(room_id)
-        if tags is not None:
-            account_data_events.append({
-                "type": "m.tag",
-                "content": {"tags": tags},
-            })
-
-        account_data = account_data_by_room.get(room_id, {})
-        for account_data_type, content in account_data.items():
-            account_data_events.append({
-                "type": account_data_type,
-                "content": content,
-            })
-
-        return account_data_events
-
     @defer.inlineCallbacks
     def ephemeral_by_room(self, sync_config, now_token, since_token=None):
         """Get the ephemeral events for each room the user is in
@@ -445,258 +264,22 @@ class SyncHandler(object):
 
         defer.returnValue((now_token, ephemeral_by_room))
 
-    def full_state_sync_for_archived_room(self, room_id, sync_config,
-                                          leave_event_id, leave_token,
-                                          timeline_since_token, tags_by_room,
-                                          account_data_by_room):
-        """Sync a room for a client which is starting without any state
-        Returns:
-            A Deferred ArchivedSyncResult.
-        """
-
-        return self.incremental_sync_for_archived_room(
-            sync_config, room_id, leave_event_id, timeline_since_token, tags_by_room,
-            account_data_by_room, full_state=True, leave_token=leave_token,
-        )
-
-    @defer.inlineCallbacks
-    def incremental_sync_with_gap(self, sync_config, since_token):
-        """ Get the incremental delta needed to bring the client up to
-        date with the server.
-        Returns:
-            A Deferred SyncResult.
-        """
-        now_token = yield self.event_sources.get_current_token()
-
-        rooms = yield self.store.get_rooms_for_user(sync_config.user.to_string())
-        room_ids = [room.room_id for room in rooms]
-
-        presence_source = self.event_sources.sources["presence"]
-        presence, presence_key = yield presence_source.get_new_events(
-            user=sync_config.user,
-            from_key=since_token.presence_key,
-            limit=sync_config.filter_collection.presence_limit(),
-            room_ids=room_ids,
-            is_guest=sync_config.is_guest,
-        )
-        now_token = now_token.copy_and_replace("presence_key", presence_key)
-
-        now_token, ephemeral_by_room = yield self.ephemeral_by_room(
-            sync_config, now_token, since_token
-        )
-
-        app_service = yield self.store.get_app_service_by_user_id(
-            sync_config.user.to_string()
-        )
-        if app_service:
-            rooms = yield self.store.get_app_service_rooms(app_service)
-            joined_room_ids = set(r.room_id for r in rooms)
-        else:
-            rooms = yield self.store.get_rooms_for_user(
-                sync_config.user.to_string()
-            )
-            joined_room_ids = set(r.room_id for r in rooms)
-
-        user_id = sync_config.user.to_string()
-
-        timeline_limit = sync_config.filter_collection.timeline_limit()
-
-        tags_by_room = yield self.store.get_updated_tags(
-            user_id,
-            since_token.account_data_key,
-        )
-
-        account_data, account_data_by_room = (
-            yield self.store.get_updated_account_data_for_user(
-                user_id,
-                since_token.account_data_key,
-            )
-        )
-
-        push_rules_changed = yield self.store.have_push_rules_changed_for_user(
-            user_id, int(since_token.push_rules_key)
-        )
-
-        if push_rules_changed:
-            account_data["m.push_rules"] = yield self.push_rules_for_user(
-                sync_config.user
-            )
-
-        ignored_account_data = yield self.store.get_global_account_data_by_type_for_user(
-            "m.ignored_user_list", user_id=user_id,
-        )
-
-        if ignored_account_data:
-            ignored_users = ignored_account_data.get("ignored_users", {}).keys()
-        else:
-            ignored_users = frozenset()
-
-        # Get a list of membership change events that have happened.
-        rooms_changed = yield self.store.get_membership_changes_for_user(
-            user_id, since_token.room_key, now_token.room_key
-        )
-
-        mem_change_events_by_room_id = {}
-        for event in rooms_changed:
-            mem_change_events_by_room_id.setdefault(event.room_id, []).append(event)
-
-        newly_joined_rooms = []
-        archived = []
-        invited = []
-        for room_id, events in mem_change_events_by_room_id.items():
-            non_joins = [e for e in events if e.membership != Membership.JOIN]
-            has_join = len(non_joins) != len(events)
-
-            # We want to figure out if we joined the room at some point since
-            # the last sync (even if we have since left). This is to make sure
-            # we do send down the room, and with full state, where necessary
-            if room_id in joined_room_ids or has_join:
-                old_state = yield self.get_state_at(room_id, since_token)
-                old_mem_ev = old_state.get((EventTypes.Member, user_id), None)
-                if not old_mem_ev or old_mem_ev.membership != Membership.JOIN:
-                        newly_joined_rooms.append(room_id)
-
-                if room_id in joined_room_ids:
-                    continue
-
-            if not non_joins:
-                continue
-
-            # Only bother if we're still currently invited
-            should_invite = non_joins[-1].membership == Membership.INVITE
-            if should_invite:
-                if event.sender not in ignored_users:
-                    room_sync = InvitedSyncResult(room_id, invite=non_joins[-1])
-                    if room_sync:
-                        invited.append(room_sync)
-
-            # Always include leave/ban events. Just take the last one.
-            # TODO: How do we handle ban -> leave in same batch?
-            leave_events = [
-                e for e in non_joins
-                if e.membership in (Membership.LEAVE, Membership.BAN)
-            ]
-
-            if leave_events:
-                leave_event = leave_events[-1]
-                room_sync = yield self.incremental_sync_for_archived_room(
-                    sync_config, room_id, leave_event.event_id, since_token,
-                    tags_by_room, account_data_by_room,
-                    full_state=room_id in newly_joined_rooms
-                )
-                if room_sync:
-                    archived.append(room_sync)
-
-        # Get all events for rooms we're currently joined to.
-        room_to_events = yield self.store.get_room_events_stream_for_rooms(
-            room_ids=joined_room_ids,
-            from_key=since_token.room_key,
-            to_key=now_token.room_key,
-            limit=timeline_limit + 1,
-        )
-
-        joined = []
-        # We loop through all room ids, even if there are no new events, in case
-        # there are non room events taht we need to notify about.
-        for room_id in joined_room_ids:
-            room_entry = room_to_events.get(room_id, None)
-
-            if room_entry:
-                events, start_key = room_entry
-
-                prev_batch_token = now_token.copy_and_replace("room_key", start_key)
-
-                newly_joined_room = room_id in newly_joined_rooms
-                full_state = newly_joined_room
-
-                batch = yield self.load_filtered_recents(
-                    room_id, sync_config, prev_batch_token,
-                    since_token=since_token,
-                    recents=events,
-                    newly_joined_room=newly_joined_room,
-                )
-            else:
-                batch = TimelineBatch(
-                    events=[],
-                    prev_batch=since_token,
-                    limited=False,
-                )
-                full_state = False
-
-            room_sync = yield self.incremental_sync_with_gap_for_room(
-                room_id=room_id,
-                sync_config=sync_config,
-                since_token=since_token,
-                now_token=now_token,
-                ephemeral_by_room=ephemeral_by_room,
-                tags_by_room=tags_by_room,
-                account_data_by_room=account_data_by_room,
-                batch=batch,
-                full_state=full_state,
-            )
-            if room_sync:
-                joined.append(room_sync)
-
-        # For each newly joined room, we want to send down presence of
-        # existing users.
-        presence_handler = self.presence_handler
-        extra_presence_users = set()
-        for room_id in newly_joined_rooms:
-            users = yield self.store.get_users_in_room(event.room_id)
-            extra_presence_users.update(users)
-
-        # For each new member, send down presence.
-        for joined_sync in joined:
-            it = itertools.chain(joined_sync.timeline.events, joined_sync.state.values())
-            for event in it:
-                if event.type == EventTypes.Member:
-                    if event.membership == Membership.JOIN:
-                        extra_presence_users.add(event.state_key)
-
-        states = yield presence_handler.get_states(
-            [u for u in extra_presence_users if u != user_id],
-            as_event=True,
-        )
-        presence.extend(states)
-
-        account_data_for_user = sync_config.filter_collection.filter_account_data(
-            self.account_data_for_user(account_data)
-        )
-
-        presence = sync_config.filter_collection.filter_presence(
-            presence
-        )
-
-        defer.returnValue(SyncResult(
-            presence=presence,
-            account_data=account_data_for_user,
-            joined=joined,
-            invited=invited,
-            archived=archived,
-            next_batch=now_token,
-        ))
-
     @defer.inlineCallbacks
-    def load_filtered_recents(self, room_id, sync_config, now_token,
-                              since_token=None, recents=None, newly_joined_room=False):
+    def _load_filtered_recents(self, room_id, sync_config, now_token,
+                               since_token=None, recents=None, newly_joined_room=False):
         """
         Returns:
             a Deferred TimelineBatch
         """
         with Measure(self.clock, "load_filtered_recents"):
-            filtering_factor = 2
             timeline_limit = sync_config.filter_collection.timeline_limit()
-            load_limit = max(timeline_limit * filtering_factor, 10)
-            max_repeat = 5  # Only try a few times per room, otherwise
-            room_key = now_token.room_key
-            end_key = room_key
 
             if recents is None or newly_joined_room or timeline_limit < len(recents):
                 limited = True
             else:
                 limited = False
 
-            if recents is not None:
+            if recents:
                 recents = sync_config.filter_collection.filter_room_timeline(recents)
                 recents = yield filter_events_for_client(
                     self.store,
@@ -706,6 +289,19 @@ class SyncHandler(object):
             else:
                 recents = []
 
+            if not limited:
+                defer.returnValue(TimelineBatch(
+                    events=recents,
+                    prev_batch=now_token,
+                    limited=False
+                ))
+
+            filtering_factor = 2
+            load_limit = max(timeline_limit * filtering_factor, 10)
+            max_repeat = 5  # Only try a few times per room, otherwise give up
+            room_key = now_token.room_key
+            end_key = room_key
+
             since_key = None
             if since_token and not newly_joined_room:
                 since_key = since_token.room_key
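`_load_filtered_recents` now returns early when the batch is not limited: the events handed in are already the complete answer, so the over-fetch-and-filter loop (`load_limit = max(timeline_limit * 2, 10)`, up to `max_repeat` passes) only runs on the limited path. A simplified sketch of the reordering (helper names hypothetical):

```python
def load_recents(recents, timeline_limit, newly_joined_room, fetch_more):
    limited = (
        recents is None or newly_joined_room or timeline_limit < len(recents)
    )
    recents = recents or []

    if not limited:
        # Fast path added by this change: nothing more to load.
        return {"events": recents, "limited": False}

    # Slow path (heavily simplified): over-fetch and trim; the real code
    # filters for the client and retries a bounded number of times.
    events = fetch_more(max(timeline_limit * 2, 10))
    return {"events": events[:timeline_limit], "limited": True}


result = load_recents(["ev1", "ev2"], timeline_limit=10,
                      newly_joined_room=False, fetch_more=lambda n: [])
assert result == {"events": ["ev1", "ev2"], "limited": False}
```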
@@ -749,103 +345,6 @@ class SyncHandler(object):
         ))
 
     @defer.inlineCallbacks
-    def incremental_sync_with_gap_for_room(self, room_id, sync_config,
-                                           since_token, now_token,
-                                           ephemeral_by_room, tags_by_room,
-                                           account_data_by_room,
-                                           batch, full_state=False):
-        state = yield self.compute_state_delta(
-            room_id, batch, sync_config, since_token, now_token,
-            full_state=full_state
-        )
-
-        account_data = self.account_data_for_room(
-            room_id, tags_by_room, account_data_by_room
-        )
-
-        account_data = sync_config.filter_collection.filter_room_account_data(
-            account_data
-        )
-
-        ephemeral = sync_config.filter_collection.filter_room_ephemeral(
-            ephemeral_by_room.get(room_id, [])
-        )
-
-        unread_notifications = {}
-        room_sync = JoinedSyncResult(
-            room_id=room_id,
-            timeline=batch,
-            state=state,
-            ephemeral=ephemeral,
-            account_data=account_data,
-            unread_notifications=unread_notifications,
-        )
-
-        if room_sync:
-            notifs = yield self.unread_notifs_for_room_id(
-                room_id, sync_config
-            )
-
-            if notifs is not None:
-                unread_notifications["notification_count"] = notifs["notify_count"]
-                unread_notifications["highlight_count"] = notifs["highlight_count"]
-
-        logger.debug("Room sync: %r", room_sync)
-
-        defer.returnValue(room_sync)
-
-    @defer.inlineCallbacks
-    def incremental_sync_for_archived_room(self, sync_config, room_id, leave_event_id,
-                                           since_token, tags_by_room,
-                                           account_data_by_room, full_state,
-                                           leave_token=None):
-        """ Get the incremental delta needed to bring the client up to date for
-        the archived room.
-        Returns:
-            A Deferred ArchivedSyncResult
-        """
-
-        if not leave_token:
-            stream_token = yield self.store.get_stream_token_for_event(
-                leave_event_id
-            )
-
-            leave_token = since_token.copy_and_replace("room_key", stream_token)
-
-        if since_token and since_token.is_after(leave_token):
-            defer.returnValue(None)
-
-        batch = yield self.load_filtered_recents(
-            room_id, sync_config, leave_token, since_token,
-        )
-
-        logger.debug("Recents %r", batch)
-
-        state_events_delta = yield self.compute_state_delta(
-            room_id, batch, sync_config, since_token, leave_token,
-            full_state=full_state
-        )
-
-        account_data = self.account_data_for_room(
-            room_id, tags_by_room, account_data_by_room
-        )
-
-        account_data = sync_config.filter_collection.filter_room_account_data(
-            account_data
-        )
-
-        room_sync = ArchivedSyncResult(
-            room_id=room_id,
-            timeline=batch,
-            state=state_events_delta,
-            account_data=account_data,
-        )
-
-        logger.debug("Room sync: %r", room_sync)
-
-        defer.returnValue(room_sync)
-
-    @defer.inlineCallbacks
     def get_state_after_event(self, event):
         """
         Get the room state after the given event
@@ -970,26 +469,6 @@ class SyncHandler(object):
                 for e in sync_config.filter_collection.filter_room_state(state.values())
             })
 
-    def check_joined_room(self, sync_config, state_delta):
-        """
-        Check if the user has just joined the given room (so should
-        be given the full state)
-
-        Args:
-            sync_config(synapse.handlers.sync.SyncConfig):
-            state_delta(dict[(str,str), synapse.events.FrozenEvent]): the
-                difference in state since the last sync
-
-        Returns:
-             A deferred Tuple (state_delta, limited)
-        """
-        join_event = state_delta.get((
-            EventTypes.Member, sync_config.user.to_string()), None)
-        if join_event is not None:
-            if join_event.content["membership"] == Membership.JOIN:
-                return True
-        return False
-
     @defer.inlineCallbacks
     def unread_notifs_for_room_id(self, room_id, sync_config):
         with Measure(self.clock, "unread_notifs_for_room_id"):
@@ -1010,6 +489,551 @@ class SyncHandler(object):
             # count is whatever it was last time.
             defer.returnValue(None)
 
+    @defer.inlineCallbacks
+    def generate_sync_result(self, sync_config, since_token=None, full_state=False):
+        """Generates a sync result.
+
+        Args:
+            sync_config (SyncConfig)
+            since_token (StreamToken)
+            full_state (bool)
+
+        Returns:
+            Deferred(SyncResult)
+        """
+
+        # NB: The now_token gets changed by some of the generate_sync_* methods,
+        # because some of the underlying streams don't support querying up to a
+        # given point. Always use the `now_token` in `SyncResultBuilder`.
+        now_token = yield self.event_sources.get_current_token()
+
+        sync_result_builder = SyncResultBuilder(
+            sync_config, full_state,
+            since_token=since_token,
+            now_token=now_token,
+        )
+
+        account_data_by_room = yield self._generate_sync_entry_for_account_data(
+            sync_result_builder
+        )
+
+        res = yield self._generate_sync_entry_for_rooms(
+            sync_result_builder, account_data_by_room
+        )
+        newly_joined_rooms, newly_joined_users = res
+
+        yield self._generate_sync_entry_for_presence(
+            sync_result_builder, newly_joined_rooms, newly_joined_users
+        )
+
+        defer.returnValue(SyncResult(
+            presence=sync_result_builder.presence,
+            account_data=sync_result_builder.account_data,
+            joined=sync_result_builder.joined,
+            invited=sync_result_builder.invited,
+            archived=sync_result_builder.archived,
+            next_batch=sync_result_builder.now_token,
+        ))
+
+    @defer.inlineCallbacks
+    def _generate_sync_entry_for_account_data(self, sync_result_builder):
+        """Generates the account data portion of the sync response. Populates
+        `sync_result_builder` with the result.
+
+        Args:
+            sync_result_builder(SyncResultBuilder)
+
+        Returns:
+            Deferred(dict): A dictionary containing the per room account data.
+        """
+        sync_config = sync_result_builder.sync_config
+        user_id = sync_result_builder.sync_config.user.to_string()
+        since_token = sync_result_builder.since_token
+
+        if since_token and not sync_result_builder.full_state:
+            account_data, account_data_by_room = (
+                yield self.store.get_updated_account_data_for_user(
+                    user_id,
+                    since_token.account_data_key,
+                )
+            )
+
+            push_rules_changed = yield self.store.have_push_rules_changed_for_user(
+                user_id, int(since_token.push_rules_key)
+            )
+
+            if push_rules_changed:
+                account_data["m.push_rules"] = yield self.push_rules_for_user(
+                    sync_config.user
+                )
+        else:
+            account_data, account_data_by_room = (
+                yield self.store.get_account_data_for_user(
+                    sync_config.user.to_string()
+                )
+            )
+
+            account_data["m.push_rules"] = yield self.push_rules_for_user(
+                sync_config.user
+            )
+
+        account_data_for_user = sync_config.filter_collection.filter_account_data([
+            {"type": account_data_type, "content": content}
+            for account_data_type, content in account_data.items()
+        ])
+
+        sync_result_builder.account_data = account_data_for_user
+
+        defer.returnValue(account_data_by_room)
+
+    @defer.inlineCallbacks
+    def _generate_sync_entry_for_presence(self, sync_result_builder, newly_joined_rooms,
+                                          newly_joined_users):
+        """Generates the presence portion of the sync response. Populates the
+        `sync_result_builder` with the result.
+
+        Args:
+            sync_result_builder(SyncResultBuilder)
+            newly_joined_rooms(list): List of rooms that the user has joined
+                since the last sync (or empty if an initial sync)
+            newly_joined_users(list): List of users that have joined rooms
+                since the last sync (or empty if an initial sync)
+        """
+        now_token = sync_result_builder.now_token
+        sync_config = sync_result_builder.sync_config
+        user = sync_result_builder.sync_config.user
+
+        presence_source = self.event_sources.sources["presence"]
+
+        since_token = sync_result_builder.since_token
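+        # Incremental syncs include offline presence so clients hear about
+        # users going offline; initial syncs only include non-offline users.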
+        if since_token and not sync_result_builder.full_state:
+            presence_key = since_token.presence_key
+            include_offline = True
+        else:
+            presence_key = None
+            include_offline = False
+
+        presence, presence_key = yield presence_source.get_new_events(
+            user=user,
+            from_key=presence_key,
+            is_guest=sync_config.is_guest,
+            include_offline=include_offline,
+        )
+        sync_result_builder.now_token = now_token.copy_and_replace(
+            "presence_key", presence_key
+        )
+
+        extra_users_ids = set(newly_joined_users)
+        for room_id in newly_joined_rooms:
+            users = yield self.store.get_users_in_room(room_id)
+            extra_users_ids.update(users)
+        extra_users_ids.discard(user.to_string())
+
+        states = yield self.presence_handler.get_states(
+            extra_users_ids,
+            as_event=True,
+        )
+        presence.extend(states)
+
+        # Deduplicate the presence entries so that there's at most one per user
+        presence = {p["content"]["user_id"]: p for p in presence}.values()
+
+        presence = sync_config.filter_collection.filter_presence(
+            presence
+        )
+
+        sync_result_builder.presence = presence
+
+    @defer.inlineCallbacks
+    def _generate_sync_entry_for_rooms(self, sync_result_builder, account_data_by_room):
+        """Generates the rooms portion of the sync response. Populates the
+        `sync_result_builder` with the result.
+
+        Args:
+            sync_result_builder(SyncResultBuilder)
+            account_data_by_room(dict): Dictionary of per room account data
+
+        Returns:
+            Deferred(tuple): Returns a 2-tuple of
+            `(newly_joined_rooms, newly_joined_users)`
+        """
+        user_id = sync_result_builder.sync_config.user.to_string()
+
+        now_token, ephemeral_by_room = yield self.ephemeral_by_room(
+            sync_result_builder.sync_config,
+            now_token=sync_result_builder.now_token,
+            since_token=sync_result_builder.since_token,
+        )
+        sync_result_builder.now_token = now_token
+
+        ignored_account_data = yield self.store.get_global_account_data_by_type_for_user(
+            "m.ignored_user_list", user_id=user_id,
+        )
+
+        if ignored_account_data:
+            ignored_users = ignored_account_data.get("ignored_users", {}).keys()
+        else:
+            ignored_users = frozenset()
+
+        if sync_result_builder.since_token:
+            res = yield self._get_rooms_changed(sync_result_builder, ignored_users)
+            room_entries, invited, newly_joined_rooms = res
+
+            tags_by_room = yield self.store.get_updated_tags(
+                user_id,
+                sync_result_builder.since_token.account_data_key,
+            )
+        else:
+            res = yield self._get_all_rooms(sync_result_builder, ignored_users)
+            room_entries, invited, newly_joined_rooms = res
+
+            tags_by_room = yield self.store.get_tags_for_user(user_id)
+
+        def handle_room_entries(room_entry):
+            return self._generate_room_entry(
+                sync_result_builder,
+                ignored_users,
+                room_entry,
+                ephemeral=ephemeral_by_room.get(room_entry.room_id, []),
+                tags=tags_by_room.get(room_entry.room_id),
+                account_data=account_data_by_room.get(room_entry.room_id, {}),
+                always_include=sync_result_builder.full_state,
+            )
+
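+        # Build each room's sync entry, handling up to ten rooms concurrently.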
+        yield concurrently_execute(handle_room_entries, room_entries, 10)
+
+        sync_result_builder.invited.extend(invited)
+
+        # Now we want to get any newly joined users
+        newly_joined_users = set()
+        if sync_result_builder.since_token:
+            for joined_sync in sync_result_builder.joined:
+                it = itertools.chain(
+                    joined_sync.timeline.events, joined_sync.state.values()
+                )
+                for event in it:
+                    if event.type == EventTypes.Member:
+                        if event.membership == Membership.JOIN:
+                            newly_joined_users.add(event.state_key)
+
+        defer.returnValue((newly_joined_rooms, newly_joined_users))
+
+    @defer.inlineCallbacks
+    def _get_rooms_changed(self, sync_result_builder, ignored_users):
+        """Gets the the changes that have happened since the last sync.
+
+        Args:
+            sync_result_builder(SyncResultBuilder)
+            ignored_users(set(str)): Set of users ignored by user.
+
+        Returns:
+            Deferred(tuple): Returns a tuple of the form:
+            `([RoomSyncResultBuilder], [InvitedSyncResult], newly_joined_rooms)`
+        """
+        user_id = sync_result_builder.sync_config.user.to_string()
+        since_token = sync_result_builder.since_token
+        now_token = sync_result_builder.now_token
+        sync_config = sync_result_builder.sync_config
+
+        assert since_token
+
+        app_service = yield self.store.get_app_service_by_user_id(user_id)
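+        # Application service users are treated as being joined to every room
+        # their appservice is interested in, not just the rooms they are in.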
+        if app_service:
+            rooms = yield self.store.get_app_service_rooms(app_service)
+        else:
+            rooms = yield self.store.get_rooms_for_user(user_id)
+        joined_room_ids = set(r.room_id for r in rooms)
+
+        # Get a list of membership change events that have happened.
+        rooms_changed = yield self.store.get_membership_changes_for_user(
+            user_id, since_token.room_key, now_token.room_key
+        )
+
+        mem_change_events_by_room_id = {}
+        for event in rooms_changed:
+            mem_change_events_by_room_id.setdefault(event.room_id, []).append(event)
+
+        newly_joined_rooms = []
+        room_entries = []
+        invited = []
+        for room_id, events in mem_change_events_by_room_id.items():
+            non_joins = [e for e in events if e.membership != Membership.JOIN]
+            has_join = len(non_joins) != len(events)
+
+            # We want to figure out if we joined the room at some point since
+            # the last sync (even if we have since left). This is to make sure
+            # we send the room down, and with full state, where necessary
+            if room_id in joined_room_ids or has_join:
+                old_state = yield self.get_state_at(room_id, since_token)
+                old_mem_ev = old_state.get((EventTypes.Member, user_id), None)
+                if not old_mem_ev or old_mem_ev.membership != Membership.JOIN:
+                    newly_joined_rooms.append(room_id)
+
+                if room_id in joined_room_ids:
+                    continue
+
+            if not non_joins:
+                continue
+
+            # Only bother if we're still currently invited
+            should_invite = non_joins[-1].membership == Membership.INVITE
+            if should_invite:
+                # Use the invite's sender; `event` here would be a stale
+                # leftover from the membership-change loop above.
+                if non_joins[-1].sender not in ignored_users:
+                    room_sync = InvitedSyncResult(room_id, invite=non_joins[-1])
+                    invited.append(room_sync)
+
+            # Always include leave/ban events. Just take the last one.
+            # TODO: How do we handle ban -> leave in same batch?
+            leave_events = [
+                e for e in non_joins
+                if e.membership in (Membership.LEAVE, Membership.BAN)
+            ]
+
+            if leave_events:
+                leave_event = leave_events[-1]
+                leave_stream_token = yield self.store.get_stream_token_for_event(
+                    leave_event.event_id
+                )
+                leave_token = since_token.copy_and_replace(
+                    "room_key", leave_stream_token
+                )
+
+                if since_token and since_token.is_after(leave_token):
+                    continue
+
+                room_entries.append(RoomSyncResultBuilder(
+                    room_id=room_id,
+                    rtype="archived",
+                    events=None,
+                    newly_joined=room_id in newly_joined_rooms,
+                    full_state=False,
+                    since_token=since_token,
+                    upto_token=leave_token,
+                ))
+
+        timeline_limit = sync_config.filter_collection.timeline_limit()
+
+        # Get all events for rooms we're currently joined to.
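+        # (Fetch timeline_limit + 1 events so that the timeline batch can be
+        # marked as limited when more events arrived than the client asked for.)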
+        room_to_events = yield self.store.get_room_events_stream_for_rooms(
+            room_ids=joined_room_ids,
+            from_key=since_token.room_key,
+            to_key=now_token.room_key,
+            limit=timeline_limit + 1,
+        )
+
+        # We loop through all room ids, even if there are no new events, in case
+        # there are non-room events that we need to notify about.
+        for room_id in joined_room_ids:
+            room_entry = room_to_events.get(room_id, None)
+
+            if room_entry:
+                events, start_key = room_entry
+
+                prev_batch_token = now_token.copy_and_replace("room_key", start_key)
+
+                room_entries.append(RoomSyncResultBuilder(
+                    room_id=room_id,
+                    rtype="joined",
+                    events=events,
+                    newly_joined=room_id in newly_joined_rooms,
+                    full_state=False,
+                    since_token=None if room_id in newly_joined_rooms else since_token,
+                    upto_token=prev_batch_token,
+                ))
+            else:
+                room_entries.append(RoomSyncResultBuilder(
+                    room_id=room_id,
+                    rtype="joined",
+                    events=[],
+                    newly_joined=room_id in newly_joined_rooms,
+                    full_state=False,
+                    since_token=since_token,
+                    upto_token=since_token,
+                ))
+
+        defer.returnValue((room_entries, invited, newly_joined_rooms))
+
+    @defer.inlineCallbacks
+    def _get_all_rooms(self, sync_result_builder, ignored_users):
+        """Returns entries for all rooms for the user.
+
+        Args:
+            sync_result_builder(SyncResultBuilder)
+            ignored_users(set(str)): Set of users ignored by user.
+
+        Returns:
+            Deferred(tuple): Returns a tuple of the form:
+            `([RoomSyncResultBuilder], [InvitedSyncResult], [])`
+        """
+
+        user_id = sync_result_builder.sync_config.user.to_string()
+        since_token = sync_result_builder.since_token
+        now_token = sync_result_builder.now_token
+        sync_config = sync_result_builder.sync_config
+
+        membership_list = (
+            Membership.INVITE, Membership.JOIN, Membership.LEAVE, Membership.BAN
+        )
+
+        room_list = yield self.store.get_rooms_for_user_where_membership_is(
+            user_id=user_id,
+            membership_list=membership_list
+        )
+
+        room_entries = []
+        invited = []
+
+        for event in room_list:
+            if event.membership == Membership.JOIN:
+                room_entries.append(RoomSyncResultBuilder(
+                    room_id=event.room_id,
+                    rtype="joined",
+                    events=None,
+                    newly_joined=False,
+                    full_state=True,
+                    since_token=since_token,
+                    upto_token=now_token,
+                ))
+            elif event.membership == Membership.INVITE:
+                if event.sender in ignored_users:
+                    continue
+                invite = yield self.store.get_event(event.event_id)
+                invited.append(InvitedSyncResult(
+                    room_id=event.room_id,
+                    invite=invite,
+                ))
+            elif event.membership in (Membership.LEAVE, Membership.BAN):
+                # Always send down rooms we were banned or kicked from.
+                if not sync_config.filter_collection.include_leave:
+                    if event.membership == Membership.LEAVE:
+                        if user_id == event.sender:
+                            continue
+
+                leave_token = now_token.copy_and_replace(
+                    "room_key", "s%d" % (event.stream_ordering,)
+                )
+                room_entries.append(RoomSyncResultBuilder(
+                    room_id=event.room_id,
+                    rtype="archived",
+                    events=None,
+                    newly_joined=False,
+                    full_state=True,
+                    since_token=since_token,
+                    upto_token=leave_token,
+                ))
+
+        defer.returnValue((room_entries, invited, []))
+
+    @defer.inlineCallbacks
+    def _generate_room_entry(self, sync_result_builder, ignored_users,
+                             room_builder, ephemeral, tags, account_data,
+                             always_include=False):
+        """Populates the `joined` and `archived` section of `sync_result_builder`
+        based on the `room_builder`.
+
+        Args:
+            sync_result_builder(SyncResultBuilder)
+            ignored_users(set(str)): Set of users ignored by user.
+            room_builder(RoomSyncResultBuilder)
+            ephemeral(list): List of new ephemeral events for room
+            tags(list): List of *all* tags for room, or None if there has been
+                no change.
+            account_data(list): List of new account data for room
+            always_include(bool): Always include this room in the sync response,
+                even if empty.
+        """
+        newly_joined = room_builder.newly_joined
+        full_state = (
+            room_builder.full_state
+            or newly_joined
+            or sync_result_builder.full_state
+        )
+        events = room_builder.events
+
+        # We want to shortcut out as early as possible.
+        if not (always_include or account_data or ephemeral or full_state):
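+            # `events == []` (rather than a falsy check) distinguishes "no new
+            # events" from events=None, i.e. not yet loaded, which must not
+            # trigger the shortcut.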
+            if events == [] and tags is None:
+                return
+
+        since_token = sync_result_builder.since_token
+        now_token = sync_result_builder.now_token
+        sync_config = sync_result_builder.sync_config
+
+        room_id = room_builder.room_id
+        since_token = room_builder.since_token
+        upto_token = room_builder.upto_token
+
+        batch = yield self._load_filtered_recents(
+            room_id, sync_config,
+            now_token=upto_token,
+            since_token=since_token,
+            recents=events,
+            newly_joined_room=newly_joined,
+        )
+
+        account_data_events = []
+        if tags is not None:
+            account_data_events.append({
+                "type": "m.tag",
+                "content": {"tags": tags},
+            })
+
+        for account_data_type, content in account_data.items():
+            account_data_events.append({
+                "type": account_data_type,
+                "content": content,
+            })
+
+        account_data = sync_config.filter_collection.filter_room_account_data(
+            account_data_events
+        )
+
+        ephemeral = sync_config.filter_collection.filter_room_ephemeral(ephemeral)
+
+        if not (always_include or batch or account_data or ephemeral or full_state):
+            return
+
+        state = yield self.compute_state_delta(
+            room_id, batch, sync_config, since_token, now_token,
+            full_state=full_state
+        )
+
+        if room_builder.rtype == "joined":
+            unread_notifications = {}
+            room_sync = JoinedSyncResult(
+                room_id=room_id,
+                timeline=batch,
+                state=state,
+                ephemeral=ephemeral,
+                account_data=account_data,
+                unread_notifications=unread_notifications,
+            )
+
+            if room_sync or always_include:
+                notifs = yield self.unread_notifs_for_room_id(
+                    room_id, sync_config
+                )
+
+                if notifs is not None:
+                    unread_notifications["notification_count"] = notifs["notify_count"]
+                    unread_notifications["highlight_count"] = notifs["highlight_count"]
+
+                sync_result_builder.joined.append(room_sync)
+        elif room_builder.rtype == "archived":
+            room_sync = ArchivedSyncResult(
+                room_id=room_id,
+                timeline=batch,
+                state=state,
+                account_data=account_data,
+            )
+            if room_sync or always_include:
+                sync_result_builder.archived.append(room_sync)
+        else:
+            raise Exception("Unrecognized rtype: %r" % (room_builder.rtype,))
+
 
 def _action_has_highlight(actions):
     for action in actions:
@@ -1057,3 +1081,51 @@ def _calculate_state(timeline_contains, timeline_start, previous, current):
         (e.type, e.state_key): e
         for e in evs
     }
+
+
+class SyncResultBuilder(object):
+    "Used to help build up a new SyncResult for a user"
+    def __init__(self, sync_config, full_state, since_token, now_token):
+        """
+        Args:
+            sync_config(SyncConfig)
+            full_state(bool): The full_state flag as specified by user
+            since_token(StreamToken): The token supplied by user, or None.
+            now_token(StreamToken): The token to sync up to.
+        """
+        self.sync_config = sync_config
+        self.full_state = full_state
+        self.since_token = since_token
+        self.now_token = now_token
+
+        self.presence = []
+        self.account_data = []
+        self.joined = []
+        self.invited = []
+        self.archived = []
+
+
+class RoomSyncResultBuilder(object):
+    """Stores information needed to create either a `JoinedSyncResult` or
+    `ArchivedSyncResult`.
+    """
+    def __init__(self, room_id, rtype, events, newly_joined, full_state,
+                 since_token, upto_token):
+        """
+        Args:
+            room_id(str)
+            rtype(str): One of `"joined"` or `"archived"`
+            events(list): List of events to include in the room (more events
+                may be added when generating the result).
+            newly_joined(bool): If the user has newly joined the room
+            full_state(bool): Whether the full state should be sent in result
+            since_token(StreamToken): Earliest point to return events from, or None
+            upto_token(StreamToken): Latest point to return events from.
+        """
+        self.room_id = room_id
+        self.rtype = rtype
+        self.events = events
+        self.newly_joined = newly_joined
+        self.full_state = full_state
+        self.since_token = since_token
+        self.upto_token = upto_token
diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py
index d46f05f426..5589296c09 100644
--- a/synapse/handlers/typing.py
+++ b/synapse/handlers/typing.py
@@ -30,7 +30,7 @@ logger = logging.getLogger(__name__)
 
 # A tiny object useful for storing a user's membership in a room, as a mapping
 # key
-RoomMember = namedtuple("RoomMember", ("room_id", "user"))
+RoomMember = namedtuple("RoomMember", ("room_id", "user_id"))
 
 
 class TypingHandler(object):
@@ -38,7 +38,7 @@ class TypingHandler(object):
         self.store = hs.get_datastore()
         self.server_name = hs.config.server_name
         self.auth = hs.get_auth()
-        self.is_mine = hs.is_mine
+        self.is_mine_id = hs.is_mine_id
         self.notifier = hs.get_notifier()
 
         self.clock = hs.get_clock()
@@ -67,20 +67,23 @@ class TypingHandler(object):
 
     @defer.inlineCallbacks
     def started_typing(self, target_user, auth_user, room_id, timeout):
-        if not self.is_mine(target_user):
+        target_user_id = target_user.to_string()
+        auth_user_id = auth_user.to_string()
+
+        if not self.is_mine_id(target_user_id):
             raise SynapseError(400, "User is not hosted on this Home Server")
 
-        if target_user != auth_user:
+        if target_user_id != auth_user_id:
             raise AuthError(400, "Cannot set another user's typing state")
 
-        yield self.auth.check_joined_room(room_id, target_user.to_string())
+        yield self.auth.check_joined_room(room_id, target_user_id)
 
         logger.debug(
-            "%s has started typing in %s", target_user.to_string(), room_id
+            "%s has started typing in %s", target_user_id, room_id
         )
 
         until = self.clock.time_msec() + timeout
-        member = RoomMember(room_id=room_id, user=target_user)
+        member = RoomMember(room_id=room_id, user_id=target_user_id)
 
         was_present = member in self._member_typing_until
 
@@ -104,25 +107,28 @@ class TypingHandler(object):
 
         yield self._push_update(
             room_id=room_id,
-            user=target_user,
+            user_id=target_user_id,
             typing=True,
         )
 
     @defer.inlineCallbacks
     def stopped_typing(self, target_user, auth_user, room_id):
-        if not self.is_mine(target_user):
+        target_user_id = target_user.to_string()
+        auth_user_id = auth_user.to_string()
+
+        if not self.is_mine_id(target_user_id):
             raise SynapseError(400, "User is not hosted on this Home Server")
 
-        if target_user != auth_user:
+        if target_user_id != auth_user_id:
             raise AuthError(400, "Cannot set another user's typing state")
 
-        yield self.auth.check_joined_room(room_id, target_user.to_string())
+        yield self.auth.check_joined_room(room_id, target_user_id)
 
         logger.debug(
-            "%s has stopped typing in %s", target_user.to_string(), room_id
+            "%s has stopped typing in %s", target_user_id, room_id
         )
 
-        member = RoomMember(room_id=room_id, user=target_user)
+        member = RoomMember(room_id=room_id, user_id=target_user_id)
 
         if member in self._member_typing_timer:
             self.clock.cancel_call_later(self._member_typing_timer[member])
@@ -132,8 +138,9 @@ class TypingHandler(object):
 
     @defer.inlineCallbacks
     def user_left_room(self, user, room_id):
-        if self.is_mine(user):
-            member = RoomMember(room_id=room_id, user=user)
+        user_id = user.to_string()
+        if self.is_mine_id(user_id):
+            member = RoomMember(room_id=room_id, user_id=user_id)
             yield self._stopped_typing(member)
 
     @defer.inlineCallbacks
@@ -144,7 +151,7 @@ class TypingHandler(object):
 
         yield self._push_update(
             room_id=member.room_id,
-            user=member.user,
+            user_id=member.user_id,
             typing=False,
         )
 
@@ -156,7 +163,7 @@ class TypingHandler(object):
             del self._member_typing_timer[member]
 
     @defer.inlineCallbacks
-    def _push_update(self, room_id, user, typing):
+    def _push_update(self, room_id, user_id, typing):
         domains = yield self.store.get_joined_hosts_for_room(room_id)
 
         deferreds = []
@@ -164,7 +171,7 @@ class TypingHandler(object):
             if domain == self.server_name:
                 self._push_update_local(
                     room_id=room_id,
-                    user=user,
+                    user_id=user_id,
                     typing=typing
                 )
             else:
@@ -173,7 +180,7 @@ class TypingHandler(object):
                     edu_type="m.typing",
                     content={
                         "room_id": room_id,
-                        "user_id": user.to_string(),
+                        "user_id": user_id,
                         "typing": typing,
                     },
                 ))
@@ -183,23 +190,26 @@ class TypingHandler(object):
     @defer.inlineCallbacks
     def _recv_edu(self, origin, content):
         room_id = content["room_id"]
-        user = UserID.from_string(content["user_id"])
+        user_id = content["user_id"]
+
+        # Check that the string is a valid user id
+        UserID.from_string(user_id)
 
         domains = yield self.store.get_joined_hosts_for_room(room_id)
 
         if self.server_name in domains:
             self._push_update_local(
                 room_id=room_id,
-                user=user,
+                user_id=user_id,
                 typing=content["typing"]
             )
 
-    def _push_update_local(self, room_id, user, typing):
+    def _push_update_local(self, room_id, user_id, typing):
         room_set = self._room_typing.setdefault(room_id, set())
         if typing:
-            room_set.add(user)
+            room_set.add(user_id)
         else:
-            room_set.discard(user)
+            room_set.discard(user_id)
 
         self._latest_room_serial += 1
         self._room_serials[room_id] = self._latest_room_serial
@@ -211,13 +221,14 @@ class TypingHandler(object):
 
     def get_all_typing_updates(self, last_id, current_id):
         # TODO: Work out a way to do this without scanning the entire state.
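+        # Short-circuit: nothing has changed since the caller last polled.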
+        if last_id == current_id:
+            return []
+
         rows = []
         for room_id, serial in self._room_serials.items():
             if last_id < serial and serial <= current_id:
                 typing = self._room_typing[room_id]
-                typing_bytes = json.dumps([
-                    u.to_string() for u in typing
-                ], ensure_ascii=False)
+                typing_bytes = json.dumps(list(typing), ensure_ascii=False)
                 rows.append((serial, room_id, typing_bytes))
         rows.sort()
         return rows
@@ -239,7 +250,7 @@ class TypingNotificationEventSource(object):
             "type": "m.typing",
             "room_id": room_id,
             "content": {
-                "user_ids": [u.to_string() for u in typing],
+                "user_ids": list(typing),
             },
         }
 
diff --git a/synapse/http/client.py b/synapse/http/client.py
index c7fa692435..3ec9bc7faf 100644
--- a/synapse/http/client.py
+++ b/synapse/http/client.py
@@ -24,12 +24,13 @@ from synapse.http.endpoint import SpiderEndpoint
 
 from canonicaljson import encode_canonical_json
 
-from twisted.internet import defer, reactor, ssl, protocol
+from twisted.internet import defer, reactor, ssl, protocol, task
 from twisted.internet.endpoints import SSL4ClientEndpoint, TCP4ClientEndpoint
 from twisted.web.client import (
     BrowserLikeRedirectAgent, ContentDecoderAgent, GzipDecoder, Agent,
-    readBody, FileBodyProducer, PartialDownloadError,
+    readBody, PartialDownloadError,
 )
+from twisted.web.client import FileBodyProducer as TwistedFileBodyProducer
 from twisted.web.http import PotentialDataLoss
 from twisted.web.http_headers import Headers
 from twisted.web._newclient import ResponseDone
@@ -468,3 +469,26 @@ class InsecureInterceptableContextFactory(ssl.ContextFactory):
 
     def creatorForNetloc(self, hostname, port):
         return self
+
+
+class FileBodyProducer(TwistedFileBodyProducer):
+    """Workaround for https://twistedmatrix.com/trac/ticket/8473
+
+    We override the pauseProducing and resumeProducing methods in twisted's
+    FileBodyProducer so that they do not raise exceptions if the task has
+    already completed.
+    """
+
+    def pauseProducing(self):
+        try:
+            super(FileBodyProducer, self).pauseProducing()
+        except task.TaskDone:
+            # task has already completed
+            pass
+
+    def resumeProducing(self):
+        try:
+            super(FileBodyProducer, self).resumeProducing()
+        except task.NotPaused:
+            # task was not paused (probably because it had already completed)
+            pass
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index c3589534f8..f93093dd85 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -155,9 +155,7 @@ class MatrixFederationHttpClient(object):
                             time_out=timeout / 1000. if timeout else 60,
                         )
 
-                    response = yield preserve_context_over_fn(
-                        send_request,
-                    )
+                    response = yield preserve_context_over_fn(send_request)
 
                     log_result = "%d %s" % (response.code, response.phrase,)
                     break
diff --git a/synapse/http/server.py b/synapse/http/server.py
index f705abab94..2b3c05a740 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -205,6 +205,7 @@ class JsonResource(HttpServer, resource.Resource):
 
     def register_paths(self, method, path_patterns, callback):
         for path_pattern in path_patterns:
+            logger.debug("Registering for %s %s", method, path_pattern.pattern)
             self.path_regexs.setdefault(method, []).append(
                 self._PathEntry(path_pattern, callback)
             )
diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py
index 5664d5a381..76d5998d75 100644
--- a/synapse/metrics/__init__.py
+++ b/synapse/metrics/__init__.py
@@ -22,22 +22,20 @@ import functools
 import os
 import stat
 import time
+import gc
 
 from twisted.internet import reactor
 
 from .metric import (
-    CounterMetric, CallbackMetric, DistributionMetric, CacheMetric
+    CounterMetric, CallbackMetric, DistributionMetric, CacheMetric,
+    MemoryUsageMetric,
 )
 
 
 logger = logging.getLogger(__name__)
 
 
-# We'll keep all the available metrics in a single toplevel dict, one shared
-# for the entire process. We don't currently support per-HomeServer instances
-# of metrics, because in practice any one python VM will host only one
-# HomeServer anyway. This makes a lot of implementation neater
-all_metrics = {}
+all_metrics = []
 
 
 class Metrics(object):
@@ -53,7 +51,7 @@ class Metrics(object):
 
         metric = metric_class(full_name, *args, **kwargs)
 
-        all_metrics[full_name] = metric
+        all_metrics.append(metric)
         return metric
 
     def register_counter(self, *args, **kwargs):
@@ -69,6 +67,21 @@ class Metrics(object):
         return self._register(CacheMetric, *args, **kwargs)
 
 
+def register_memory_metrics(hs):
+    try:
+        import psutil
+        process = psutil.Process()
+        process.memory_info().rss
+    except (ImportError, AttributeError):
+        logger.warn(
+            "psutil is not installed or incorrect version."
+            " Disabling memory metrics."
+        )
+        return
+    metric = MemoryUsageMetric(hs, psutil)
+    all_metrics.append(metric)
+
+
 def get_metrics_for(pkg_name):
     """ Returns a Metrics instance for conveniently creating metrics
     namespaced with the given name prefix. """
@@ -84,12 +97,12 @@ def render_all():
     # TODO(paul): Internal hack
     update_resource_metrics()
 
-    for name in sorted(all_metrics.keys()):
+    for metric in all_metrics:
         try:
-            strs += all_metrics[name].render()
+            strs += metric.render()
         except Exception:
-            strs += ["# FAILED to render %s" % name]
-            logger.exception("Failed to render %s metric", name)
+            strs += ["# FAILED to render"]
+            logger.exception("Failed to render metric")
 
     strs.append("")  # to generate a final CRLF
 
@@ -156,6 +169,13 @@ reactor_metrics = get_metrics_for("reactor")
 tick_time = reactor_metrics.register_distribution("tick_time")
 pending_calls_metric = reactor_metrics.register_distribution("pending_calls")
 
+gc_time = reactor_metrics.register_distribution("gc_time", labels=["gen"])
+gc_unreachable = reactor_metrics.register_counter("gc_unreachable", labels=["gen"])
+
+reactor_metrics.register_callback(
+    "gc_counts", lambda: {(i,): v for i, v in enumerate(gc.get_count())}, labels=["gen"]
+)
+
 
 def runUntilCurrentTimer(func):
 
@@ -182,6 +202,22 @@ def runUntilCurrentTimer(func):
         end = time.time() * 1000
         tick_time.inc_by(end - start)
         pending_calls_metric.inc_by(num_pending)
+
+        # Check if we need to do a manual GC (since it's been disabled), and do
+        # one if necessary.
+        threshold = gc.get_threshold()
+        counts = gc.get_count()
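+        # Check the oldest generation first: collecting generation i also
+        # collects every younger generation.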
+        for i in (2, 1, 0):
+            if threshold[i] < counts[i]:
+                logger.info("Collecting gc %d", i)
+
+                start = time.time() * 1000
+                unreachable = gc.collect(i)
+                end = time.time() * 1000
+
+                gc_time.inc_by(end - start, i)
+                gc_unreachable.inc_by(unreachable, i)
+
         return ret
 
     return f
@@ -196,5 +232,9 @@ try:
     # runUntilCurrent is called when we have pending calls. It is called once
     # per iteration after fd polling.
     reactor.runUntilCurrent = runUntilCurrentTimer(reactor.runUntilCurrent)
+
+    # We manually run the GC each reactor tick so that we can get some metrics
+    # about time spent doing GC.
+    gc.disable()
 except AttributeError:
     pass
diff --git a/synapse/metrics/metric.py b/synapse/metrics/metric.py
index 368fc24984..e81af29895 100644
--- a/synapse/metrics/metric.py
+++ b/synapse/metrics/metric.py
@@ -47,9 +47,6 @@ class BaseMetric(object):
                       for k, v in zip(self.labels, values)])
         )
 
-    def render(self):
-        return map_concat(self.render_item, sorted(self.counts.keys()))
-
 
 class CounterMetric(BaseMetric):
     """The simplest kind of metric; one that stores a monotonically-increasing
@@ -83,6 +80,9 @@ class CounterMetric(BaseMetric):
     def render_item(self, k):
         return ["%s%s %d" % (self.name, self._render_key(k), self.counts[k])]
 
+    def render(self):
+        return map_concat(self.render_item, sorted(self.counts.keys()))
+
 
 class CallbackMetric(BaseMetric):
     """A metric that returns the numeric value returned by a callback whenever
@@ -126,30 +126,70 @@ class DistributionMetric(object):
 
 
 class CacheMetric(object):
-    """A combination of two CounterMetrics, one to count cache hits and one to
-    count a total, and a callback metric to yield the current size.
-
-    This metric generates standard metric name pairs, so that monitoring rules
-    can easily be applied to measure hit ratio."""
+    __slots__ = ("name", "cache_name", "hits", "misses", "size_callback")
 
-    def __init__(self, name, size_callback, labels=[]):
+    def __init__(self, name, size_callback, cache_name):
         self.name = name
+        self.cache_name = cache_name
 
-        self.hits = CounterMetric(name + ":hits", labels=labels)
-        self.total = CounterMetric(name + ":total", labels=labels)
+        self.hits = 0
+        self.misses = 0
 
-        self.size = CallbackMetric(
-            name + ":size",
-            callback=size_callback,
-            labels=labels,
-        )
+        self.size_callback = size_callback
+
+    def inc_hits(self):
+        self.hits += 1
+
+    def inc_misses(self):
+        self.misses += 1
+
+    def render(self):
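+        # Emit one sample line per statistic, e.g. cache:hits{name="get_event"} 123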
+        size = self.size_callback()
+        hits = self.hits
+        total = self.misses + self.hits
+
+        return [
+            """%s:hits{name="%s"} %d""" % (self.name, self.cache_name, hits),
+            """%s:total{name="%s"} %d""" % (self.name, self.cache_name, total),
+            """%s:size{name="%s"} %d""" % (self.name, self.cache_name, size),
+        ]
+
+
+class MemoryUsageMetric(object):
+    """Keeps track of the current memory usage, using psutil.
+
+    The class will keep the current min/max/sum/counts of rss over the last
+    WINDOW_SIZE_SEC, by polling UPDATE_HZ times per second
+    """
+
+    UPDATE_HZ = 2  # number of times to get memory per second
+    WINDOW_SIZE_SEC = 30  # the size of the window in seconds
+
+    def __init__(self, hs, psutil):
+        clock = hs.get_clock()
+        self.memory_snapshots = []
+
+        self.process = psutil.Process()
 
-    def inc_hits(self, *values):
-        self.hits.inc(*values)
-        self.total.inc(*values)
+        clock.looping_call(self._update_curr_values, 1000 / self.UPDATE_HZ)
 
-    def inc_misses(self, *values):
-        self.total.inc(*values)
+    def _update_curr_values(self):
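+        # Keep a sliding window covering the last WINDOW_SIZE_SEC seconds of samples.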
+        max_size = self.UPDATE_HZ * self.WINDOW_SIZE_SEC
+        self.memory_snapshots.append(self.process.memory_info().rss)
+        self.memory_snapshots[:] = self.memory_snapshots[-max_size:]
 
     def render(self):
-        return self.hits.render() + self.total.render() + self.size.render()
+        if not self.memory_snapshots:
+            return []
+
+        max_rss = max(self.memory_snapshots)
+        min_rss = min(self.memory_snapshots)
+        sum_rss = sum(self.memory_snapshots)
+        len_rss = len(self.memory_snapshots)
+
+        return [
+            "process_psutil_rss:max %d" % max_rss,
+            "process_psutil_rss:min %d" % min_rss,
+            "process_psutil_rss:total %d" % sum_rss,
+            "process_psutil_rss:count %d" % len_rss,
+        ]
diff --git a/synapse/notifier.py b/synapse/notifier.py
index 33b79c0ec7..30883a0696 100644
--- a/synapse/notifier.py
+++ b/synapse/notifier.py
@@ -14,7 +14,7 @@
 # limitations under the License.
 
 from twisted.internet import defer
-from synapse.api.constants import EventTypes
+from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import AuthError
 
 from synapse.util.logutils import log_function
@@ -140,8 +140,6 @@ class Notifier(object):
     UNUSED_STREAM_EXPIRY_MS = 10 * 60 * 1000
 
     def __init__(self, hs):
-        self.hs = hs
-
         self.user_to_user_stream = {}
         self.room_to_user_streams = {}
         self.appservice_to_user_streams = {}
@@ -151,10 +149,8 @@ class Notifier(object):
         self.pending_new_room_events = []
 
         self.clock = hs.get_clock()
-
-        hs.get_distributor().observe(
-            "user_joined_room", self._user_joined_room
-        )
+        self.appservice_handler = hs.get_application_service_handler()
+        self.state_handler = hs.get_state_handler()
 
         self.clock.looping_call(
             self.remove_expired_streams, self.UNUSED_STREAM_EXPIRY_MS
@@ -232,9 +228,7 @@ class Notifier(object):
     def _on_new_room_event(self, event, room_stream_id, extra_users=[]):
         """Notify any user streams that are interested in this room event"""
         # poke any interested application service.
-        self.hs.get_handlers().appservice_handler.notify_interested_services(
-            event
-        )
+        self.appservice_handler.notify_interested_services(event)
 
         app_streams = set()
 
@@ -250,6 +244,9 @@ class Notifier(object):
                 )
                 app_streams |= app_user_streams
 
+        if event.type == EventTypes.Member and event.membership == Membership.JOIN:
+            self._user_joined_room(event.state_key, event.room_id)
+
         self.on_new_event(
             "room_key", room_stream_id,
             users=extra_users,
@@ -449,7 +446,7 @@ class Notifier(object):
 
     @defer.inlineCallbacks
     def _is_world_readable(self, room_id):
-        state = yield self.hs.get_state_handler().get_current_state(
+        state = yield self.state_handler.get_current_state(
             room_id,
             EventTypes.RoomHistoryVisibility
         )
@@ -485,9 +482,8 @@ class Notifier(object):
                 user_stream.appservice, set()
             ).add(user_stream)
 
-    def _user_joined_room(self, user, room_id):
-        user = str(user)
-        new_user_stream = self.user_to_user_stream.get(user)
+    def _user_joined_room(self, user_id, room_id):
+        new_user_stream = self.user_to_user_stream.get(user_id)
         if new_user_stream is not None:
             room_streams = self.room_to_user_streams.setdefault(room_id, set())
             room_streams.add(new_user_stream)
diff --git a/synapse/push/action_generator.py b/synapse/push/action_generator.py
index 9b208668b6..46e768e35c 100644
--- a/synapse/push/action_generator.py
+++ b/synapse/push/action_generator.py
@@ -40,7 +40,7 @@ class ActionGenerator:
     def handle_push_actions_for_event(self, event, context):
         with Measure(self.clock, "handle_push_actions_for_event"):
             bulk_evaluator = yield evaluator_for_event(
-                event, self.hs, self.store
+                event, self.hs, self.store, context.current_state
             )
 
             actions_by_user = yield bulk_evaluator.action_for_event_by_user(
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index 25e13b3423..756e5da513 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -14,84 +14,56 @@
 # limitations under the License.
 
 import logging
-import ujson as json
 
 from twisted.internet import defer
 
-from .baserules import list_with_base_rules
 from .push_rule_evaluator import PushRuleEvaluatorForEvent
 
-from synapse.api.constants import EventTypes
+from synapse.api.constants import EventTypes, Membership
 from synapse.visibility import filter_events_for_clients
 
 
 logger = logging.getLogger(__name__)
 
 
-def decode_rule_json(rule):
-    rule['conditions'] = json.loads(rule['conditions'])
-    rule['actions'] = json.loads(rule['actions'])
-    return rule
-
-
 @defer.inlineCallbacks
 def _get_rules(room_id, user_ids, store):
     rules_by_user = yield store.bulk_get_push_rules(user_ids)
-    rules_enabled_by_user = yield store.bulk_get_push_rules_enabled(user_ids)
-
-    rules_by_user = {
-        uid: list_with_base_rules([
-            decode_rule_json(rule_list)
-            for rule_list in rules_by_user.get(uid, [])
-        ])
-        for uid in user_ids
-    }
-
-    # We apply the rules-enabled map here: bulk_get_push_rules doesn't
-    # fetch disabled rules, but this won't account for any server default
-    # rules the user has disabled, so we need to do this too.
-    for uid in user_ids:
-        if uid not in rules_enabled_by_user:
-            continue
-
-        user_enabled_map = rules_enabled_by_user[uid]
-
-        for i, rule in enumerate(rules_by_user[uid]):
-            rule_id = rule['rule_id']
-
-            if rule_id in user_enabled_map:
-                if rule.get('enabled', True) != bool(user_enabled_map[rule_id]):
-                    # Rules are cached across users.
-                    rule = dict(rule)
-                    rule['enabled'] = bool(user_enabled_map[rule_id])
-                    rules_by_user[uid][i] = rule
+
+    rules_by_user = {k: v for k, v in rules_by_user.items() if v is not None}
 
     defer.returnValue(rules_by_user)
 
 
 @defer.inlineCallbacks
-def evaluator_for_event(event, hs, store):
+def evaluator_for_event(event, hs, store, current_state):
     room_id = event.room_id
-
-    # users in the room who have pushers need to get push rules run because
-    # that's how their pushers work
-    users_with_pushers = yield store.get_users_with_pushers_in_room(room_id)
-
     # We also will want to generate notifs for other people in the room so
     # their unread counts are correct in the event stream, but to avoid
     # generating them for bot / AS users etc, we only do so for people who've
     # sent a read receipt into the room.
 
-    all_in_room = yield store.get_users_in_room(room_id)
-    all_in_room = set(all_in_room)
+    local_users_in_room = set(
+        e.state_key for e in current_state.values()
+        if e.type == EventTypes.Member and e.membership == Membership.JOIN
+        and hs.is_mine_id(e.state_key)
+    )
+
+    # users in the room who have pushers need to get push rules run because
+    # that's how their pushers work
+    if_users_with_pushers = yield store.get_if_users_have_pushers(
+        local_users_in_room
+    )
+    user_ids = set(
+        uid for uid, have_pusher in if_users_with_pushers.items() if have_pusher
+    )
 
-    receipts = yield store.get_receipts_for_room(room_id, "m.read")
+    users_with_receipts = yield store.get_users_with_read_receipts_in_room(room_id)
 
     # any users with pushers must be ours: they have pushers
-    user_ids = set(users_with_pushers)
-    for r in receipts:
-        if hs.is_mine_id(r['user_id']) and r['user_id'] in all_in_room:
-            user_ids.add(r['user_id'])
+    for uid in users_with_receipts:
+        if uid in local_users_in_room:
+            user_ids.add(uid)
 
     # if this event is an invite event, we may need to run rules for the user
     # who's been invited, otherwise they won't get told they've been invited
@@ -102,8 +74,6 @@ def evaluator_for_event(event, hs, store):
             if has_pusher:
                 user_ids.add(invited_user)
 
-    user_ids = list(user_ids)
-
     rules_by_user = yield _get_rules(room_id, user_ids, store)
 
     defer.returnValue(BulkPushRuleEvaluator(
@@ -141,7 +111,10 @@ class BulkPushRuleEvaluator:
             self.store, user_tuples, [event], {event.event_id: current_state}
         )
 
-        room_members = yield self.store.get_users_in_room(self.room_id)
+        room_members = set(
+            e.state_key for e in current_state.values()
+            if e.type == EventTypes.Member and e.membership == Membership.JOIN
+        )
 
         evaluator = PushRuleEvaluatorForEvent(event, len(room_members))
 
diff --git a/synapse/push/clientformat.py b/synapse/push/clientformat.py
index ae9db9ec2f..e0331b2d2d 100644
--- a/synapse/push/clientformat.py
+++ b/synapse/push/clientformat.py
@@ -13,29 +13,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.push.baserules import list_with_base_rules
-
 from synapse.push.rulekinds import (
     PRIORITY_CLASS_MAP, PRIORITY_CLASS_INVERSE_MAP
 )
 
 import copy
-import simplejson as json
 
 
-def format_push_rules_for_user(user, rawrules, enabled_map):
+def format_push_rules_for_user(user, ruleslist):
     """Converts a list of rawrules and a enabled map into nested dictionaries
     to match the Matrix client-server format for push rules"""
 
-    ruleslist = []
-    for rawrule in rawrules:
-        rule = dict(rawrule)
-        rule["conditions"] = json.loads(rawrule["conditions"])
-        rule["actions"] = json.loads(rawrule["actions"])
-        ruleslist.append(rule)
-
     # We're going to be mutating this a lot, so do a deep copy
-    ruleslist = copy.deepcopy(list_with_base_rules(ruleslist))
+    ruleslist = copy.deepcopy(ruleslist)
 
     rules = {'global': {}, 'device': {}}
 
@@ -60,9 +50,7 @@ def format_push_rules_for_user(user, rawrules, enabled_map):
 
         template_rule = _rule_to_template(r)
         if template_rule:
-            if r['rule_id'] in enabled_map:
-                template_rule['enabled'] = enabled_map[r['rule_id']]
-            elif 'enabled' in r:
+            if 'enabled' in r:
                 template_rule['enabled'] = r['enabled']
             else:
                 template_rule['enabled'] = True
diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py
index b4b728adc5..6600c9cd55 100644
--- a/synapse/push/emailpusher.py
+++ b/synapse/push/emailpusher.py
@@ -14,6 +14,7 @@
 # limitations under the License.
 
 from twisted.internet import defer, reactor
+from twisted.internet.error import AlreadyCalled, AlreadyCancelled
 
 import logging
 
@@ -32,12 +33,20 @@ DELAY_BEFORE_MAIL_MS = 10 * 60 * 1000
 # Each room maintains its own throttle counter, but each new mail notification
 # sends the pending notifications for all rooms.
 THROTTLE_START_MS = 10 * 60 * 1000
-THROTTLE_MAX_MS = 24 * 60 * 60 * 1000  # (2 * 60 * 1000) * (2 ** 11)  # ~3 days
-THROTTLE_MULTIPLIER = 6                # 10 mins, 1 hour, 6 hours, 24 hours
+THROTTLE_MAX_MS = 24 * 60 * 60 * 1000  # 24h
+# THROTTLE_MULTIPLIER = 6              # 10 mins, 1 hour, 6 hours, 24 hours
+THROTTLE_MULTIPLIER = 144              # 10 mins, 24 hours - i.e. jump straight to 1 day
 
 # If no event triggers a notification for this long after the previous,
 # the throttle is released.
-THROTTLE_RESET_AFTER_MS = (2 * 60 * 1000) * (2 ** 11)  # ~3 days
+# 12 hours - a gap of 12 hours in conversation is surely enough to merit a new
+# notification when things get going again...
+THROTTLE_RESET_AFTER_MS = (12 * 60 * 60 * 1000)
+
+# does each email include all unread notifs, or just the ones which have happened
+# since the last mail?
+# XXX: this is currently broken as it includes ones from parted rooms(!)
+INCLUDE_ALL_UNREAD_NOTIFS = False
 
 
 class EmailPusher(object):
@@ -65,7 +74,12 @@ class EmailPusher(object):
         self.processing = False
 
         if self.hs.config.email_enable_notifs:
-            self.mailer = Mailer(self.hs)
+            if 'data' in pusherdict and 'brand' in pusherdict['data']:
+                app_name = pusherdict['data']['brand']
+            else:
+                app_name = self.hs.config.email_app_name
+
+            self.mailer = Mailer(self.hs, app_name)
         else:
             self.mailer = None
 
@@ -79,7 +93,11 @@ class EmailPusher(object):
 
     def on_stop(self):
         if self.timed_call:
-            self.timed_call.cancel()
+            try:
+                self.timed_call.cancel()
+            except (AlreadyCalled, AlreadyCancelled):
+                pass
+            self.timed_call = None
 
     @defer.inlineCallbacks
     def on_new_notifications(self, min_stream_ordering, max_stream_ordering):
@@ -126,9 +144,9 @@ class EmailPusher(object):
         up logging, measures and guards against multiple instances of it
         being run.
         """
-        unprocessed = yield self.store.get_unread_push_actions_for_user_in_range(
-            self.user_id, self.last_stream_ordering, self.max_stream_ordering
-        )
+        start = 0 if INCLUDE_ALL_UNREAD_NOTIFS else self.last_stream_ordering
+        fn = self.store.get_unread_push_actions_for_user_in_range_for_email
+        unprocessed = yield fn(self.user_id, start, self.max_stream_ordering)
 
         soonest_due_at = None
 
@@ -150,7 +168,6 @@ class EmailPusher(object):
                 # we then consider all previously outstanding notifications
                 # to be delivered.
 
-                # debugging:
                 reason = {
                     'room_id': push_action['room_id'],
                     'now': self.clock.time_msec(),
@@ -165,16 +182,22 @@ class EmailPusher(object):
                 yield self.save_last_stream_ordering_and_success(max([
                     ea['stream_ordering'] for ea in unprocessed
                 ]))
-                yield self.sent_notif_update_throttle(
-                    push_action['room_id'], push_action
-                )
+
+                # we update the throttle on all the possible unprocessed push actions
+                for ea in unprocessed:
+                    yield self.sent_notif_update_throttle(
+                        ea['room_id'], ea
+                    )
                 break
             else:
                 if soonest_due_at is None or should_notify_at < soonest_due_at:
                     soonest_due_at = should_notify_at
 
                 if self.timed_call is not None:
-                    self.timed_call.cancel()
+                    try:
+                        self.timed_call.cancel()
+                    except (AlreadyCalled, AlreadyCancelled):
+                        pass
                     self.timed_call = None
 
         if soonest_due_at is not None:
@@ -263,5 +286,5 @@ class EmailPusher(object):
         logger.info("Sending notif email for user %r", self.user_id)
 
         yield self.mailer.send_notification_mail(
-            self.user_id, self.email, push_actions, reason
+            self.app_id, self.user_id, self.email, push_actions, reason
         )
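
The constants above make the per-room email throttle a single step: 10 minutes times 144 is exactly 24 hours, clamped at THROTTLE_MAX_MS, and a 12-hour quiet spell releases it. A minimal standalone sketch of that progression (the real EmailPusher persists the current delay per room in the store):

    THROTTLE_START_MS = 10 * 60 * 1000
    THROTTLE_MULTIPLIER = 144
    THROTTLE_MAX_MS = 24 * 60 * 60 * 1000

    def next_throttle_ms(current_ms):
        # first mail waits 10 minutes; the next jump lands on the 24h cap
        return min(current_ms * THROTTLE_MULTIPLIER, THROTTLE_MAX_MS)

    assert next_throttle_ms(THROTTLE_START_MS) == THROTTLE_MAX_MS
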
diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py
index 3992804845..feedb075e2 100644
--- a/synapse/push/httppusher.py
+++ b/synapse/push/httppusher.py
@@ -16,6 +16,7 @@
 from synapse.push import PusherConfigException
 
 from twisted.internet import defer, reactor
+from twisted.internet.error import AlreadyCalled, AlreadyCancelled
 
 import logging
 import push_rule_evaluator
@@ -38,6 +39,7 @@ class HttpPusher(object):
         self.hs = hs
         self.store = self.hs.get_datastore()
         self.clock = self.hs.get_clock()
+        self.state_handler = self.hs.get_state_handler()
         self.user_id = pusherdict['user_name']
         self.app_id = pusherdict['app_id']
         self.app_display_name = pusherdict['app_display_name']
@@ -108,7 +110,11 @@ class HttpPusher(object):
 
     def on_stop(self):
         if self.timed_call:
-            self.timed_call.cancel()
+            try:
+                self.timed_call.cancel()
+            except (AlreadyCalled, AlreadyCancelled):
+                pass
+            self.timed_call = None
 
     @defer.inlineCallbacks
     def _process(self):
@@ -140,7 +146,8 @@ class HttpPusher(object):
         run once per pusher.
         """
 
-        unprocessed = yield self.store.get_unread_push_actions_for_user_in_range(
+        fn = self.store.get_unread_push_actions_for_user_in_range_for_http
+        unprocessed = yield fn(
             self.user_id, self.last_stream_ordering, self.max_stream_ordering
         )
 
@@ -237,7 +244,9 @@ class HttpPusher(object):
 
     @defer.inlineCallbacks
     def _build_notification_dict(self, event, tweaks, badge):
-        ctx = yield push_tools.get_context_for_event(self.hs.get_datastore(), event)
+        ctx = yield push_tools.get_context_for_event(
+            self.state_handler, event, self.user_id
+        )
 
         d = {
             'notification': {
@@ -269,8 +278,8 @@ class HttpPusher(object):
         if 'content' in event:
             d['notification']['content'] = event.content
 
-        if len(ctx['aliases']):
-            d['notification']['room_alias'] = ctx['aliases'][0]
+        # We no longer send aliases separately; instead, we send the
+        # human-readable name of the room, which may be an alias.
         if 'sender_display_name' in ctx and len(ctx['sender_display_name']) > 0:
             d['notification']['sender_display_name'] = ctx['sender_display_name']
         if 'name' in ctx and len(ctx['name']) > 0:
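
Past the end of this hunk the dict is posted to the push gateway. A hedged sketch of the resulting payload fragment (values invented; "room_name" is assumed to be the key set from ctx['name'], per the Matrix push gateway API, replacing the old "room_alias"):

    {
        "notification": {
            "event_id": "$1234:example.org",
            "room_id": "!abcd:example.org",
            "sender_display_name": "Alice",
            "room_name": "Lunch plans",  # human-readable name, may be an alias
            # ... plus type, content, counts, devices, etc.
        }
    }
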
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index c2c2ca3fa7..1028731bc9 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -41,11 +41,14 @@ logger = logging.getLogger(__name__)
 
 
 MESSAGE_FROM_PERSON_IN_ROOM = "You have a message on %(app)s from %(person)s " \
-                              "in the %s room..."
+                              "in the %(room)s room..."
 MESSAGE_FROM_PERSON = "You have a message on %(app)s from %(person)s..."
 MESSAGES_FROM_PERSON = "You have messages on %(app)s from %(person)s..."
-MESSAGES_IN_ROOM = "There are some messages on %(app)s for you in the %(room)s room..."
-MESSAGES_IN_ROOMS = "Here are some messages on %(app)s you may have missed..."
+MESSAGES_IN_ROOM = "You have messages on %(app)s in the %(room)s room..."
+MESSAGES_IN_ROOM_AND_OTHERS = \
+    "You have messages on %(app)s in the %(room)s room and others..."
+MESSAGES_FROM_PERSON_AND_OTHERS = \
+    "You have messages on %(app)s from %(person)s and others..."
 INVITE_FROM_PERSON_TO_ROOM = "%(person)s has invited you to join the " \
                              "%(room)s room on %(app)s..."
 INVITE_FROM_PERSON = "%(person)s has invited you to chat on %(app)s..."
@@ -75,12 +78,14 @@ ALLOWED_ATTRS = {
 
 
 class Mailer(object):
-    def __init__(self, hs):
+    def __init__(self, hs, app_name):
         self.hs = hs
         self.store = self.hs.get_datastore()
+        self.auth_handler = self.hs.get_auth_handler()
         self.state_handler = self.hs.get_state_handler()
         loader = jinja2.FileSystemLoader(self.hs.config.email_template_dir)
-        self.app_name = self.hs.config.email_app_name
+        self.app_name = app_name
+        logger.info("Created Mailer for app_name %s" % app_name)
         env = jinja2.Environment(loader=loader)
         env.filters["format_ts"] = format_ts_filter
         env.filters["mxc_to_http"] = self.mxc_to_http_filter
@@ -92,8 +97,16 @@ class Mailer(object):
         )
 
     @defer.inlineCallbacks
-    def send_notification_mail(self, user_id, email_address, push_actions, reason):
-        raw_from = email.utils.parseaddr(self.hs.config.email_notif_from)[1]
+    def send_notification_mail(self, app_id, user_id, email_address,
+                               push_actions, reason):
+        try:
+            from_string = self.hs.config.email_notif_from % {
+                "app": self.app_name
+            }
+        except TypeError:
+            from_string = self.hs.config.email_notif_from
+
+        raw_from = email.utils.parseaddr(from_string)[1]
         raw_to = email.utils.parseaddr(email_address)[1]
 
         if raw_to == '':
@@ -119,6 +132,8 @@ class Mailer(object):
             user_display_name = yield self.store.get_profile_displayname(
                 UserID.from_string(user_id).localpart
             )
+            if user_display_name is None:
+                user_display_name = user_id
         except StoreError:
             user_display_name = user_id
 
@@ -128,9 +143,14 @@ class Mailer(object):
             state_by_room[room_id] = room_state
 
         # Run at most 3 of these at once: sync does 10 at a time but email
-        # notifs are much realtime than sync so we can afford to wait a bit.
+        # notifs are much less realtime than sync so we can afford to wait a bit.
         yield concurrently_execute(_fetch_room_state, rooms_in_order, 3)
 
+        # actually sort our so-called rooms_in_order list, most recent room first
+        rooms_in_order.sort(
+            key=lambda r: -(notifs_by_room[r][-1]['received_ts'] or 0)
+        )
+
         rooms = []
 
         for r in rooms_in_order:
@@ -139,17 +159,19 @@ class Mailer(object):
             )
             rooms.append(roomvars)
 
-        summary_text = self.make_summary_text(
-            notifs_by_room, state_by_room, notif_events, user_id
+        reason['room_name'] = calculate_room_name(
+            state_by_room[reason['room_id']], user_id, fallback_to_members=True
         )
 
-        reason['room_name'] = calculate_room_name(
-            state_by_room[reason['room_id']], user_id, fallback_to_members=False
+        summary_text = self.make_summary_text(
+            notifs_by_room, state_by_room, notif_events, user_id, reason
         )
 
         template_vars = {
             "user_display_name": user_display_name,
-            "unsubscribe_link": self.make_unsubscribe_link(),
+            "unsubscribe_link": self.make_unsubscribe_link(
+                user_id, app_id, email_address
+            ),
             "summary_text": summary_text,
             "app_name": self.app_name,
             "rooms": rooms,
@@ -164,7 +186,7 @@ class Mailer(object):
 
         multipart_msg = MIMEMultipart('alternative')
         multipart_msg['Subject'] = "[%s] %s" % (self.app_name, summary_text)
-        multipart_msg['From'] = self.hs.config.email_notif_from
+        multipart_msg['From'] = from_string
         multipart_msg['To'] = email_address
         multipart_msg['Date'] = email.utils.formatdate()
         multipart_msg['Message-ID'] = email.utils.make_msgid()
@@ -251,14 +273,16 @@ class Mailer(object):
 
         sender_state_event = room_state[("m.room.member", event.sender)]
         sender_name = name_from_member_event(sender_state_event)
-        sender_avatar_url = sender_state_event.content["avatar_url"]
+        sender_avatar_url = sender_state_event.content.get("avatar_url")
 
         # 'hash' for deterministically picking default images: use
         # sender_hash % the number of default images to choose from
         sender_hash = string_ordinal_total(event.sender)
 
+        msgtype = event.content.get("msgtype")
+
         ret = {
-            "msgtype": event.content["msgtype"],
+            "msgtype": msgtype,
             "is_historical": event.event_id != notif['event_id'],
             "id": event.event_id,
             "ts": event.origin_server_ts,
@@ -267,9 +291,9 @@ class Mailer(object):
             "sender_hash": sender_hash,
         }
 
-        if event.content["msgtype"] == "m.text":
+        if msgtype == "m.text":
             self.add_text_message_vars(ret, event)
-        elif event.content["msgtype"] == "m.image":
+        elif msgtype == "m.image":
             self.add_image_message_vars(ret, event)
 
         if "body" in event.content:
@@ -278,16 +302,17 @@ class Mailer(object):
         return ret
 
     def add_text_message_vars(self, messagevars, event):
-        if "format" in event.content:
-            msgformat = event.content["format"]
-        else:
-            msgformat = None
+        msgformat = event.content.get("format")
+
         messagevars["format"] = msgformat
 
-        if msgformat == "org.matrix.custom.html":
-            messagevars["body_text_html"] = safe_markup(event.content["formatted_body"])
-        else:
-            messagevars["body_text_html"] = safe_text(event.content["body"])
+        formatted_body = event.content.get("formatted_body")
+        body = event.content.get("body")
+
+        if msgformat == "org.matrix.custom.html" and formatted_body:
+            messagevars["body_text_html"] = safe_markup(formatted_body)
+        elif body:
+            messagevars["body_text_html"] = safe_text(body)
 
         return messagevars
 
@@ -296,7 +321,8 @@ class Mailer(object):
 
         return messagevars
 
-    def make_summary_text(self, notifs_by_room, state_by_room, notif_events, user_id):
+    def make_summary_text(self, notifs_by_room, state_by_room,
+                          notif_events, user_id, reason):
         if len(notifs_by_room) == 1:
             # Only one room has new stuff
             room_id = notifs_by_room.keys()[0]
@@ -371,9 +397,28 @@ class Mailer(object):
                     }
         else:
             # Stuff's happened in multiple different rooms
-            return MESSAGES_IN_ROOMS % {
-                "app": self.app_name,
-            }
+
+            # ...but we still refer to the 'reason' room which triggered the mail
+            if reason['room_name'] is not None:
+                return MESSAGES_IN_ROOM_AND_OTHERS % {
+                    "room": reason['room_name'],
+                    "app": self.app_name,
+                }
+            else:
+            # If the reason room doesn't have a name, say who the messages
+            # are from explicitly, to avoid "messages in the Bob room"
+                sender_ids = list(set([
+                    notif_events[n['event_id']].sender
+                    for n in notifs_by_room[reason['room_id']]
+                ]))
+
+                return MESSAGES_FROM_PERSON_AND_OTHERS % {
+                    "person": descriptor_from_member_events([
+                        state_by_room[reason['room_id']][("m.room.member", s)]
+                        for s in sender_ids
+                    ]),
+                    "app": self.app_name,
+                }
 
     def make_room_link(self, room_id):
         # need /beta for Universal Links to work on iOS
@@ -393,9 +438,18 @@ class Mailer(object):
                 notif['room_id'], notif['event_id']
             )
 
-    def make_unsubscribe_link(self):
-        # XXX: matrix.to
-        return "https://vector.im/#/settings"
+    def make_unsubscribe_link(self, user_id, app_id, email_address):
+        params = {
+            "access_token": self.auth_handler.generate_delete_pusher_token(user_id),
+            "app_id": app_id,
+            "pushkey": email_address,
+        }
+
+        # XXX: make r0 once API is stable
+        return "%s_matrix/client/unstable/pushers/remove?%s" % (
+            self.hs.config.public_baseurl,
+            urllib.urlencode(params),
+        )
 
     def mxc_to_http_filter(self, value, width, height, resize_method="crop"):
         if value[0:6] != "mxc://":
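
The From-header logic in send_notification_mail lets the configured email_notif_from carry an %(app)s placeholder, which is filled with the per-pusher app name. A short sketch under invented config values:

    email_notif_from = "%(app)s <noreply@example.com>"  # hypothetical config
    app_name = "Riot"
    try:
        from_string = email_notif_from % {"app": app_name}
    except TypeError:
        # a config with stray positional specifiers (e.g. "%s %s") raises
        # TypeError here; fall back to using the string verbatim
        from_string = email_notif_from
    assert from_string == "Riot <noreply@example.com>"
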
diff --git a/synapse/push/push_tools.py b/synapse/push/push_tools.py
index 89a3b5e90a..d555a33e9a 100644
--- a/synapse/push/push_tools.py
+++ b/synapse/push/push_tools.py
@@ -14,6 +14,9 @@
 # limitations under the License.
 
 from twisted.internet import defer
+from synapse.util.presentable_names import (
+    calculate_room_name, name_from_member_event
+)
 
 
 @defer.inlineCallbacks
@@ -45,24 +48,21 @@ def get_badge_count(store, user_id):
 
 
 @defer.inlineCallbacks
-def get_context_for_event(store, ev):
-    name_aliases = yield store.get_room_name_and_aliases(
-        ev.room_id
-    )
+def get_context_for_event(state_handler, ev, user_id):
+    ctx = {}
 
-    ctx = {'aliases': name_aliases[1]}
-    if name_aliases[0] is not None:
-        ctx['name'] = name_aliases[0]
+    room_state = yield state_handler.get_current_state(ev.room_id)
 
-    their_member_events_for_room = yield store.get_current_state(
-        room_id=ev.room_id,
-        event_type='m.room.member',
-        state_key=ev.user_id
+    # we no longer bother setting room_alias, and make room_name the
+    # human-readable name instead, be that m.room.name, an alias or
+    # a list of people in the room
+    name = calculate_room_name(
+        room_state, user_id, fallback_to_single_member=False
     )
-    for mev in their_member_events_for_room:
-        if mev.content['membership'] == 'join' and 'displayname' in mev.content:
-            dn = mev.content['displayname']
-            if dn is not None:
-                ctx['sender_display_name'] = dn
+    if name:
+        ctx['name'] = name
+
+    sender_state_event = room_state[("m.room.member", ev.sender)]
+    ctx['sender_display_name'] = name_from_member_event(sender_state_event)
 
     defer.returnValue(ctx)
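
The context dict is thereby reduced to at most two keys. An illustrative result (names invented):

    # What get_context_for_event now yields for a typical message event:
    {
        "name": "Alice and Bob",         # calculate_room_name result, if any
        "sender_display_name": "Alice",  # from the sender's m.room.member event
    }
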
diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py
index e0a7a19777..86e3d89154 100644
--- a/synapse/python_dependencies.py
+++ b/synapse/python_dependencies.py
@@ -48,6 +48,12 @@ CONDITIONAL_REQUIREMENTS = {
         "Jinja2>=2.8": ["Jinja2>=2.8"],
         "bleach>=1.4.2": ["bleach>=1.4.2"],
     },
+    "ldap": {
+        "ldap3>=1.0": ["ldap3>=1.0"],
+    },
+    "psutil": {
+        "psutil>=2.0.0": ["psutil>=2.0.0"],
+    },
 }
 
 
diff --git a/synapse/replication/presence_resource.py b/synapse/replication/presence_resource.py
new file mode 100644
index 0000000000..fc18130ab4
--- /dev/null
+++ b/synapse/replication/presence_resource.py
@@ -0,0 +1,59 @@
+# Copyright 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.http.server import respond_with_json_bytes, request_handler
+from synapse.http.servlet import parse_json_object_from_request
+
+from twisted.web.resource import Resource
+from twisted.web.server import NOT_DONE_YET
+from twisted.internet import defer
+
+
+class PresenceResource(Resource):
+    """
+    HTTP endpoint for marking users as syncing.
+
+    POST /_synapse/replication/syncing_users HTTP/1.1
+    Content-Type: application/json
+
+    {
+        "process_id": "<process_id>",
+        "syncing_users": ["<user_id>"]
+    }
+    """
+
+    def __init__(self, hs):
+        Resource.__init__(self)  # Resource is old-style, so no super()
+
+        self.version_string = hs.version_string
+        self.clock = hs.get_clock()
+        self.presence_handler = hs.get_presence_handler()
+
+    def render_POST(self, request):
+        self._async_render_POST(request)
+        return NOT_DONE_YET
+
+    @request_handler()
+    @defer.inlineCallbacks
+    def _async_render_POST(self, request):
+        content = parse_json_object_from_request(request)
+
+        process_id = content["process_id"]
+        syncing_user_ids = content["syncing_users"]
+
+        yield self.presence_handler.update_external_syncs(
+            process_id, set(syncing_user_ids)
+        )
+
+        respond_with_json_bytes(request, 200, "{}")
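
A hedged usage sketch for this endpoint (Python 2, matching the codebase; host, port and IDs are invented, and the path assumes the putChild("syncing_users") registration shown in resource.py below):

    import json
    import urllib2

    body = json.dumps({
        "process_id": "synchrotron-1",
        "syncing_users": ["@alice:example.org"],
    })
    req = urllib2.Request(
        "http://127.0.0.1:9092/_synapse/replication/syncing_users",
        data=body,
        headers={"Content-Type": "application/json"},
    )
    print(urllib2.urlopen(req).read())  # "{}" on success
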
diff --git a/synapse/replication/resource.py b/synapse/replication/resource.py
index 847f212a3d..8c2d487ff4 100644
--- a/synapse/replication/resource.py
+++ b/synapse/replication/resource.py
@@ -16,6 +16,7 @@
 from synapse.http.servlet import parse_integer, parse_string
 from synapse.http.server import request_handler, finish_request
 from synapse.replication.pusher_resource import PusherResource
+from synapse.replication.presence_resource import PresenceResource
 
 from twisted.web.resource import Resource
 from twisted.web.server import NOT_DONE_YET
@@ -115,6 +116,7 @@ class ReplicationResource(Resource):
         self.clock = hs.get_clock()
 
         self.putChild("remove_pushers", PusherResource(hs))
+        self.putChild("syncing_users", PresenceResource(hs))
 
     def render_GET(self, request):
         self._async_render_GET(request)
diff --git a/synapse/replication/slave/storage/account_data.py b/synapse/replication/slave/storage/account_data.py
index f59b0eabbc..735c03c7eb 100644
--- a/synapse/replication/slave/storage/account_data.py
+++ b/synapse/replication/slave/storage/account_data.py
@@ -15,7 +15,10 @@
 
 from ._base import BaseSlavedStore
 from ._slaved_id_tracker import SlavedIdTracker
+from synapse.storage import DataStore
 from synapse.storage.account_data import AccountDataStore
+from synapse.storage.tags import TagsStore
+from synapse.util.caches.stream_change_cache import StreamChangeCache
 
 
 class SlavedAccountDataStore(BaseSlavedStore):
@@ -25,6 +28,14 @@ class SlavedAccountDataStore(BaseSlavedStore):
         self._account_data_id_gen = SlavedIdTracker(
             db_conn, "account_data_max_stream_id", "stream_id",
         )
+        self._account_data_stream_cache = StreamChangeCache(
+            "AccountDataAndTagsChangeCache",
+            self._account_data_id_gen.get_current_token(),
+        )
+
+    get_account_data_for_user = (
+        AccountDataStore.__dict__["get_account_data_for_user"]
+    )
 
     get_global_account_data_by_type_for_users = (
         AccountDataStore.__dict__["get_global_account_data_by_type_for_users"]
@@ -34,6 +45,16 @@ class SlavedAccountDataStore(BaseSlavedStore):
         AccountDataStore.__dict__["get_global_account_data_by_type_for_user"]
     )
 
+    get_tags_for_user = TagsStore.__dict__["get_tags_for_user"]
+
+    get_updated_tags = DataStore.get_updated_tags.__func__
+    get_updated_account_data_for_user = (
+        DataStore.get_updated_account_data_for_user.__func__
+    )
+
+    def get_max_account_data_stream_id(self):
+        return self._account_data_id_gen.get_current_token()
+
     def stream_positions(self):
         result = super(SlavedAccountDataStore, self).stream_positions()
         position = self._account_data_id_gen.get_current_token()
@@ -47,15 +68,33 @@ class SlavedAccountDataStore(BaseSlavedStore):
         if stream:
             self._account_data_id_gen.advance(int(stream["position"]))
             for row in stream["rows"]:
-                user_id, data_type = row[1:3]
+                position, user_id, data_type = row[:3]
                 self.get_global_account_data_by_type_for_user.invalidate(
                     (data_type, user_id,)
                 )
+                self.get_account_data_for_user.invalidate((user_id,))
+                self._account_data_stream_cache.entity_has_changed(
+                    user_id, position
+                )
 
         stream = result.get("room_account_data")
         if stream:
             self._account_data_id_gen.advance(int(stream["position"]))
+            for row in stream["rows"]:
+                position, user_id = row[:2]
+                self.get_account_data_for_user.invalidate((user_id,))
+                self._account_data_stream_cache.entity_has_changed(
+                    user_id, position
+                )
 
         stream = result.get("tag_account_data")
         if stream:
             self._account_data_id_gen.advance(int(stream["position"]))
+            for row in stream["rows"]:
+                position, user_id = row[:2]
+                self.get_tags_for_user.invalidate((user_id,))
+                self._account_data_stream_cache.entity_has_changed(
+                    user_id, position
+                )
+
+        return super(SlavedAccountDataStore, self).process_replication(result)
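
The StreamChangeCache wired up here is what lets a worker answer "has this user's account data changed since token X?" without hitting the database. A minimal sketch of the calls involved (token values invented):

    from synapse.util.caches.stream_change_cache import StreamChangeCache

    cache = StreamChangeCache("AccountDataAndTagsChangeCache", 100)
    # fed from replication rows, as in process_replication above:
    cache.entity_has_changed("@alice:example.org", 112)

    cache.has_entity_changed("@alice:example.org", 105)  # True: changed at 112
    cache.has_entity_changed("@alice:example.org", 120)  # False: nothing newer
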
diff --git a/synapse/replication/slave/storage/appservice.py b/synapse/replication/slave/storage/appservice.py
new file mode 100644
index 0000000000..25792d9429
--- /dev/null
+++ b/synapse/replication/slave/storage/appservice.py
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015, 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import BaseSlavedStore
+from synapse.storage import DataStore
+from synapse.config.appservice import load_appservices
+
+
+class SlavedApplicationServiceStore(BaseSlavedStore):
+    def __init__(self, db_conn, hs):
+        super(SlavedApplicationServiceStore, self).__init__(db_conn, hs)
+        self.services_cache = load_appservices(
+            hs.config.server_name,
+            hs.config.app_service_config_files
+        )
+
+    get_app_service_by_token = DataStore.get_app_service_by_token.__func__
+    get_app_service_by_user_id = DataStore.get_app_service_by_user_id.__func__
diff --git a/synapse/replication/slave/storage/directory.py b/synapse/replication/slave/storage/directory.py
new file mode 100644
index 0000000000..5fbe3a303a
--- /dev/null
+++ b/synapse/replication/slave/storage/directory.py
@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015, 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import BaseSlavedStore
+from synapse.storage.directory import DirectoryStore
+
+
+class DirectoryStore(BaseSlavedStore):
+    get_aliases_for_room = DirectoryStore.__dict__[
+        "get_aliases_for_room"
+    ].orig
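
The new class deliberately shadows the DirectoryStore it imports: a class body executes before the class name is rebound, so the __dict__ lookup still hits the master store, and .orig is the uncached function stashed by the @cached decorator. A toy demonstration of that scoping rule (names invented; the same pattern recurs in the TransactionStore below):

    class Store(object):
        marker = "master"

    class Store(object):
        # the body runs before "Store" is rebound, so this lookup
        # resolves to the original Store above
        borrowed = Store.__dict__["marker"]

    print(Store.borrowed)  # master
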
diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py
index c0d741452d..f4f31f2d27 100644
--- a/synapse/replication/slave/storage/events.py
+++ b/synapse/replication/slave/storage/events.py
@@ -18,11 +18,11 @@ from ._slaved_id_tracker import SlavedIdTracker
 from synapse.api.constants import EventTypes
 from synapse.events import FrozenEvent
 from synapse.storage import DataStore
-from synapse.storage.room import RoomStore
 from synapse.storage.roommember import RoomMemberStore
 from synapse.storage.event_federation import EventFederationStore
 from synapse.storage.event_push_actions import EventPushActionsStore
 from synapse.storage.state import StateStore
+from synapse.storage.stream import StreamStore
 from synapse.util.caches.stream_change_cache import StreamChangeCache
 
 import ujson as json
@@ -57,10 +57,12 @@ class SlavedEventStore(BaseSlavedStore):
             "EventsRoomStreamChangeCache", min_event_val,
             prefilled_cache=event_cache_prefill,
         )
+        self._membership_stream_cache = StreamChangeCache(
+            "MembershipStreamChangeCache", events_max,
+        )
 
     # Cached functions can't be accessed through a class instance so we need
     # to reach inside the __dict__ to extract them.
-    get_room_name_and_aliases = RoomStore.__dict__["get_room_name_and_aliases"]
     get_rooms_for_user = RoomMemberStore.__dict__["get_rooms_for_user"]
     get_users_in_room = RoomMemberStore.__dict__["get_users_in_room"]
     get_latest_event_ids_in_room = EventFederationStore.__dict__[
@@ -87,9 +89,15 @@ class SlavedEventStore(BaseSlavedStore):
     _get_state_group_from_group = (
         StateStore.__dict__["_get_state_group_from_group"]
     )
+    get_recent_event_ids_for_room = (
+        StreamStore.__dict__["get_recent_event_ids_for_room"]
+    )
 
-    get_unread_push_actions_for_user_in_range = (
-        DataStore.get_unread_push_actions_for_user_in_range.__func__
+    get_unread_push_actions_for_user_in_range_for_http = (
+        DataStore.get_unread_push_actions_for_user_in_range_for_http.__func__
+    )
+    get_unread_push_actions_for_user_in_range_for_email = (
+        DataStore.get_unread_push_actions_for_user_in_range_for_email.__func__
     )
     get_push_action_users_in_range = (
         DataStore.get_push_action_users_in_range.__func__
@@ -109,24 +117,25 @@ class SlavedEventStore(BaseSlavedStore):
         DataStore.get_room_events_stream_for_room.__func__
     )
     get_events_around = DataStore.get_events_around.__func__
+    get_state_for_event = DataStore.get_state_for_event.__func__
     get_state_for_events = DataStore.get_state_for_events.__func__
     get_state_groups = DataStore.get_state_groups.__func__
+    get_recent_events_for_room = DataStore.get_recent_events_for_room.__func__
+    get_room_events_stream_for_rooms = (
+        DataStore.get_room_events_stream_for_rooms.__func__
+    )
+    get_stream_token_for_event = DataStore.get_stream_token_for_event.__func__
 
-    _set_before_and_after = DataStore._set_before_and_after
+    _set_before_and_after = staticmethod(DataStore._set_before_and_after)
 
     _get_events = DataStore._get_events.__func__
     _get_events_from_cache = DataStore._get_events_from_cache.__func__
 
     _invalidate_get_event_cache = DataStore._invalidate_get_event_cache.__func__
-    _parse_events_txn = DataStore._parse_events_txn.__func__
-    _get_events_txn = DataStore._get_events_txn.__func__
-    _get_event_txn = DataStore._get_event_txn.__func__
     _enqueue_events = DataStore._enqueue_events.__func__
     _do_fetch = DataStore._do_fetch.__func__
-    _fetch_events_txn = DataStore._fetch_events_txn.__func__
     _fetch_event_rows = DataStore._fetch_event_rows.__func__
     _get_event_from_row = DataStore._get_event_from_row.__func__
-    _get_event_from_row_txn = DataStore._get_event_from_row_txn.__func__
     _get_rooms_for_user_where_membership_is_txn = (
         DataStore._get_rooms_for_user_where_membership_is_txn.__func__
     )
@@ -136,6 +145,15 @@ class SlavedEventStore(BaseSlavedStore):
     _get_events_around_txn = DataStore._get_events_around_txn.__func__
     _get_some_state_from_cache = DataStore._get_some_state_from_cache.__func__
 
+    get_backfill_events = DataStore.get_backfill_events.__func__
+    _get_backfill_events = DataStore._get_backfill_events.__func__
+    get_missing_events = DataStore.get_missing_events.__func__
+    _get_missing_events = DataStore._get_missing_events.__func__
+
+    get_auth_chain = DataStore.get_auth_chain.__func__
+    get_auth_chain_ids = DataStore.get_auth_chain_ids.__func__
+    _get_auth_chain_ids_txn = DataStore._get_auth_chain_ids_txn.__func__
+
     def stream_positions(self):
         result = super(SlavedEventStore, self).stream_positions()
         result["events"] = self._stream_id_gen.get_current_token()
@@ -194,7 +212,6 @@ class SlavedEventStore(BaseSlavedStore):
             self.get_rooms_for_user.invalidate_all()
             self.get_users_in_room.invalidate((event.room_id,))
             # self.get_joined_hosts_for_room.invalidate((event.room_id,))
-            self.get_room_name_and_aliases.invalidate((event.room_id,))
 
         self._invalidate_get_event_cache(event.event_id)
 
@@ -220,9 +237,9 @@ class SlavedEventStore(BaseSlavedStore):
             self.get_rooms_for_user.invalidate((event.state_key,))
             # self.get_joined_hosts_for_room.invalidate((event.room_id,))
             self.get_users_in_room.invalidate((event.room_id,))
-            # self._membership_stream_cache.entity_has_changed(
-            #    event.state_key, event.internal_metadata.stream_ordering
-            # )
+            self._membership_stream_cache.entity_has_changed(
+                event.state_key, event.internal_metadata.stream_ordering
+            )
             self.get_invited_rooms_for_user.invalidate((event.state_key,))
 
         if not event.is_state():
@@ -238,9 +255,3 @@ class SlavedEventStore(BaseSlavedStore):
         self._get_current_state_for_key.invalidate((
             event.room_id, event.type, event.state_key
         ))
-
-        if event.type in [EventTypes.Name, EventTypes.Aliases]:
-            self.get_room_name_and_aliases.invalidate(
-                (event.room_id,)
-            )
-            pass
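
The __func__/__dict__ gymnastics throughout this file follow the pattern noted in the comment above: plain Python 2 methods are unwrapped with __func__ so they re-bind against the slave class, while @cached methods are descriptor objects that must be pulled straight out of the class __dict__. A toy sketch of the plain-method case (names invented):

    class MasterStore(object):
        def get_thing(self, key):
            return "thing-%s" % (key,)

    class SlavedStore(object):
        # Python 2: MasterStore.get_thing is an unbound method; __func__
        # recovers the raw function so it binds to SlavedStore instances
        get_thing = MasterStore.get_thing.__func__

    print(SlavedStore().get_thing("a"))  # thing-a
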
diff --git a/synapse/replication/slave/storage/filtering.py b/synapse/replication/slave/storage/filtering.py
new file mode 100644
index 0000000000..819ed62881
--- /dev/null
+++ b/synapse/replication/slave/storage/filtering.py
@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015, 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import BaseSlavedStore
+from synapse.storage.filtering import FilteringStore
+
+
+class SlavedFilteringStore(BaseSlavedStore):
+    def __init__(self, db_conn, hs):
+        super(SlavedFilteringStore, self).__init__(db_conn, hs)
+
+    # Filters are immutable so this cache doesn't need to be expired
+    get_user_filter = FilteringStore.__dict__["get_user_filter"]
diff --git a/synapse/replication/slave/storage/keys.py b/synapse/replication/slave/storage/keys.py
new file mode 100644
index 0000000000..dd2ae49e48
--- /dev/null
+++ b/synapse/replication/slave/storage/keys.py
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015, 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import BaseSlavedStore
+from synapse.storage import DataStore
+from synapse.storage.keys import KeyStore
+
+
+class SlavedKeyStore(BaseSlavedStore):
+    _get_server_verify_key = KeyStore.__dict__[
+        "_get_server_verify_key"
+    ]
+
+    get_server_verify_keys = DataStore.get_server_verify_keys.__func__
+    store_server_verify_key = DataStore.store_server_verify_key.__func__
+
+    get_server_certificate = DataStore.get_server_certificate.__func__
+    store_server_certificate = DataStore.store_server_certificate.__func__
+
+    get_server_keys_json = DataStore.get_server_keys_json.__func__
+    store_server_keys_json = DataStore.store_server_keys_json.__func__
diff --git a/synapse/replication/slave/storage/presence.py b/synapse/replication/slave/storage/presence.py
new file mode 100644
index 0000000000..703f4a49bf
--- /dev/null
+++ b/synapse/replication/slave/storage/presence.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import BaseSlavedStore
+from ._slaved_id_tracker import SlavedIdTracker
+
+from synapse.util.caches.stream_change_cache import StreamChangeCache
+from synapse.storage import DataStore
+
+
+class SlavedPresenceStore(BaseSlavedStore):
+    def __init__(self, db_conn, hs):
+        super(SlavedPresenceStore, self).__init__(db_conn, hs)
+        self._presence_id_gen = SlavedIdTracker(
+            db_conn, "presence_stream", "stream_id",
+        )
+
+        self._presence_on_startup = self._get_active_presence(db_conn)
+
+        self.presence_stream_cache = StreamChangeCache(
+            "PresenceStreamChangeCache", self._presence_id_gen.get_current_token()
+        )
+
+    _get_active_presence = DataStore._get_active_presence.__func__
+    take_presence_startup_info = DataStore.take_presence_startup_info.__func__
+    get_presence_for_users = DataStore.get_presence_for_users.__func__
+
+    def get_current_presence_token(self):
+        return self._presence_id_gen.get_current_token()
+
+    def stream_positions(self):
+        result = super(SlavedPresenceStore, self).stream_positions()
+        position = self._presence_id_gen.get_current_token()
+        result["presence"] = position
+        return result
+
+    def process_replication(self, result):
+        stream = result.get("presence")
+        if stream:
+            self._presence_id_gen.advance(int(stream["position"]))
+            for row in stream["rows"]:
+                position, user_id = row[:2]
+                self.presence_stream_cache.entity_has_changed(
+                    user_id, position
+                )
+
+        return super(SlavedPresenceStore, self).process_replication(result)
diff --git a/synapse/replication/slave/storage/push_rule.py b/synapse/replication/slave/storage/push_rule.py
new file mode 100644
index 0000000000..21ceb0213a
--- /dev/null
+++ b/synapse/replication/slave/storage/push_rule.py
@@ -0,0 +1,67 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015, 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .events import SlavedEventStore
+from ._slaved_id_tracker import SlavedIdTracker
+from synapse.storage import DataStore
+from synapse.storage.push_rule import PushRuleStore
+from synapse.util.caches.stream_change_cache import StreamChangeCache
+
+
+class SlavedPushRuleStore(SlavedEventStore):
+    def __init__(self, db_conn, hs):
+        super(SlavedPushRuleStore, self).__init__(db_conn, hs)
+        self._push_rules_stream_id_gen = SlavedIdTracker(
+            db_conn, "push_rules_stream", "stream_id",
+        )
+        self.push_rules_stream_cache = StreamChangeCache(
+            "PushRulesStreamChangeCache",
+            self._push_rules_stream_id_gen.get_current_token(),
+        )
+
+    get_push_rules_for_user = PushRuleStore.__dict__["get_push_rules_for_user"]
+    get_push_rules_enabled_for_user = (
+        PushRuleStore.__dict__["get_push_rules_enabled_for_user"]
+    )
+    have_push_rules_changed_for_user = (
+        DataStore.have_push_rules_changed_for_user.__func__
+    )
+
+    def get_push_rules_stream_token(self):
+        return (
+            self._push_rules_stream_id_gen.get_current_token(),
+            self._stream_id_gen.get_current_token(),
+        )
+
+    def stream_positions(self):
+        result = super(SlavedPushRuleStore, self).stream_positions()
+        result["push_rules"] = self._push_rules_stream_id_gen.get_current_token()
+        return result
+
+    def process_replication(self, result):
+        stream = result.get("push_rules")
+        if stream:
+            for row in stream["rows"]:
+                position = row[0]
+                user_id = row[2]
+                self.get_push_rules_for_user.invalidate((user_id,))
+                self.get_push_rules_enabled_for_user.invalidate((user_id,))
+                self.push_rules_stream_cache.entity_has_changed(
+                    user_id, position
+                )
+
+            self._push_rules_stream_id_gen.advance(int(stream["position"]))
+
+        return super(SlavedPushRuleStore, self).process_replication(result)
diff --git a/synapse/replication/slave/storage/receipts.py b/synapse/replication/slave/storage/receipts.py
index ec007516d0..ac9662d399 100644
--- a/synapse/replication/slave/storage/receipts.py
+++ b/synapse/replication/slave/storage/receipts.py
@@ -18,6 +18,7 @@ from ._slaved_id_tracker import SlavedIdTracker
 
 from synapse.storage import DataStore
 from synapse.storage.receipts import ReceiptsStore
+from synapse.util.caches.stream_change_cache import StreamChangeCache
 
 # So, um, we want to borrow a load of functions intended for reading from
 # a DataStore, but we don't want to take functions that either write to the
@@ -37,11 +38,28 @@ class SlavedReceiptsStore(BaseSlavedStore):
             db_conn, "receipts_linearized", "stream_id"
         )
 
+        self._receipts_stream_cache = StreamChangeCache(
+            "ReceiptsRoomChangeCache", self._receipts_id_gen.get_current_token()
+        )
+
     get_receipts_for_user = ReceiptsStore.__dict__["get_receipts_for_user"]
+    get_linearized_receipts_for_room = (
+        ReceiptsStore.__dict__["get_linearized_receipts_for_room"]
+    )
+    _get_linearized_receipts_for_rooms = (
+        ReceiptsStore.__dict__["_get_linearized_receipts_for_rooms"]
+    )
+    get_last_receipt_event_id_for_user = (
+        ReceiptsStore.__dict__["get_last_receipt_event_id_for_user"]
+    )
 
     get_max_receipt_stream_id = DataStore.get_max_receipt_stream_id.__func__
     get_all_updated_receipts = DataStore.get_all_updated_receipts.__func__
 
+    get_linearized_receipts_for_rooms = (
+        DataStore.get_linearized_receipts_for_rooms.__func__
+    )
+
     def stream_positions(self):
         result = super(SlavedReceiptsStore, self).stream_positions()
         result["receipts"] = self._receipts_id_gen.get_current_token()
@@ -52,10 +70,15 @@ class SlavedReceiptsStore(BaseSlavedStore):
         if stream:
             self._receipts_id_gen.advance(int(stream["position"]))
             for row in stream["rows"]:
-                room_id, receipt_type, user_id = row[1:4]
+                position, room_id, receipt_type, user_id = row[:4]
                 self.invalidate_caches_for_receipt(room_id, receipt_type, user_id)
+                self._receipts_stream_cache.entity_has_changed(room_id, position)
 
         return super(SlavedReceiptsStore, self).process_replication(result)
 
     def invalidate_caches_for_receipt(self, room_id, receipt_type, user_id):
         self.get_receipts_for_user.invalidate((user_id, receipt_type))
+        self.get_linearized_receipts_for_room.invalidate_many((room_id,))
+        self.get_last_receipt_event_id_for_user.invalidate(
+            (user_id, room_id, receipt_type)
+        )
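
The row handling above reflects the new convention that a replication row leads with its stream position. An illustrative receipts row (values invented):

    row = (42, "!room:example.org", "m.read", "@alice:example.org")
    position, room_id, receipt_type, user_id = row[:4]
    # position feeds the StreamChangeCache; the rest invalidate caches
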
diff --git a/synapse/replication/slave/storage/registration.py b/synapse/replication/slave/storage/registration.py
new file mode 100644
index 0000000000..307833f9e1
--- /dev/null
+++ b/synapse/replication/slave/storage/registration.py
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015, 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import BaseSlavedStore
+from synapse.storage import DataStore
+from synapse.storage.registration import RegistrationStore
+
+
+class SlavedRegistrationStore(BaseSlavedStore):
+    def __init__(self, db_conn, hs):
+        super(SlavedRegistrationStore, self).__init__(db_conn, hs)
+
+    # TODO: use the cached version and invalidate deleted tokens
+    get_user_by_access_token = RegistrationStore.__dict__[
+        "get_user_by_access_token"
+    ].orig
+
+    _query_for_auth = DataStore._query_for_auth.__func__
diff --git a/synapse/replication/slave/storage/room.py b/synapse/replication/slave/storage/room.py
new file mode 100644
index 0000000000..d5bb0f98ea
--- /dev/null
+++ b/synapse/replication/slave/storage/room.py
@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015, 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import BaseSlavedStore
+from synapse.storage import DataStore
+
+
+class RoomStore(BaseSlavedStore):
+    get_public_room_ids = DataStore.get_public_room_ids.__func__
diff --git a/synapse/replication/slave/storage/transactions.py b/synapse/replication/slave/storage/transactions.py
new file mode 100644
index 0000000000..6f2ba98af5
--- /dev/null
+++ b/synapse/replication/slave/storage/transactions.py
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015, 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+from ._base import BaseSlavedStore
+from synapse.storage import DataStore
+from synapse.storage.transactions import TransactionStore
+
+
+class TransactionStore(BaseSlavedStore):
+    get_destination_retry_timings = TransactionStore.__dict__[
+        "get_destination_retry_timings"
+    ].orig
+    _get_destination_retry_timings = DataStore._get_destination_retry_timings.__func__
+
+    # For now, don't record the destination retry timings
+    def set_destination_retry_timings(*args, **kwargs):
+        return defer.succeed(None)
diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py
index c729dee47a..7c23f5a4a8 100644
--- a/synapse/rest/__init__.py
+++ b/synapse/rest/__init__.py
@@ -47,6 +47,7 @@ from synapse.rest.client.v2_alpha import (
     report_event,
     openid,
     notifications,
+    devices,
 )
 
 from synapse.http.server import JsonResource
@@ -92,3 +93,4 @@ class ClientRestResource(JsonResource):
         report_event.register_servlets(hs, client_resource)
         openid.register_servlets(hs, client_resource)
         notifications.register_servlets(hs, client_resource)
+        devices.register_servlets(hs, client_resource)
diff --git a/synapse/rest/client/v1/admin.py b/synapse/rest/client/v1/admin.py
index aa05b3f023..b0cb31a448 100644
--- a/synapse/rest/client/v1/admin.py
+++ b/synapse/rest/client/v1/admin.py
@@ -46,5 +46,82 @@ class WhoisRestServlet(ClientV1RestServlet):
         defer.returnValue((200, ret))
 
 
+class PurgeMediaCacheRestServlet(ClientV1RestServlet):
+    PATTERNS = client_path_patterns("/admin/purge_media_cache")
+
+    def __init__(self, hs):
+        self.media_repository = hs.get_media_repository()
+        super(PurgeMediaCacheRestServlet, self).__init__(hs)
+
+    @defer.inlineCallbacks
+    def on_POST(self, request):
+        requester = yield self.auth.get_user_by_req(request)
+        is_admin = yield self.auth.is_server_admin(requester.user)
+
+        if not is_admin:
+            raise AuthError(403, "You are not a server admin")
+
+        before_ts = request.args.get("before_ts", None)
+        if not before_ts:
+            raise SynapseError(400, "Missing 'before_ts' arg")
+
+        logger.info("before_ts: %r", before_ts[0])
+
+        try:
+            before_ts = int(before_ts[0])
+        except Exception:
+            raise SynapseError(400, "Invalid 'before_ts' arg")
+
+        ret = yield self.media_repository.delete_old_remote_media(before_ts)
+
+        defer.returnValue((200, ret))
+
+
+class PurgeHistoryRestServlet(ClientV1RestServlet):
+    PATTERNS = client_path_patterns(
+        "/admin/purge_history/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
+    )
+
+    @defer.inlineCallbacks
+    def on_POST(self, request, room_id, event_id):
+        requester = yield self.auth.get_user_by_req(request)
+        is_admin = yield self.auth.is_server_admin(requester.user)
+
+        if not is_admin:
+            raise AuthError(403, "You are not a server admin")
+
+        yield self.handlers.message_handler.purge_history(room_id, event_id)
+
+        defer.returnValue((200, {}))
+
+
+class DeactivateAccountRestServlet(ClientV1RestServlet):
+    PATTERNS = client_path_patterns("/admin/deactivate/(?P<target_user_id>[^/]*)")
+
+    def __init__(self, hs):
+        self.store = hs.get_datastore()
+        super(DeactivateAccountRestServlet, self).__init__(hs)
+
+    @defer.inlineCallbacks
+    def on_POST(self, request, target_user_id):
+        UserID.from_string(target_user_id)
+        requester = yield self.auth.get_user_by_req(request)
+        is_admin = yield self.auth.is_server_admin(requester.user)
+
+        if not is_admin:
+            raise AuthError(403, "You are not a server admin")
+
+        # FIXME: Theoretically there is a race here wherein user resets password
+        # using threepid.
+        yield self.store.user_delete_access_tokens(target_user_id)
+        yield self.store.user_delete_threepids(target_user_id)
+        yield self.store.user_set_password_hash(target_user_id, None)
+
+        defer.returnValue((200, {}))
+
+
 def register_servlets(hs, http_server):
     WhoisRestServlet(hs).register(http_server)
+    PurgeMediaCacheRestServlet(hs).register(http_server)
+    DeactivateAccountRestServlet(hs).register(http_server)
+    PurgeHistoryRestServlet(hs).register(http_server)
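
A hedged usage sketch for the new purge endpoint (Python 2; base URL, token and timestamp invented; the caller must be a server admin, and before_ts is in milliseconds):

    import urllib
    import urllib2

    qs = urllib.urlencode({
        "access_token": "<server_admin_token>",
        "before_ts": 1467814200000,  # purge remote media cached before this
    })
    req = urllib2.Request(
        "http://localhost:8008/_matrix/client/r0/admin/purge_media_cache?" + qs,
        data="",  # non-None data makes urllib2 issue a POST
    )
    print(urllib2.urlopen(req).read())
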
diff --git a/synapse/rest/client/v1/base.py b/synapse/rest/client/v1/base.py
index 1c020b7e2c..96b49b01f2 100644
--- a/synapse/rest/client/v1/base.py
+++ b/synapse/rest/client/v1/base.py
@@ -52,6 +52,10 @@ class ClientV1RestServlet(RestServlet):
     """
 
     def __init__(self, hs):
+        """
+        Args:
+            hs (synapse.server.HomeServer):
+        """
         self.hs = hs
         self.handlers = hs.get_handlers()
         self.builder_factory = hs.get_event_builder_factory()
diff --git a/synapse/rest/client/v1/events.py b/synapse/rest/client/v1/events.py
index d1afa0f0d5..498bb9e18a 100644
--- a/synapse/rest/client/v1/events.py
+++ b/synapse/rest/client/v1/events.py
@@ -45,30 +45,27 @@ class EventStreamRestServlet(ClientV1RestServlet):
                 raise SynapseError(400, "Guest users must specify room_id param")
         if "room_id" in request.args:
             room_id = request.args["room_id"][0]
-        try:
-            handler = self.handlers.event_stream_handler
-            pagin_config = PaginationConfig.from_request(request)
-            timeout = EventStreamRestServlet.DEFAULT_LONGPOLL_TIME_MS
-            if "timeout" in request.args:
-                try:
-                    timeout = int(request.args["timeout"][0])
-                except ValueError:
-                    raise SynapseError(400, "timeout must be in milliseconds.")
-
-            as_client_event = "raw" not in request.args
-
-            chunk = yield handler.get_stream(
-                requester.user.to_string(),
-                pagin_config,
-                timeout=timeout,
-                as_client_event=as_client_event,
-                affect_presence=(not is_guest),
-                room_id=room_id,
-                is_guest=is_guest,
-            )
-        except:
-            logger.exception("Event stream failed")
-            raise
+
+        handler = self.handlers.event_stream_handler
+        pagin_config = PaginationConfig.from_request(request)
+        timeout = EventStreamRestServlet.DEFAULT_LONGPOLL_TIME_MS
+        if "timeout" in request.args:
+            try:
+                timeout = int(request.args["timeout"][0])
+            except ValueError:
+                raise SynapseError(400, "timeout must be in milliseconds.")
+
+        as_client_event = "raw" not in request.args
+
+        chunk = yield handler.get_stream(
+            requester.user.to_string(),
+            pagin_config,
+            timeout=timeout,
+            as_client_event=as_client_event,
+            affect_presence=(not is_guest),
+            room_id=room_id,
+            is_guest=is_guest,
+        )
 
         defer.returnValue((200, chunk))
 
diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py
index 3b5544851b..b31e27f7b3 100644
--- a/synapse/rest/client/v1/login.py
+++ b/synapse/rest/client/v1/login.py
@@ -54,10 +54,8 @@ class LoginRestServlet(ClientV1RestServlet):
         self.jwt_secret = hs.config.jwt_secret
         self.jwt_algorithm = hs.config.jwt_algorithm
         self.cas_enabled = hs.config.cas_enabled
-        self.cas_server_url = hs.config.cas_server_url
-        self.cas_required_attributes = hs.config.cas_required_attributes
-        self.servername = hs.config.server_name
-        self.http_client = hs.get_simple_http_client()
+        self.auth_handler = self.hs.get_auth_handler()
+        self.device_handler = self.hs.get_device_handler()
 
     def on_GET(self, request):
         flows = []
@@ -108,17 +106,6 @@ class LoginRestServlet(ClientV1RestServlet):
                                        LoginRestServlet.JWT_TYPE):
                 result = yield self.do_jwt_login(login_submission)
                 defer.returnValue(result)
-            # TODO Delete this after all CAS clients switch to token login instead
-            elif self.cas_enabled and (login_submission["type"] ==
-                                       LoginRestServlet.CAS_TYPE):
-                uri = "%s/proxyValidate" % (self.cas_server_url,)
-                args = {
-                    "ticket": login_submission["ticket"],
-                    "service": login_submission["service"]
-                }
-                body = yield self.http_client.get_raw(uri, args)
-                result = yield self.do_cas_login(body)
-                defer.returnValue(result)
             elif login_submission["type"] == LoginRestServlet.TOKEN_TYPE:
                 result = yield self.do_token_login(login_submission)
                 defer.returnValue(result)
@@ -143,16 +130,24 @@ class LoginRestServlet(ClientV1RestServlet):
                 user_id, self.hs.hostname
             ).to_string()
 
-        auth_handler = self.handlers.auth_handler
-        user_id, access_token, refresh_token = yield auth_handler.login_with_password(
+        auth_handler = self.auth_handler
+        user_id = yield auth_handler.validate_password_login(
             user_id=user_id,
-            password=login_submission["password"])
-
+            password=login_submission["password"],
+        )
+        device_id = yield self._register_device(user_id, login_submission)
+        access_token, refresh_token = (
+            yield auth_handler.get_login_tuple_for_user_id(
+                user_id, device_id,
+                login_submission.get("initial_device_display_name")
+            )
+        )
         result = {
             "user_id": user_id,  # may have changed
             "access_token": access_token,
             "refresh_token": refresh_token,
             "home_server": self.hs.hostname,
+            "device_id": device_id,
         }
 
         defer.returnValue((200, result))
@@ -160,65 +155,27 @@ class LoginRestServlet(ClientV1RestServlet):
     @defer.inlineCallbacks
     def do_token_login(self, login_submission):
         token = login_submission['token']
-        auth_handler = self.handlers.auth_handler
+        auth_handler = self.auth_handler
         user_id = (
             yield auth_handler.validate_short_term_login_token_and_get_user_id(token)
         )
-        user_id, access_token, refresh_token = (
-            yield auth_handler.get_login_tuple_for_user_id(user_id)
+        device_id = yield self._register_device(user_id, login_submission)
+        access_token, refresh_token = (
+            yield auth_handler.get_login_tuple_for_user_id(
+                user_id, device_id,
+                login_submission.get("initial_device_display_name")
+            )
         )
         result = {
             "user_id": user_id,  # may have changed
             "access_token": access_token,
             "refresh_token": refresh_token,
             "home_server": self.hs.hostname,
+            "device_id": device_id,
         }
 
         defer.returnValue((200, result))
 
-    # TODO Delete this after all CAS clients switch to token login instead
-    @defer.inlineCallbacks
-    def do_cas_login(self, cas_response_body):
-        user, attributes = self.parse_cas_response(cas_response_body)
-
-        for required_attribute, required_value in self.cas_required_attributes.items():
-            # If required attribute was not in CAS Response - Forbidden
-            if required_attribute not in attributes:
-                raise LoginError(401, "Unauthorized", errcode=Codes.UNAUTHORIZED)
-
-            # Also need to check value
-            if required_value is not None:
-                actual_value = attributes[required_attribute]
-                # If required attribute value does not match expected - Forbidden
-                if required_value != actual_value:
-                    raise LoginError(401, "Unauthorized", errcode=Codes.UNAUTHORIZED)
-
-        user_id = UserID.create(user, self.hs.hostname).to_string()
-        auth_handler = self.handlers.auth_handler
-        user_exists = yield auth_handler.does_user_exist(user_id)
-        if user_exists:
-            user_id, access_token, refresh_token = (
-                yield auth_handler.get_login_tuple_for_user_id(user_id)
-            )
-            result = {
-                "user_id": user_id,  # may have changed
-                "access_token": access_token,
-                "refresh_token": refresh_token,
-                "home_server": self.hs.hostname,
-            }
-
-        else:
-            user_id, access_token = (
-                yield self.handlers.registration_handler.register(localpart=user)
-            )
-            result = {
-                "user_id": user_id,  # may have changed
-                "access_token": access_token,
-                "home_server": self.hs.hostname,
-            }
-
-        defer.returnValue((200, result))
-
     @defer.inlineCallbacks
     def do_jwt_login(self, login_submission):
         token = login_submission.get("token", None)
@@ -243,19 +200,28 @@ class LoginRestServlet(ClientV1RestServlet):
             raise LoginError(401, "Invalid JWT", errcode=Codes.UNAUTHORIZED)
 
         user_id = UserID.create(user, self.hs.hostname).to_string()
-        auth_handler = self.handlers.auth_handler
-        user_exists = yield auth_handler.does_user_exist(user_id)
-        if user_exists:
-            user_id, access_token, refresh_token = (
-                yield auth_handler.get_login_tuple_for_user_id(user_id)
+        auth_handler = self.auth_handler
+        registered_user_id = yield auth_handler.check_user_exists(user_id)
+        if registered_user_id:
+            device_id = yield self._register_device(
+                registered_user_id, login_submission
+            )
+            access_token, refresh_token = (
+                yield auth_handler.get_login_tuple_for_user_id(
+                    registered_user_id, device_id,
+                    login_submission.get("initial_device_display_name")
+                )
             )
             result = {
-                "user_id": user_id,  # may have changed
+                "user_id": registered_user_id,
                 "access_token": access_token,
                 "refresh_token": refresh_token,
                 "home_server": self.hs.hostname,
             }
         else:
+            # TODO: we should probably check that the register isn't going
+            # to fail or change our user_id before registering the device
+            device_id = yield self._register_device(user_id, login_submission)
             user_id, access_token = (
                 yield self.handlers.registration_handler.register(localpart=user)
             )
@@ -267,32 +233,25 @@ class LoginRestServlet(ClientV1RestServlet):
 
         defer.returnValue((200, result))
 
-    # TODO Delete this after all CAS clients switch to token login instead
-    def parse_cas_response(self, cas_response_body):
-        root = ET.fromstring(cas_response_body)
-        if not root.tag.endswith("serviceResponse"):
-            raise LoginError(401, "Invalid CAS response", errcode=Codes.UNAUTHORIZED)
-        if not root[0].tag.endswith("authenticationSuccess"):
-            raise LoginError(401, "Unsuccessful CAS response", errcode=Codes.UNAUTHORIZED)
-        for child in root[0]:
-            if child.tag.endswith("user"):
-                user = child.text
-            if child.tag.endswith("attributes"):
-                attributes = {}
-                for attribute in child:
-                    # ElementTree library expands the namespace in attribute tags
-                    # to the full URL of the namespace.
-                    # See (https://docs.python.org/2/library/xml.etree.elementtree.html)
-                    # We don't care about namespace here and it will always be encased in
-                    # curly braces, so we remove them.
-                    if "}" in attribute.tag:
-                        attributes[attribute.tag.split("}")[1]] = attribute.text
-                    else:
-                        attributes[attribute.tag] = attribute.text
-        if user is None or attributes is None:
-            raise LoginError(401, "Invalid CAS response", errcode=Codes.UNAUTHORIZED)
-
-        return (user, attributes)
+    def _register_device(self, user_id, login_submission):
+        """Register a device for a user.
+
+        This is called after the user's credentials have been validated, but
+        before the access token has been issued.
+
+        Args:
+            user_id (str): full canonical @user:id
+            login_submission (dict): dictionary supplied to /login call, from
+               which we pull device_id and initial_device_display_name
+        Returns:
+            defer.Deferred: (str) device_id
+        """
+        device_id = login_submission.get("device_id")
+        initial_display_name = login_submission.get(
+            "initial_device_display_name")
+        return self.device_handler.check_device_registered(
+            user_id, device_id, initial_display_name
+        )
 
 
 class SAML2RestServlet(ClientV1RestServlet):
@@ -338,18 +297,6 @@ class SAML2RestServlet(ClientV1RestServlet):
         defer.returnValue((200, {"status": "not_authenticated"}))
 
 
-# TODO Delete this after all CAS clients switch to token login instead
-class CasRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/login/cas", releases=())
-
-    def __init__(self, hs):
-        super(CasRestServlet, self).__init__(hs)
-        self.cas_server_url = hs.config.cas_server_url
-
-    def on_GET(self, request):
-        return (200, {"serverUrl": self.cas_server_url})
-
-
 class CasRedirectServlet(ClientV1RestServlet):
     PATTERNS = client_path_patterns("/login/cas/redirect", releases=())
 
@@ -381,6 +328,7 @@ class CasTicketServlet(ClientV1RestServlet):
         self.cas_server_url = hs.config.cas_server_url
         self.cas_service_url = hs.config.cas_service_url
         self.cas_required_attributes = hs.config.cas_required_attributes
+        self.auth_handler = hs.get_auth_handler()
 
     @defer.inlineCallbacks
     def on_GET(self, request):
@@ -412,14 +360,14 @@ class CasTicketServlet(ClientV1RestServlet):
                     raise LoginError(401, "Unauthorized", errcode=Codes.UNAUTHORIZED)
 
         user_id = UserID.create(user, self.hs.hostname).to_string()
-        auth_handler = self.handlers.auth_handler
-        user_exists = yield auth_handler.does_user_exist(user_id)
-        if not user_exists:
-            user_id, _ = (
+        auth_handler = self.auth_handler
+        registered_user_id = yield auth_handler.check_user_exists(user_id)
+        if not registered_user_id:
+            registered_user_id, _ = (
                 yield self.handlers.registration_handler.register(localpart=user)
             )
 
-        login_token = auth_handler.generate_short_term_login_token(user_id)
+        login_token = auth_handler.generate_short_term_login_token(registered_user_id)
         redirect_url = self.add_login_token_to_redirect_url(client_redirect_url,
                                                             login_token)
         request.redirect(redirect_url)
@@ -433,30 +381,39 @@ class CasTicketServlet(ClientV1RestServlet):
         return urlparse.urlunparse(url_parts)
 
     def parse_cas_response(self, cas_response_body):
-        root = ET.fromstring(cas_response_body)
-        if not root.tag.endswith("serviceResponse"):
-            raise LoginError(401, "Invalid CAS response", errcode=Codes.UNAUTHORIZED)
-        if not root[0].tag.endswith("authenticationSuccess"):
-            raise LoginError(401, "Unsuccessful CAS response", errcode=Codes.UNAUTHORIZED)
-        for child in root[0]:
-            if child.tag.endswith("user"):
-                user = child.text
-            if child.tag.endswith("attributes"):
-                attributes = {}
-                for attribute in child:
-                    # ElementTree library expands the namespace in attribute tags
-                    # to the full URL of the namespace.
-                    # See (https://docs.python.org/2/library/xml.etree.elementtree.html)
-                    # We don't care about namespace here and it will always be encased in
-                    # curly braces, so we remove them.
-                    if "}" in attribute.tag:
-                        attributes[attribute.tag.split("}")[1]] = attribute.text
-                    else:
-                        attributes[attribute.tag] = attribute.text
-        if user is None or attributes is None:
-            raise LoginError(401, "Invalid CAS response", errcode=Codes.UNAUTHORIZED)
-
-        return (user, attributes)
+        user = None
+        attributes = None
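+        # A successful CAS response body looks roughly like (illustrative):
+        #   <cas:serviceResponse xmlns:cas="http://www.yale.edu/tp/cas">
+        #     <cas:authenticationSuccess>
+        #       <cas:user>alice</cas:user>
+        #       <cas:attributes>...</cas:attributes>
+        #     </cas:authenticationSuccess>
+        #   </cas:serviceResponse>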
+        try:
+            root = ET.fromstring(cas_response_body)
+            if not root.tag.endswith("serviceResponse"):
+                raise Exception("root of CAS response is not serviceResponse")
+            success = (root[0].tag.endswith("authenticationSuccess"))
+            for child in root[0]:
+                if child.tag.endswith("user"):
+                    user = child.text
+                if child.tag.endswith("attributes"):
+                    attributes = {}
+                    for attribute in child:
+                        # ElementTree library expands the namespace in
+                        # attribute tags to the full URL of the namespace.
+                        # We don't care about namespace here and it will always
+                        # be encased in curly braces, so we remove them.
+                        tag = attribute.tag
+                        if "}" in tag:
+                            tag = tag.split("}")[1]
+                        attributes[tag] = attribute.text
+            if user is None:
+                raise Exception("CAS response does not contain user")
+            if attributes is None:
+                raise Exception("CAS response does not contain attributes")
+        except Exception:
+            logger.error("Error parsing CAS response", exc_info=1)
+            raise LoginError(401, "Invalid CAS response",
+                             errcode=Codes.UNAUTHORIZED)
+        if not success:
+            raise LoginError(401, "Unsuccessful CAS response",
+                             errcode=Codes.UNAUTHORIZED)
+        return user, attributes
 
 
 def register_servlets(hs, http_server):
@@ -466,5 +423,3 @@ def register_servlets(hs, http_server):
     if hs.config.cas_enabled:
         CasRedirectServlet(hs).register(http_server)
         CasTicketServlet(hs).register(http_server)
-        CasRestServlet(hs).register(http_server)
-    # TODO PasswordResetRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py
index 02d837ee6a..6bb4821ec6 100644
--- a/synapse/rest/client/v1/push_rule.py
+++ b/synapse/rest/client/v1/push_rule.py
@@ -128,11 +128,9 @@ class PushRuleRestServlet(ClientV1RestServlet):
         # we build up the full structure and then decide which bits of it
         # to send, which means doing unnecessary work sometimes, but it
         # is probably not going to make a whole lot of difference
-        rawrules = yield self.store.get_push_rules_for_user(user_id)
+        rules = yield self.store.get_push_rules_for_user(user_id)
 
-        enabled_map = yield self.store.get_push_rules_enabled_for_user(user_id)
-
-        rules = format_push_rules_for_user(requester.user, rawrules, enabled_map)
+        rules = format_push_rules_for_user(requester.user, rules)
 
         path = request.postpath[1:]
 
diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py
index ab928a16da..9a2ed6ed88 100644
--- a/synapse/rest/client/v1/pusher.py
+++ b/synapse/rest/client/v1/pusher.py
@@ -17,7 +17,11 @@ from twisted.internet import defer
 
 from synapse.api.errors import SynapseError, Codes
 from synapse.push import PusherConfigException
-from synapse.http.servlet import parse_json_object_from_request
+from synapse.http.servlet import (
+    parse_json_object_from_request, parse_string, RestServlet
+)
+from synapse.http.server import finish_request
+from synapse.api.errors import StoreError
 
 from .base import ClientV1RestServlet, client_path_patterns
 
@@ -136,6 +140,57 @@ class PushersSetRestServlet(ClientV1RestServlet):
         return 200, {}
 
 
+class PushersRemoveRestServlet(RestServlet):
+    """
+    To allow a pusher to be deleted by clicking a link (i.e. a GET request)
+    """
+    PATTERNS = client_path_patterns("/pushers/remove$")
+    SUCCESS_HTML = "<html><body>You have been unsubscribed</body></html>"
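+    # An unsubscribe link in a notification email might look like this
+    # (illustrative; the link would also carry the user's access_token):
+    #   /_matrix/client/api/v1/pushers/remove?app_id=m.email&pushkey=a%40b.com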
+
+    def __init__(self, hs):
+        super(PushersRemoveRestServlet, self).__init__()
+        self.hs = hs
+        self.notifier = hs.get_notifier()
+        self.auth = hs.get_v1auth()
+
+    @defer.inlineCallbacks
+    def on_GET(self, request):
+        requester = yield self.auth.get_user_by_req(request, rights="delete_pusher")
+        user = requester.user
+
+        app_id = parse_string(request, "app_id", required=True)
+        pushkey = parse_string(request, "pushkey", required=True)
+
+        pusher_pool = self.hs.get_pusherpool()
+
+        try:
+            yield pusher_pool.remove_pusher(
+                app_id=app_id,
+                pushkey=pushkey,
+                user_id=user.to_string(),
+            )
+        except StoreError as se:
+            # a 404 means no such pusher: that's fine, they were already
+            # unsubscribed
+            if se.code != 404:
+                raise
+
+        self.notifier.on_new_replication_data()
+
+        request.setResponseCode(200)
+        request.setHeader(b"Content-Type", b"text/html; charset=utf-8")
+        request.setHeader(b"Server", self.hs.version_string)
+        request.setHeader(b"Content-Length", b"%d" % (
+            len(PushersRemoveRestServlet.SUCCESS_HTML),
+        ))
+        request.write(PushersRemoveRestServlet.SUCCESS_HTML)
+        finish_request(request)
+        defer.returnValue(None)
+
+    def on_OPTIONS(self, _):
+        return 200, {}
+
+
 def register_servlets(hs, http_server):
     PushersRestServlet(hs).register(http_server)
     PushersSetRestServlet(hs).register(http_server)
+    PushersRemoveRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v1/register.py b/synapse/rest/client/v1/register.py
index e3f4fbb0bb..2383b9df86 100644
--- a/synapse/rest/client/v1/register.py
+++ b/synapse/rest/client/v1/register.py
@@ -52,6 +52,10 @@ class RegisterRestServlet(ClientV1RestServlet):
     PATTERNS = client_path_patterns("/register$", releases=(), include_in_unstable=False)
 
     def __init__(self, hs):
+        """
+        Args:
+            hs (synapse.server.HomeServer): server
+        """
         super(RegisterRestServlet, self).__init__(hs)
         # sessions are stored as:
         # self.sessions = {
@@ -60,6 +64,7 @@ class RegisterRestServlet(ClientV1RestServlet):
         # TODO: persistent storage
         self.sessions = {}
         self.enable_registration = hs.config.enable_registration
+        self.auth_handler = hs.get_auth_handler()
 
     def on_GET(self, request):
         if self.hs.config.enable_registration_captcha:
@@ -299,9 +304,10 @@ class RegisterRestServlet(ClientV1RestServlet):
         user_localpart = register_json["user"].encode("utf-8")
 
         handler = self.handlers.registration_handler
-        (user_id, token) = yield handler.appservice_register(
+        user_id = yield handler.appservice_register(
             user_localpart, as_token
         )
+        token = yield self.auth_handler.issue_access_token(user_id)
         self._remove_session(session)
         defer.returnValue({
             "user_id": user_id,
@@ -324,6 +330,14 @@ class RegisterRestServlet(ClientV1RestServlet):
             raise SynapseError(400, "Shared secret registration is not enabled")
 
         user = register_json["user"].encode("utf-8")
+        password = register_json["password"].encode("utf-8")
+        admin = register_json.get("admin", None)
+
+        # It's important to check, as we use null bytes as HMAC field separators
+        if "\x00" in user:
+            raise SynapseError(400, "Invalid user")
+        if "\x00" in password:
+            raise SynapseError(400, "Invalid password")
 
         # str() because otherwise hmac complains that 'unicode' does not
         # have the buffer interface
@@ -331,17 +345,21 @@ class RegisterRestServlet(ClientV1RestServlet):
 
         want_mac = hmac.new(
             key=self.hs.config.registration_shared_secret,
-            msg=user,
             digestmod=sha1,
-        ).hexdigest()
-
-        password = register_json["password"].encode("utf-8")
+        )
+        want_mac.update(user)
+        want_mac.update("\x00")
+        want_mac.update(password)
+        want_mac.update("\x00")
+        want_mac.update("admin" if admin else "notadmin")
+        want_mac = want_mac.hexdigest()
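+        # A registering client computes the same digest over the
+        # nul-separated fields, e.g. (illustrative, for a non-admin user):
+        #   mac = hmac.new(key=shared_secret, digestmod=sha1)
+        #   mac.update(user + "\x00" + password + "\x00" + "notadmin")
+        #   got_mac = mac.hexdigest()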
 
         if compare_digest(want_mac, got_mac):
             handler = self.handlers.registration_handler
             user_id, token = yield handler.register(
                 localpart=user,
                 password=password,
+                admin=bool(admin),
             )
             self._remove_session(session)
             defer.returnValue({
@@ -410,12 +428,15 @@ class CreateUserRestServlet(ClientV1RestServlet):
             raise SynapseError(400, "Failed to parse 'duration_seconds'")
         if duration_seconds > self.direct_user_creation_max_duration:
             duration_seconds = self.direct_user_creation_max_duration
+        password_hash = (
+            user_json["password_hash"].encode("utf-8")
+            if user_json.get("password_hash") else None
+        )
 
         handler = self.handlers.registration_handler
         user_id, token = yield handler.get_or_create_user(
             localpart=localpart,
             displayname=displayname,
-            duration_seconds=duration_seconds
+            duration_in_ms=(duration_seconds * 1000),
+            password_hash=password_hash
         )
 
         defer.returnValue({
diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
index 644aa4e513..866a1e9120 100644
--- a/synapse/rest/client/v1/room.py
+++ b/synapse/rest/client/v1/room.py
@@ -20,12 +20,14 @@ from .base import ClientV1RestServlet, client_path_patterns
 from synapse.api.errors import SynapseError, Codes, AuthError
 from synapse.streams.config import PaginationConfig
 from synapse.api.constants import EventTypes, Membership
+from synapse.api.filtering import Filter
 from synapse.types import UserID, RoomID, RoomAlias
 from synapse.events.utils import serialize_event
 from synapse.http.servlet import parse_json_object_from_request
 
 import logging
 import urllib
+import ujson as json
 
 logger = logging.getLogger(__name__)
 
@@ -72,8 +74,6 @@ class RoomCreateRestServlet(ClientV1RestServlet):
 
     def get_room_config(self, request):
         user_supplied_config = parse_json_object_from_request(request)
-        # default visibility
-        user_supplied_config.setdefault("visibility", "public")
         return user_supplied_config
 
     def on_OPTIONS(self, request):
@@ -279,8 +279,16 @@ class PublicRoomListRestServlet(ClientV1RestServlet):
 
     @defer.inlineCallbacks
     def on_GET(self, request):
-        handler = self.handlers.room_list_handler
-        data = yield handler.get_public_room_list()
+        try:
+            yield self.auth.get_user_by_req(request)
+        except AuthError:
+            # This endpoint isn't authed, but it's useful to know who's hitting
+            # it if they *do* supply an access token
+            pass
+
+        handler = self.hs.get_room_list_handler()
+        data = yield handler.get_aggregated_public_room_list()
+
         defer.returnValue((200, data))
 
 
@@ -321,12 +329,19 @@ class RoomMessageListRestServlet(ClientV1RestServlet):
             request, default_limit=10,
         )
         as_client_event = "raw" not in request.args
+        filter_bytes = request.args.get("filter", None)
+        if filter_bytes:
+            filter_json = urllib.unquote(filter_bytes[-1]).decode("UTF-8")
+            event_filter = Filter(json.loads(filter_json))
+        else:
+            event_filter = None
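+        # e.g. ?filter=%7B%22types%22%3A%5B%22m.room.message%22%5D%7D decodes
+        # to the event filter {"types": ["m.room.message"]} (illustrative)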
         handler = self.handlers.message_handler
         msgs = yield handler.get_messages(
             room_id=room_id,
             requester=requester,
             pagin_config=pagination_config,
-            as_client_event=as_client_event
+            as_client_event=as_client_event,
+            event_filter=event_filter,
         )
 
         defer.returnValue((200, msgs))
diff --git a/synapse/rest/client/v2_alpha/_base.py b/synapse/rest/client/v2_alpha/_base.py
index b6faa2b0e6..20e765f48f 100644
--- a/synapse/rest/client/v2_alpha/_base.py
+++ b/synapse/rest/client/v2_alpha/_base.py
@@ -25,7 +25,9 @@ import logging
 logger = logging.getLogger(__name__)
 
 
-def client_v2_patterns(path_regex, releases=(0,)):
+def client_v2_patterns(path_regex, releases=(0,),
+                       v2_alpha=True,
+                       unstable=True):
     """Creates a regex compiled client path with the correct client path
     prefix.
 
@@ -35,9 +37,12 @@ def client_v2_patterns(path_regex, releases=(0,)):
     Returns:
         SRE_Pattern
     """
-    patterns = [re.compile("^" + CLIENT_V2_ALPHA_PREFIX + path_regex)]
-    unstable_prefix = CLIENT_V2_ALPHA_PREFIX.replace("/v2_alpha", "/unstable")
-    patterns.append(re.compile("^" + unstable_prefix + path_regex))
+    patterns = []
+    if v2_alpha:
+        patterns.append(re.compile("^" + CLIENT_V2_ALPHA_PREFIX + path_regex))
+    if unstable:
+        unstable_prefix = CLIENT_V2_ALPHA_PREFIX.replace("/v2_alpha", "/unstable")
+        patterns.append(re.compile("^" + unstable_prefix + path_regex))
     for release in releases:
         new_prefix = CLIENT_V2_ALPHA_PREFIX.replace("/v2_alpha", "/r%d" % release)
         patterns.append(re.compile("^" + new_prefix + path_regex))
diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py
index c88c270537..eb49ad62e9 100644
--- a/synapse/rest/client/v2_alpha/account.py
+++ b/synapse/rest/client/v2_alpha/account.py
@@ -28,14 +28,46 @@ import logging
 logger = logging.getLogger(__name__)
 
 
+class PasswordRequestTokenRestServlet(RestServlet):
+    PATTERNS = client_v2_patterns("/account/password/email/requestToken$")
+
+    def __init__(self, hs):
+        super(PasswordRequestTokenRestServlet, self).__init__()
+        self.hs = hs
+        self.identity_handler = hs.get_handlers().identity_handler
+
+    @defer.inlineCallbacks
+    def on_POST(self, request):
+        body = parse_json_object_from_request(request)
+
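+        # Expected body per the requestToken API, e.g. (illustrative values):
+        #   {"id_server": "matrix.org", "client_secret": "d0nttellanyb0dy",
+        #    "email": "alice@example.com", "send_attempt": 1}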
+        required = ['id_server', 'client_secret', 'email', 'send_attempt']
+        absent = []
+        for k in required:
+            if k not in body:
+                absent.append(k)
+
+        if absent:
+            raise SynapseError(400, "Missing params: %r" % absent, Codes.MISSING_PARAM)
+
+        existingUid = yield self.hs.get_datastore().get_user_id_by_threepid(
+            'email', body['email']
+        )
+
+        if existingUid is None:
+            raise SynapseError(400, "Email not found", Codes.THREEPID_NOT_FOUND)
+
+        ret = yield self.identity_handler.requestEmailToken(**body)
+        defer.returnValue((200, ret))
+
+
 class PasswordRestServlet(RestServlet):
-    PATTERNS = client_v2_patterns("/account/password")
+    PATTERNS = client_v2_patterns("/account/password$")
 
     def __init__(self, hs):
         super(PasswordRestServlet, self).__init__()
         self.hs = hs
         self.auth = hs.get_auth()
-        self.auth_handler = hs.get_handlers().auth_handler
+        self.auth_handler = hs.get_auth_handler()
 
     @defer.inlineCallbacks
     def on_POST(self, request):
@@ -89,15 +121,90 @@ class PasswordRestServlet(RestServlet):
         return 200, {}
 
 
+class DeactivateAccountRestServlet(RestServlet):
+    PATTERNS = client_v2_patterns("/account/deactivate$")
+
+    def __init__(self, hs):
+        self.hs = hs
+        self.store = hs.get_datastore()
+        self.auth = hs.get_auth()
+        self.auth_handler = hs.get_auth_handler()
+        super(DeactivateAccountRestServlet, self).__init__()
+
+    @defer.inlineCallbacks
+    def on_POST(self, request):
+        body = parse_json_object_from_request(request)
+
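+        # The body should carry a user-interactive auth dict, e.g.
+        # (illustrative):
+        #   {"auth": {"type": "m.login.password",
+        #             "user": "@alice:example.com", "password": "..."}}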
+        authed, result, params, _ = yield self.auth_handler.check_auth([
+            [LoginType.PASSWORD],
+        ], body, self.hs.get_ip_from_request(request))
+
+        if not authed:
+            defer.returnValue((401, result))
+
+        user_id = None
+        requester = None
+
+        if LoginType.PASSWORD in result:
+            # if using password, they should also be logged in
+            requester = yield self.auth.get_user_by_req(request)
+            user_id = requester.user.to_string()
+            if user_id != result[LoginType.PASSWORD]:
+                raise LoginError(400, "", Codes.UNKNOWN)
+        else:
+            logger.error("Auth succeeded but no known type!", result.keys())
+            raise SynapseError(500, "", Codes.UNKNOWN)
+
+        # FIXME: Theoretically there is a race here wherein user resets password
+        # using threepid.
+        yield self.store.user_delete_access_tokens(user_id)
+        yield self.store.user_delete_threepids(user_id)
+        yield self.store.user_set_password_hash(user_id, None)
+
+        defer.returnValue((200, {}))
+
+
+class ThreepidRequestTokenRestServlet(RestServlet):
+    PATTERNS = client_v2_patterns("/account/3pid/email/requestToken$")
+
+    def __init__(self, hs):
+        self.hs = hs
+        super(ThreepidRequestTokenRestServlet, self).__init__()
+        self.identity_handler = hs.get_handlers().identity_handler
+
+    @defer.inlineCallbacks
+    def on_POST(self, request):
+        body = parse_json_object_from_request(request)
+
+        required = ['id_server', 'client_secret', 'email', 'send_attempt']
+        absent = []
+        for k in required:
+            if k not in body:
+                absent.append(k)
+
+        if absent:
+            raise SynapseError(400, "Missing params: %r" % absent, Codes.MISSING_PARAM)
+
+        existingUid = yield self.hs.get_datastore().get_user_id_by_threepid(
+            'email', body['email']
+        )
+
+        if existingUid is not None:
+            raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE)
+
+        ret = yield self.identity_handler.requestEmailToken(**body)
+        defer.returnValue((200, ret))
+
+
 class ThreepidRestServlet(RestServlet):
-    PATTERNS = client_v2_patterns("/account/3pid")
+    PATTERNS = client_v2_patterns("/account/3pid$")
 
     def __init__(self, hs):
         super(ThreepidRestServlet, self).__init__()
         self.hs = hs
         self.identity_handler = hs.get_handlers().identity_handler
         self.auth = hs.get_auth()
-        self.auth_handler = hs.get_handlers().auth_handler
+        self.auth_handler = hs.get_auth_handler()
 
     @defer.inlineCallbacks
     def on_GET(self, request):
@@ -157,5 +264,8 @@ class ThreepidRestServlet(RestServlet):
 
 
 def register_servlets(hs, http_server):
+    PasswordRequestTokenRestServlet(hs).register(http_server)
     PasswordRestServlet(hs).register(http_server)
+    DeactivateAccountRestServlet(hs).register(http_server)
+    ThreepidRequestTokenRestServlet(hs).register(http_server)
     ThreepidRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py
index 78181b7b18..58d3cad6a1 100644
--- a/synapse/rest/client/v2_alpha/auth.py
+++ b/synapse/rest/client/v2_alpha/auth.py
@@ -104,7 +104,7 @@ class AuthRestServlet(RestServlet):
         super(AuthRestServlet, self).__init__()
         self.hs = hs
         self.auth = hs.get_auth()
-        self.auth_handler = hs.get_handlers().auth_handler
+        self.auth_handler = hs.get_auth_handler()
         self.registration_handler = hs.get_handlers().registration_handler
 
     @defer.inlineCallbacks
diff --git a/synapse/rest/client/v2_alpha/devices.py b/synapse/rest/client/v2_alpha/devices.py
new file mode 100644
index 0000000000..8fbd3d3dfc
--- /dev/null
+++ b/synapse/rest/client/v2_alpha/devices.py
@@ -0,0 +1,100 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015, 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from twisted.internet import defer
+
+from synapse.http import servlet
+from ._base import client_v2_patterns
+
+logger = logging.getLogger(__name__)
+
+
+class DevicesRestServlet(servlet.RestServlet):
+    PATTERNS = client_v2_patterns("/devices$", releases=[], v2_alpha=False)
+
+    def __init__(self, hs):
+        """
+        Args:
+            hs (synapse.server.HomeServer): server
+        """
+        super(DevicesRestServlet, self).__init__()
+        self.hs = hs
+        self.auth = hs.get_auth()
+        self.device_handler = hs.get_device_handler()
+
+    @defer.inlineCallbacks
+    def on_GET(self, request):
+        requester = yield self.auth.get_user_by_req(request)
+        devices = yield self.device_handler.get_devices_by_user(
+            requester.user.to_string()
+        )
+        defer.returnValue((200, {"devices": devices}))
+
+
+class DeviceRestServlet(servlet.RestServlet):
+    PATTERNS = client_v2_patterns("/devices/(?P<device_id>[^/]*)$",
+                                  releases=[], v2_alpha=False)
+
+    def __init__(self, hs):
+        """
+        Args:
+            hs (synapse.server.HomeServer): server
+        """
+        super(DeviceRestServlet, self).__init__()
+        self.hs = hs
+        self.auth = hs.get_auth()
+        self.device_handler = hs.get_device_handler()
+
+    @defer.inlineCallbacks
+    def on_GET(self, request, device_id):
+        requester = yield self.auth.get_user_by_req(request)
+        device = yield self.device_handler.get_device(
+            requester.user.to_string(),
+            device_id,
+        )
+        defer.returnValue((200, device))
+
+    @defer.inlineCallbacks
+    def on_DELETE(self, request, device_id):
+        # XXX: it's not completely obvious we want to expose this endpoint.
+        # It allows the client to delete access tokens, which feels like a
+        # thing which merits extra auth. But if we want to do the interactive-
+        # auth dance, we should really make it possible to delete more than one
+        # device at a time.
+        requester = yield self.auth.get_user_by_req(request)
+        yield self.device_handler.delete_device(
+            requester.user.to_string(),
+            device_id,
+        )
+        defer.returnValue((200, {}))
+
+    @defer.inlineCallbacks
+    def on_PUT(self, request, device_id):
+        requester = yield self.auth.get_user_by_req(request)
+
+        body = servlet.parse_json_object_from_request(request)
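+        # body is expected to hold the updatable fields; currently just the
+        # display name, e.g. (illustrative): {"display_name": "my phone"}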
+        yield self.device_handler.update_device(
+            requester.user.to_string(),
+            device_id,
+            body
+        )
+        defer.returnValue((200, {}))
+
+
+def register_servlets(hs, http_server):
+    DevicesRestServlet(hs).register(http_server)
+    DeviceRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py
index 89ab39491c..c5ff16adf3 100644
--- a/synapse/rest/client/v2_alpha/keys.py
+++ b/synapse/rest/client/v2_alpha/keys.py
@@ -13,24 +13,25 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import logging
+
+import simplejson as json
+from canonicaljson import encode_canonical_json
 from twisted.internet import defer
 
+import synapse.api.errors
+import synapse.server
+import synapse.types
 from synapse.http.servlet import RestServlet, parse_json_object_from_request
 from synapse.types import UserID
-
-from canonicaljson import encode_canonical_json
-
 from ._base import client_v2_patterns
 
-import logging
-import simplejson as json
-
 logger = logging.getLogger(__name__)
 
 
 class KeyUploadServlet(RestServlet):
     """
-    POST /keys/upload/<device_id> HTTP/1.1
+    POST /keys/upload HTTP/1.1
     Content-Type: application/json
 
     {
@@ -53,23 +54,45 @@ class KeyUploadServlet(RestServlet):
       },
     }
     """
-    PATTERNS = client_v2_patterns("/keys/upload/(?P<device_id>[^/]*)", releases=())
+    PATTERNS = client_v2_patterns("/keys/upload(/(?P<device_id>[^/]+))?$",
+                                  releases=())
 
     def __init__(self, hs):
+        """
+        Args:
+            hs (synapse.server.HomeServer): server
+        """
         super(KeyUploadServlet, self).__init__()
         self.store = hs.get_datastore()
         self.clock = hs.get_clock()
         self.auth = hs.get_auth()
+        self.device_handler = hs.get_device_handler()
 
     @defer.inlineCallbacks
     def on_POST(self, request, device_id):
         requester = yield self.auth.get_user_by_req(request)
+
         user_id = requester.user.to_string()
-        # TODO: Check that the device_id matches that in the authentication
-        # or derive the device_id from the authentication instead.
 
         body = parse_json_object_from_request(request)
 
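+        # Resolve which device these keys belong to: prefer the device_id in
+        # the URL path (deprecated form), falling back to the device bound to
+        # the access token.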
+        if device_id is not None:
+            # passing the device_id here is deprecated; however, we allow it
+            # for now for compatibility with older clients.
+            if (requester.device_id is not None and
+                    device_id != requester.device_id):
+                logger.warning("Client uploading keys for a different device "
+                               "(logged in as %s, uploading for %s)",
+                               requester.device_id, device_id)
+        else:
+            device_id = requester.device_id
+
+        if device_id is None:
+            raise synapse.api.errors.SynapseError(
+                400,
+                "To upload keys, you must pass device_id when authenticating"
+            )
+
         time_now = self.clock.time_msec()
 
         # TODO: Validate the JSON to make sure it has the right keys.
@@ -102,13 +125,12 @@ class KeyUploadServlet(RestServlet):
                 user_id, device_id, time_now, key_list
             )
 
-        result = yield self.store.count_e2e_one_time_keys(user_id, device_id)
-        defer.returnValue((200, {"one_time_key_counts": result}))
-
-    @defer.inlineCallbacks
-    def on_GET(self, request, device_id):
-        requester = yield self.auth.get_user_by_req(request)
-        user_id = requester.user.to_string()
+        # the device should have been registered already, but it may have been
+        # deleted due to a race with a DELETE request. Or we may be using an
+        # old access_token without an associated device_id. Either way, we
+        # need to double-check the device is registered to avoid ending up with
+        # keys without a corresponding device.
+        yield self.device_handler.check_device_registered(user_id, device_id)
 
         result = yield self.store.count_e2e_one_time_keys(user_id, device_id)
         defer.returnValue((200, {"one_time_key_counts": result}))
@@ -162,17 +184,19 @@ class KeyQueryServlet(RestServlet):
     )
 
     def __init__(self, hs):
+        """
+        Args:
+            hs (synapse.server.HomeServer):
+        """
         super(KeyQueryServlet, self).__init__()
-        self.store = hs.get_datastore()
         self.auth = hs.get_auth()
-        self.federation = hs.get_replication_layer()
-        self.is_mine = hs.is_mine
+        self.e2e_keys_handler = hs.get_e2e_keys_handler()
 
     @defer.inlineCallbacks
     def on_POST(self, request, user_id, device_id):
         yield self.auth.get_user_by_req(request)
         body = parse_json_object_from_request(request)
-        result = yield self.handle_request(body)
+        result = yield self.e2e_keys_handler.query_devices(body)
         defer.returnValue(result)
 
     @defer.inlineCallbacks
@@ -181,45 +205,11 @@ class KeyQueryServlet(RestServlet):
         auth_user_id = requester.user.to_string()
         user_id = user_id if user_id else auth_user_id
         device_ids = [device_id] if device_id else []
-        result = yield self.handle_request(
+        result = yield self.e2e_keys_handler.query_devices(
             {"device_keys": {user_id: device_ids}}
         )
         defer.returnValue(result)
 
-    @defer.inlineCallbacks
-    def handle_request(self, body):
-        local_query = []
-        remote_queries = {}
-        for user_id, device_ids in body.get("device_keys", {}).items():
-            user = UserID.from_string(user_id)
-            if self.is_mine(user):
-                if not device_ids:
-                    local_query.append((user_id, None))
-                else:
-                    for device_id in device_ids:
-                        local_query.append((user_id, device_id))
-            else:
-                remote_queries.setdefault(user.domain, {})[user_id] = list(
-                    device_ids
-                )
-        results = yield self.store.get_e2e_device_keys(local_query)
-
-        json_result = {}
-        for user_id, device_keys in results.items():
-            for device_id, json_bytes in device_keys.items():
-                json_result.setdefault(user_id, {})[device_id] = json.loads(
-                    json_bytes
-                )
-
-        for destination, device_keys in remote_queries.items():
-            remote_result = yield self.federation.query_client_keys(
-                destination, {"device_keys": device_keys}
-            )
-            for user_id, keys in remote_result["device_keys"].items():
-                if user_id in device_keys:
-                    json_result[user_id] = keys
-        defer.returnValue((200, {"device_keys": json_result}))
-
 
 class OneTimeKeyServlet(RestServlet):
     """
diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py
index 1ecc02d94d..943f5676a3 100644
--- a/synapse/rest/client/v2_alpha/register.py
+++ b/synapse/rest/client/v2_alpha/register.py
@@ -41,17 +41,59 @@ else:
 logger = logging.getLogger(__name__)
 
 
+class RegisterRequestTokenRestServlet(RestServlet):
+    PATTERNS = client_v2_patterns("/register/email/requestToken$")
+
+    def __init__(self, hs):
+        """
+        Args:
+            hs (synapse.server.HomeServer): server
+        """
+        super(RegisterRequestTokenRestServlet, self).__init__()
+        self.hs = hs
+        self.identity_handler = hs.get_handlers().identity_handler
+
+    @defer.inlineCallbacks
+    def on_POST(self, request):
+        body = parse_json_object_from_request(request)
+
+        required = ['id_server', 'client_secret', 'email', 'send_attempt']
+        absent = []
+        for k in required:
+            if k not in body:
+                absent.append(k)
+
+        if absent:
+            raise SynapseError(400, "Missing params: %r" % absent, Codes.MISSING_PARAM)
+
+        existingUid = yield self.hs.get_datastore().get_user_id_by_threepid(
+            'email', body['email']
+        )
+
+        if existingUid is not None:
+            raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE)
+
+        ret = yield self.identity_handler.requestEmailToken(**body)
+        defer.returnValue((200, ret))
+
+
 class RegisterRestServlet(RestServlet):
-    PATTERNS = client_v2_patterns("/register")
+    PATTERNS = client_v2_patterns("/register$")
 
     def __init__(self, hs):
+        """
+        Args:
+            hs (synapse.server.HomeServer): server
+        """
         super(RegisterRestServlet, self).__init__()
+
         self.hs = hs
         self.auth = hs.get_auth()
         self.store = hs.get_datastore()
-        self.auth_handler = hs.get_handlers().auth_handler
+        self.auth_handler = hs.get_auth_handler()
         self.registration_handler = hs.get_handlers().registration_handler
         self.identity_handler = hs.get_handlers().identity_handler
+        self.device_handler = hs.get_device_handler()
 
     @defer.inlineCallbacks
     def on_POST(self, request):
@@ -70,10 +112,6 @@ class RegisterRestServlet(RestServlet):
                 "Do not understand membership kind: %s" % (kind,)
             )
 
-        if '/register/email/requestToken' in request.path:
-            ret = yield self.onEmailTokenRequest(request)
-            defer.returnValue(ret)
-
         body = parse_json_object_from_request(request)
 
         # we do basic sanity checks here because the auth layer will store these
@@ -104,11 +142,12 @@ class RegisterRestServlet(RestServlet):
             # Set the desired user according to the AS API (which uses the
             # 'user' key not 'username'). Since this is a new addition, we'll
             # fallback to 'username' if they gave one.
-            if isinstance(body.get("user"), basestring):
-                desired_username = body["user"]
-            result = yield self._do_appservice_registration(
-                desired_username, request.args["access_token"][0]
-            )
+            desired_username = body.get("user", desired_username)
+
+            if isinstance(desired_username, basestring):
+                result = yield self._do_appservice_registration(
+                    desired_username, request.args["access_token"][0], body
+                )
             defer.returnValue((200, result))  # we throw for non 200 responses
             return
 
@@ -117,7 +156,7 @@ class RegisterRestServlet(RestServlet):
             # FIXME: Should we really be determining if this is shared secret
             # auth based purely on the 'mac' key?
             result = yield self._do_shared_secret_registration(
-                desired_username, desired_password, body["mac"]
+                desired_username, desired_password, body
             )
             defer.returnValue((200, result))  # we throw for non 200 responses
             return
@@ -157,12 +196,12 @@ class RegisterRestServlet(RestServlet):
                 [LoginType.EMAIL_IDENTITY]
             ]
 
-        authed, result, params, session_id = yield self.auth_handler.check_auth(
+        authed, auth_result, params, session_id = yield self.auth_handler.check_auth(
             flows, body, self.hs.get_ip_from_request(request)
         )
 
         if not authed:
-            defer.returnValue((401, result))
+            defer.returnValue((401, auth_result))
             return
 
         if registered_user_id is not None:
@@ -170,106 +209,58 @@ class RegisterRestServlet(RestServlet):
                 "Already registered user ID %r for this session",
                 registered_user_id
             )
-            access_token = yield self.auth_handler.issue_access_token(registered_user_id)
-            refresh_token = yield self.auth_handler.issue_refresh_token(
-                registered_user_id
+            # don't re-register the email address
+            add_email = False
+        else:
+            # NB: This may be from the auth handler and NOT from the POST
+            if 'password' not in params:
+                raise SynapseError(400, "Missing password.",
+                                   Codes.MISSING_PARAM)
+
+            desired_username = params.get("username", None)
+            new_password = params.get("password", None)
+            guest_access_token = params.get("guest_access_token", None)
+
+            (registered_user_id, _) = yield self.registration_handler.register(
+                localpart=desired_username,
+                password=new_password,
+                guest_access_token=guest_access_token,
+                generate_token=False,
             )
-            defer.returnValue((200, {
-                "user_id": registered_user_id,
-                "access_token": access_token,
-                "home_server": self.hs.hostname,
-                "refresh_token": refresh_token,
-            }))
-
-        # NB: This may be from the auth handler and NOT from the POST
-        if 'password' not in params:
-            raise SynapseError(400, "Missing password.", Codes.MISSING_PARAM)
-
-        desired_username = params.get("username", None)
-        new_password = params.get("password", None)
-        guest_access_token = params.get("guest_access_token", None)
-
-        (user_id, token) = yield self.registration_handler.register(
-            localpart=desired_username,
-            password=new_password,
-            guest_access_token=guest_access_token,
-        )
 
-        # remember that we've now registered that user account, and with what
-        # user ID (since the user may not have specified)
-        self.auth_handler.set_session_data(
-            session_id, "registered_user_id", user_id
+            # remember that we've now registered that user account, and with
+            # what user ID (since the user may not have specified)
+            self.auth_handler.set_session_data(
+                session_id, "registered_user_id", registered_user_id
+            )
+
+            add_email = True
+
+        return_dict = yield self._create_registration_details(
+            registered_user_id, params
         )
 
-        if result and LoginType.EMAIL_IDENTITY in result:
-            threepid = result[LoginType.EMAIL_IDENTITY]
-
-            for reqd in ['medium', 'address', 'validated_at']:
-                if reqd not in threepid:
-                    logger.info("Can't add incomplete 3pid")
-                else:
-                    yield self.auth_handler.add_threepid(
-                        user_id,
-                        threepid['medium'],
-                        threepid['address'],
-                        threepid['validated_at'],
-                    )
-
-                    # And we add an email pusher for them by default, but only
-                    # if email notifications are enabled (so people don't start
-                    # getting mail spam where they weren't before if email
-                    # notifs are set up on a home server)
-                    if (
-                        self.hs.config.email_enable_notifs and
-                        self.hs.config.email_notif_for_new_users
-                    ):
-                        # Pull the ID of the access token back out of the db
-                        # It would really make more sense for this to be passed
-                        # up when the access token is saved, but that's quite an
-                        # invasive change I'd rather do separately.
-                        user_tuple = yield self.store.get_user_by_access_token(
-                            token
-                        )
-
-                        yield self.hs.get_pusherpool().add_pusher(
-                            user_id=user_id,
-                            access_token=user_tuple["token_id"],
-                            kind="email",
-                            app_id="m.email",
-                            app_display_name="Email Notifications",
-                            device_display_name=threepid["address"],
-                            pushkey=threepid["address"],
-                            lang=None,  # We don't know a user's language here
-                            data={},
-                        )
-
-            if 'bind_email' in params and params['bind_email']:
-                logger.info("bind_email specified: binding")
-
-                emailThreepid = result[LoginType.EMAIL_IDENTITY]
-                threepid_creds = emailThreepid['threepid_creds']
-                logger.debug("Binding emails %s to %s" % (
-                    emailThreepid, user_id
-                ))
-                yield self.identity_handler.bind_threepid(threepid_creds, user_id)
-            else:
-                logger.info("bind_email not specified: not binding email")
-
-        result = yield self._create_registration_details(user_id, token)
-        defer.returnValue((200, result))
+        if add_email and auth_result and LoginType.EMAIL_IDENTITY in auth_result:
+            threepid = auth_result[LoginType.EMAIL_IDENTITY]
+            yield self._register_email_threepid(
+                registered_user_id, threepid, return_dict["access_token"],
+                params.get("bind_email")
+            )
+
+        defer.returnValue((200, return_dict))
 
     def on_OPTIONS(self, _):
         return 200, {}
 
     @defer.inlineCallbacks
-    def _do_appservice_registration(self, username, as_token):
-        (user_id, token) = yield self.registration_handler.appservice_register(
+    def _do_appservice_registration(self, username, as_token, body):
+        user_id = yield self.registration_handler.appservice_register(
             username, as_token
         )
-        defer.returnValue((yield self._create_registration_details(user_id, token)))
+        defer.returnValue((yield self._create_registration_details(user_id, body)))
 
     @defer.inlineCallbacks
-    def _do_shared_secret_registration(self, username, password, mac):
+    def _do_shared_secret_registration(self, username, password, body):
         if not self.hs.config.registration_shared_secret:
             raise SynapseError(400, "Shared secret registration is not enabled")
 
@@ -277,7 +268,7 @@ class RegisterRestServlet(RestServlet):
 
         # str() because otherwise hmac complains that 'unicode' does not
         # have the buffer interface
-        got_mac = str(mac)
+        got_mac = str(body["mac"])
 
         want_mac = hmac.new(
             key=self.hs.config.registration_shared_secret,
@@ -290,43 +281,132 @@ class RegisterRestServlet(RestServlet):
                 403, "HMAC incorrect",
             )
 
-        (user_id, token) = yield self.registration_handler.register(
-            localpart=username, password=password
+        (user_id, _) = yield self.registration_handler.register(
+            localpart=username, password=password, generate_token=False,
         )
-        defer.returnValue((yield self._create_registration_details(user_id, token)))
 
-    @defer.inlineCallbacks
-    def _create_registration_details(self, user_id, token):
-        refresh_token = yield self.auth_handler.issue_refresh_token(user_id)
-        defer.returnValue({
-            "user_id": user_id,
-            "access_token": token,
-            "home_server": self.hs.hostname,
-            "refresh_token": refresh_token,
-        })
+        result = yield self._create_registration_details(user_id, body)
+        defer.returnValue(result)
 
     @defer.inlineCallbacks
-    def onEmailTokenRequest(self, request):
-        body = parse_json_object_from_request(request)
+    def _register_email_threepid(self, user_id, threepid, token, bind_email):
+        """Add an email address as a 3pid identifier
+
+        Also adds an email pusher for the email address, if configured in the
+        HS config
+
+        Also optionally binds emails to the given user_id on the identity server
+
+        Args:
+            user_id (str): id of user
+            threepid (object): m.login.email.identity auth response
+            token (str): access_token for the user
+            bind_email (bool): true if the client requested the email to be
+                bound at the identity server
+        Returns:
+            defer.Deferred:
+        """
+        reqd = ('medium', 'address', 'validated_at')
+        if any(x not in threepid for x in reqd):
+            logger.info("Can't add incomplete 3pid")
+            defer.returnValue(None)
+
+        yield self.auth_handler.add_threepid(
+            user_id,
+            threepid['medium'],
+            threepid['address'],
+            threepid['validated_at'],
+        )
 
-        required = ['id_server', 'client_secret', 'email', 'send_attempt']
-        absent = []
-        for k in required:
-            if k not in body:
-                absent.append(k)
+        # And we add an email pusher for them by default, but only
+        # if email notifications are enabled (so people don't start
+        # getting mail spam where they weren't before if email
+        # notifs are set up on a home server)
+        if (self.hs.config.email_enable_notifs and
+                self.hs.config.email_notif_for_new_users):
+            # Pull the ID of the access token back out of the db
+            # It would really make more sense for this to be passed
+            # up when the access token is saved, but that's quite an
+            # invasive change I'd rather do separately.
+            user_tuple = yield self.store.get_user_by_access_token(
+                token
+            )
+            token_id = user_tuple["token_id"]
+
+            yield self.hs.get_pusherpool().add_pusher(
+                user_id=user_id,
+                access_token=token_id,
+                kind="email",
+                app_id="m.email",
+                app_display_name="Email Notifications",
+                device_display_name=threepid["address"],
+                pushkey=threepid["address"],
+                lang=None,  # We don't know a user's language here
+                data={},
+            )
 
-        if len(absent) > 0:
-            raise SynapseError(400, "Missing params: %r" % absent, Codes.MISSING_PARAM)
+        if bind_email:
+            logger.info("bind_email specified: binding")
+            logger.debug("Binding emails %s to %s", threepid, user_id)
+            yield self.identity_handler.bind_threepid(
+                threepid['threepid_creds'], user_id
+            )
+        else:
+            logger.info("bind_email not specified: not binding email")
 
-        existingUid = yield self.hs.get_datastore().get_user_id_by_threepid(
-            'email', body['email']
+    @defer.inlineCallbacks
+    def _create_registration_details(self, user_id, params):
+        """Complete registration of newly-registered user
+
+        Allocates device_id if one was not given; also creates access_token
+        and refresh_token.
+
+        Args:
+            user_id (str): full canonical @user:id
+            params (object): registration parameters, from which we pull
+                device_id and initial_device_display_name
+        Returns:
+            defer.Deferred: (object) dictionary for response from /register
+        """
+        device_id = yield self._register_device(user_id, params)
+
+        access_token, refresh_token = (
+            yield self.auth_handler.get_login_tuple_for_user_id(
+                user_id, device_id=device_id,
+                initial_display_name=params.get("initial_device_display_name")
+            )
         )
 
-        if existingUid is not None:
-            raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE)
+        defer.returnValue({
+            "user_id": user_id,
+            "access_token": access_token,
+            "home_server": self.hs.hostname,
+            "refresh_token": refresh_token,
+            "device_id": device_id,
+        })
 
-        ret = yield self.identity_handler.requestEmailToken(**body)
-        defer.returnValue((200, ret))
+    def _register_device(self, user_id, params):
+        """Register a device for a user.
+
+        This is called after the user's credentials have been validated, but
+        before the access token has been issued.
+
+        Args:
+            user_id (str): full canonical @user:id
+            params (object): registration parameters, from which we pull
+                device_id and initial_device_display_name
+        Returns:
+            defer.Deferred: (str) device_id
+        """
+        # register the user's device
+        device_id = params.get("device_id")
+        initial_display_name = params.get("initial_device_display_name")
+        device_id = self.device_handler.check_device_registered(
+            user_id, device_id, initial_display_name
+        )
+        return device_id
 
     @defer.inlineCallbacks
     def _do_guest_registration(self):
@@ -336,7 +416,11 @@ class RegisterRestServlet(RestServlet):
             generate_token=False,
             make_guest=True
         )
-        access_token = self.auth_handler.generate_access_token(user_id, ["guest = true"])
+        access_token = self.auth_handler.generate_access_token(
+            user_id, ["guest = true"]
+        )
+        # XXX the "guest" caveat is not copied by /tokenrefresh. That's ok
+        # so long as we don't return a refresh_token here.
         defer.returnValue((200, {
             "user_id": user_id,
             "access_token": access_token,
@@ -345,4 +429,5 @@ class RegisterRestServlet(RestServlet):
 
 
 def register_servlets(hs, http_server):
+    RegisterRequestTokenRestServlet(hs).register(http_server)
     RegisterRestServlet(hs).register(http_server)
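For reference, the device fields introduced above surface directly in the /register request and response. A minimal client-side sketch (hostname, endpoint path and credentials are illustrative; the field names come from _create_registration_details):

    import json
    import urllib2

    body = {
        "username": "alice",
        "password": "s3kr1t",
        # both optional: a device_id is allocated if none is supplied
        "device_id": "ABCDEFGH",
        "initial_device_display_name": "Alice's laptop",
    }
    req = urllib2.Request(
        "http://localhost:8008/_matrix/client/r0/register",
        data=json.dumps(body),
        headers={"Content-Type": "application/json"},
    )
    resp = json.load(urllib2.urlopen(req))
    # resp contains user_id, access_token, home_server, refresh_token and
    # the (possibly server-allocated) device_id
    print resp["device_id"]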
diff --git a/synapse/rest/client/v2_alpha/tokenrefresh.py b/synapse/rest/client/v2_alpha/tokenrefresh.py
index a158c2209a..0d312c91d4 100644
--- a/synapse/rest/client/v2_alpha/tokenrefresh.py
+++ b/synapse/rest/client/v2_alpha/tokenrefresh.py
@@ -38,10 +38,14 @@ class TokenRefreshRestServlet(RestServlet):
         body = parse_json_object_from_request(request)
         try:
             old_refresh_token = body["refresh_token"]
-            auth_handler = self.hs.get_handlers().auth_handler
-            (user_id, new_refresh_token) = yield self.store.exchange_refresh_token(
-                old_refresh_token, auth_handler.generate_refresh_token)
-            new_access_token = yield auth_handler.issue_access_token(user_id)
+            auth_handler = self.hs.get_auth_handler()
+            refresh_result = yield self.store.exchange_refresh_token(
+                old_refresh_token, auth_handler.generate_refresh_token
+            )
+            (user_id, new_refresh_token, device_id) = refresh_result
+            new_access_token = yield auth_handler.issue_access_token(
+                user_id, device_id
+            )
             defer.returnValue((200, {
                 "access_token": new_access_token,
                 "refresh_token": new_refresh_token,
diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py
index ca5468c402..e984ea47db 100644
--- a/synapse/rest/client/versions.py
+++ b/synapse/rest/client/versions.py
@@ -26,7 +26,11 @@ class VersionsRestServlet(RestServlet):
 
     def on_GET(self, request):
         return (200, {
-            "versions": ["r0.0.1"]
+            "versions": [
+                "r0.0.1",
+                "r0.1.0",
+                "r0.2.0",
+            ]
         })
 
 
diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py
index 7209d5a37d..9fe2013657 100644
--- a/synapse/rest/key/v2/remote_key_resource.py
+++ b/synapse/rest/key/v2/remote_key_resource.py
@@ -15,6 +15,7 @@
 from synapse.http.server import request_handler, respond_with_json_bytes
 from synapse.http.servlet import parse_integer, parse_json_object_from_request
 from synapse.api.errors import SynapseError, Codes
+from synapse.crypto.keyring import KeyLookupError
 
 from twisted.web.resource import Resource
 from twisted.web.server import NOT_DONE_YET
@@ -210,9 +211,10 @@ class RemoteKey(Resource):
                     yield self.keyring.get_server_verify_key_v2_direct(
                         server_name, key_ids
                     )
+                except KeyLookupError as e:
+                    logger.info("Failed to fetch key: %s", e)
                 except:
                     logger.exception("Failed to get key for %r", server_name)
-                    pass
             yield self.query_keys(
                 request, query, query_remote_on_cache_miss=False
             )
diff --git a/synapse/rest/media/v0/content_repository.py b/synapse/rest/media/v0/content_repository.py
index d9fc045fc6..956bd5da75 100644
--- a/synapse/rest/media/v0/content_repository.py
+++ b/synapse/rest/media/v0/content_repository.py
@@ -15,14 +15,12 @@
 
 from synapse.http.server import respond_with_json_bytes, finish_request
 
-from synapse.util.stringutils import random_string
 from synapse.api.errors import (
-    cs_exception, SynapseError, CodeMessageException, Codes, cs_error
+    Codes, cs_error
 )
 
 from twisted.protocols.basic import FileSender
 from twisted.web import server, resource
-from twisted.internet import defer
 
 import base64
 import simplejson as json
@@ -50,64 +48,10 @@ class ContentRepoResource(resource.Resource):
     """
     isLeaf = True
 
-    def __init__(self, hs, directory, auth, external_addr):
+    def __init__(self, hs, directory):
         resource.Resource.__init__(self)
         self.hs = hs
         self.directory = directory
-        self.auth = auth
-        self.external_addr = external_addr.rstrip('/')
-        self.max_upload_size = hs.config.max_upload_size
-
-        if not os.path.isdir(self.directory):
-            os.mkdir(self.directory)
-            logger.info("ContentRepoResource : Created %s directory.",
-                        self.directory)
-
-    @defer.inlineCallbacks
-    def map_request_to_name(self, request):
-        # auth the user
-        requester = yield self.auth.get_user_by_req(request)
-
-        # namespace all file uploads on the user
-        prefix = base64.urlsafe_b64encode(
-            requester.user.to_string()
-        ).replace('=', '')
-
-        # use a random string for the main portion
-        main_part = random_string(24)
-
-        # suffix with a file extension if we can make one. This is nice to
-        # provide a hint to clients on the file information. We will also reuse
-        # this info to spit back the content type to the client.
-        suffix = ""
-        if request.requestHeaders.hasHeader("Content-Type"):
-            content_type = request.requestHeaders.getRawHeaders(
-                "Content-Type")[0]
-            suffix = "." + base64.urlsafe_b64encode(content_type)
-            if (content_type.split("/")[0].lower() in
-                    ["image", "video", "audio"]):
-                file_ext = content_type.split("/")[-1]
-                # be a little paranoid and only allow a-z
-                file_ext = re.sub("[^a-z]", "", file_ext)
-                suffix += "." + file_ext
-
-        file_name = prefix + main_part + suffix
-        file_path = os.path.join(self.directory, file_name)
-        logger.info("User %s is uploading a file to path %s",
-                    request.user.user_id.to_string(),
-                    file_path)
-
-        # keep trying to make a non-clashing file, with a sensible max attempts
-        attempts = 0
-        while os.path.exists(file_path):
-            main_part = random_string(24)
-            file_name = prefix + main_part + suffix
-            file_path = os.path.join(self.directory, file_name)
-            attempts += 1
-            if attempts > 25:  # really? Really?
-                raise SynapseError(500, "Unable to create file.")
-
-        defer.returnValue(file_path)
 
     def render_GET(self, request):
         # no auth here on purpose, to allow anyone to view, even across home
@@ -155,58 +99,6 @@ class ContentRepoResource(resource.Resource):
 
         return server.NOT_DONE_YET
 
-    def render_POST(self, request):
-        self._async_render(request)
-        return server.NOT_DONE_YET
-
     def render_OPTIONS(self, request):
         respond_with_json_bytes(request, 200, {}, send_cors=True)
         return server.NOT_DONE_YET
-
-    @defer.inlineCallbacks
-    def _async_render(self, request):
-        try:
-            # TODO: The checks here are a bit late. The content will have
-            # already been uploaded to a tmp file at this point
-            content_length = request.getHeader("Content-Length")
-            if content_length is None:
-                raise SynapseError(
-                    msg="Request must specify a Content-Length", code=400
-                )
-            if int(content_length) > self.max_upload_size:
-                raise SynapseError(
-                    msg="Upload request body is too large",
-                    code=413,
-                )
-
-            fname = yield self.map_request_to_name(request)
-
-            # TODO I have a suspicious feeling this is just going to block
-            with open(fname, "wb") as f:
-                f.write(request.content.read())
-
-            # FIXME (erikj): These should use constants.
-            file_name = os.path.basename(fname)
-            # FIXME: we can't assume what the repo's public mounted path is
-            # ...plus self-signed SSL won't work to remote clients anyway
-            # ...and we can't assume that it's SSL anyway, as we might want to
-            # serve it via the non-SSL listener...
-            url = "%s/_matrix/content/%s" % (
-                self.external_addr, file_name
-            )
-
-            respond_with_json_bytes(request, 200,
-                                    json.dumps({"content_token": url}),
-                                    send_cors=True)
-
-        except CodeMessageException as e:
-            logger.exception(e)
-            respond_with_json_bytes(request, e.code,
-                                    json.dumps(cs_exception(e)))
-        except Exception as e:
-            logger.error("Failed to store file: %s" % e)
-            respond_with_json_bytes(
-                request,
-                500,
-                json.dumps({"error": "Internal server error"}),
-                send_cors=True)
diff --git a/synapse/rest/media/v1/filepath.py b/synapse/rest/media/v1/filepath.py
index 422ab86fb3..0137458f71 100644
--- a/synapse/rest/media/v1/filepath.py
+++ b/synapse/rest/media/v1/filepath.py
@@ -65,3 +65,9 @@ class MediaFilePaths(object):
             file_id[0:2], file_id[2:4], file_id[4:],
             file_name
         )
+
+    def remote_media_thumbnail_dir(self, server_name, file_id):
+        return os.path.join(
+            self.base_path, "remote_thumbnail", server_name,
+            file_id[0:2], file_id[2:4], file_id[4:],
+        )
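The new directory mirrors remote_media_filepath: the file_id is sharded two characters at a time, with the remainder as the leaf directory. An illustrative call (the base path is hypothetical):

    from synapse.rest.media.v1.filepath import MediaFilePaths

    paths = MediaFilePaths("/var/lib/synapse/media_store")
    print paths.remote_media_thumbnail_dir("matrix.org", "abcdefghij")
    # -> /var/lib/synapse/media_store/remote_thumbnail/matrix.org/ab/cd/efghij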
diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py
index d96bf9afe2..692e078419 100644
--- a/synapse/rest/media/v1/media_repository.py
+++ b/synapse/rest/media/v1/media_repository.py
@@ -26,14 +26,17 @@ from .thumbnailer import Thumbnailer
 
 from synapse.http.matrixfederationclient import MatrixFederationHttpClient
 from synapse.util.stringutils import random_string
+from synapse.api.errors import SynapseError
 
 from twisted.internet import defer, threads
 
-from synapse.util.async import ObservableDeferred
+from synapse.util.async import Linearizer
 from synapse.util.stringutils import is_ascii
 from synapse.util.logcontext import preserve_context_over_fn
 
 import os
+import errno
+import shutil
 
 import cgi
 import logging
@@ -42,8 +45,11 @@ import urlparse
 logger = logging.getLogger(__name__)
 
 
+UPDATE_RECENTLY_ACCESSED_REMOTES_TS = 60 * 1000
+
+
 class MediaRepository(object):
-    def __init__(self, hs, filepaths):
+    def __init__(self, hs):
         self.auth = hs.get_auth()
         self.client = MatrixFederationHttpClient(hs)
         self.clock = hs.get_clock()
@@ -51,11 +57,28 @@ class MediaRepository(object):
         self.store = hs.get_datastore()
         self.max_upload_size = hs.config.max_upload_size
         self.max_image_pixels = hs.config.max_image_pixels
-        self.filepaths = filepaths
-        self.downloads = {}
+        self.filepaths = MediaFilePaths(hs.config.media_store_path)
         self.dynamic_thumbnails = hs.config.dynamic_thumbnails
         self.thumbnail_requirements = hs.config.thumbnail_requirements
 
+        self.remote_media_linearizer = Linearizer()
+
+        self.recently_accessed_remotes = set()
+
+        self.clock.looping_call(
+            self._update_recently_accessed_remotes,
+            UPDATE_RECENTLY_ACCESSED_REMOTES_TS
+        )
+
+    @defer.inlineCallbacks
+    def _update_recently_accessed_remotes(self):
+        media = self.recently_accessed_remotes
+        self.recently_accessed_remotes = set()
+
+        yield self.store.update_cached_last_access_time(
+            media, self.clock.time_msec()
+        )
+
     @staticmethod
     def _makedirs(filepath):
         dirname = os.path.dirname(filepath)
@@ -92,22 +115,12 @@ class MediaRepository(object):
 
         defer.returnValue("mxc://%s/%s" % (self.server_name, media_id))
 
+    @defer.inlineCallbacks
     def get_remote_media(self, server_name, media_id):
         key = (server_name, media_id)
-        download = self.downloads.get(key)
-        if download is None:
-            download = self._get_remote_media_impl(server_name, media_id)
-            download = ObservableDeferred(
-                download,
-                consumeErrors=True
-            )
-            self.downloads[key] = download
-
-            @download.addBoth
-            def callback(media_info):
-                del self.downloads[key]
-                return media_info
-        return download.observe()
+        with (yield self.remote_media_linearizer.queue(key)):
+            media_info = yield self._get_remote_media_impl(server_name, media_id)
+        defer.returnValue(media_info)
 
     @defer.inlineCallbacks
     def _get_remote_media_impl(self, server_name, media_id):
@@ -118,6 +131,11 @@ class MediaRepository(object):
             media_info = yield self._download_remote_file(
                 server_name, media_id
             )
+        else:
+            self.recently_accessed_remotes.add((server_name, media_id))
+            yield self.store.update_cached_last_access_time(
+                [(server_name, media_id)], self.clock.time_msec()
+            )
         defer.returnValue(media_info)
 
     @defer.inlineCallbacks
@@ -134,10 +152,15 @@ class MediaRepository(object):
                 request_path = "/".join((
                     "/_matrix/media/v1/download", server_name, media_id,
                 ))
-                length, headers = yield self.client.get_file(
-                    server_name, request_path, output_stream=f,
-                    max_size=self.max_upload_size,
-                )
+                try:
+                    length, headers = yield self.client.get_file(
+                        server_name, request_path, output_stream=f,
+                        max_size=self.max_upload_size,
+                    )
+                except Exception as e:
+                    logger.warn("Failed to fetch remoted media %r", e)
+                    raise SynapseError(502, "Failed to fetch remoted media")
+
             media_type = headers["Content-Type"][0]
             time_now_ms = self.clock.time_msec()
 
@@ -410,6 +433,41 @@ class MediaRepository(object):
             "height": m_height,
         })
 
+    @defer.inlineCallbacks
+    def delete_old_remote_media(self, before_ts):
+        old_media = yield self.store.get_remote_media_before(before_ts)
+
+        deleted = 0
+
+        for media in old_media:
+            origin = media["media_origin"]
+            media_id = media["media_id"]
+            file_id = media["filesystem_id"]
+            key = (origin, media_id)
+
+            logger.info("Deleting: %r", key)
+
+            with (yield self.remote_media_linearizer.queue(key)):
+                full_path = self.filepaths.remote_media_filepath(origin, file_id)
+                try:
+                    os.remove(full_path)
+                except OSError as e:
+                    logger.warn("Failed to remove file: %r", full_path)
+                    if e.errno == errno.ENOENT:
+                        pass
+                    else:
+                        continue
+
+                thumbnail_dir = self.filepaths.remote_media_thumbnail_dir(
+                    origin, file_id
+                )
+                shutil.rmtree(thumbnail_dir, ignore_errors=True)
+
+                yield self.store.delete_remote_media(origin, media_id)
+                deleted += 1
+
+        defer.returnValue({"deleted": deleted})
+
 
 class MediaRepositoryResource(Resource):
     """File uploading and downloading.
@@ -458,9 +516,8 @@ class MediaRepositoryResource(Resource):
 
     def __init__(self, hs):
         Resource.__init__(self)
-        filepaths = MediaFilePaths(hs.config.media_store_path)
 
-        media_repo = MediaRepository(hs, filepaths)
+        media_repo = hs.get_media_repository()
 
         self.putChild("upload", UploadResource(hs, media_repo))
         self.putChild("download", DownloadResource(hs, media_repo))
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index 37dd1de899..bdd0e60c5b 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -29,6 +29,8 @@ from synapse.http.server import (
 from synapse.util.async import ObservableDeferred
 from synapse.util.stringutils import is_ascii
 
+from copy import deepcopy
+
 import os
 import re
 import fnmatch
@@ -252,7 +254,8 @@ class PreviewUrlResource(Resource):
 
         og = {}
         for tag in tree.xpath("//*/meta[starts-with(@property, 'og:')]"):
-            og[tag.attrib['property']] = tag.attrib['content']
+            if 'content' in tag.attrib:
+                og[tag.attrib['property']] = tag.attrib['content']
 
         # TODO: grab article: meta tags too, e.g.:
 
@@ -279,7 +282,7 @@ class PreviewUrlResource(Resource):
                 # TODO: consider inlined CSS styles as well as width & height attribs
                 images = tree.xpath("//img[@src][number(@width)>10][number(@height)>10]")
                 images = sorted(images, key=lambda i: (
-                    -1 * int(i.attrib['width']) * int(i.attrib['height'])
+                    -1 * float(i.attrib['width']) * float(i.attrib['height'])
                 ))
                 if not images:
                     images = tree.xpath("//img[@src]")
@@ -287,9 +290,9 @@ class PreviewUrlResource(Resource):
                     og['og:image'] = images[0].attrib['src']
 
         # pre-cache the image for posterity
-        # FIXME: it might be cleaner to use the same flow as the main /preview_url request
-        # itself and benefit from the same caching etc.  But for now we just rely on the
-        # caching on the master request to speed things up.
+        # FIXME: it might be cleaner to use the same flow as the main /preview_url
+        # request itself and benefit from the same caching etc.  But for now we
+        # just rely on the caching on the master request to speed things up.
         if 'og:image' in og and og['og:image']:
             image_info = yield self._download_url(
                 self._rebase_url(og['og:image'], media_info['uri']), requester.user
@@ -328,20 +331,24 @@ class PreviewUrlResource(Resource):
                 # ...or if they are within a <script/> or <style/> tag.
                 # This is a very very very coarse approximation to a plain text
                 # render of the page.
-                text_nodes = tree.xpath("//text()[not(ancestor::header | ancestor::nav | "
-                                        "ancestor::aside | ancestor::footer | "
-                                        "ancestor::script | ancestor::style)]" +
-                                        "[ancestor::body]")
-                text = ''
-                for text_node in text_nodes:
-                    if len(text) < 500:
-                        text += text_node + ' '
-                    else:
-                        break
-                text = re.sub(r'[\t ]+', ' ', text)
-                text = re.sub(r'[\t \r\n]*[\r\n]+', '\n', text)
-                text = text.strip()[:500]
-                og['og:description'] = text if text else None
+
+                # We don't just use XPath here, as that is slow on some machines.
+
+                # We clone `tree` as we modify it.
+                cloned_tree = deepcopy(tree.find("body"))
+
+                TAGS_TO_REMOVE = ("header", "nav", "aside", "footer", "script", "style",)
+                for el in cloned_tree.iter(TAGS_TO_REMOVE):
+                    el.getparent().remove(el)
+
+                # Split all the text nodes into paragraphs (by splitting on new
+                # lines)
+                text_nodes = (
+                    re.sub(r'\s+', '\n', el.text).strip()
+                    for el in cloned_tree.iter()
+                    if el.text and isinstance(el.tag, basestring)  # Removes comments
+                )
+                og['og:description'] = summarize_paragraphs(text_nodes)
 
         # TODO: delete the url downloads to stop diskfilling,
         # as we only ever cared about its OG
@@ -449,3 +456,56 @@ class PreviewUrlResource(Resource):
             content_type.startswith("application/xhtml")
         ):
             return True
+
+
+def summarize_paragraphs(text_nodes, min_size=200, max_size=500):
+    # Try to get a summary of between min_size and max_size characters
+    # (by default, 200-500), respecting paragraph and then word boundaries.
+    # TODO: Respect sentences?
+
+    description = ''
+
+    # Keep adding paragraphs until we reach min_size.
+    for text_node in text_nodes:
+        if len(description) < min_size:
+            text_node = re.sub(r'[\t \r\n]+', ' ', text_node)
+            description += text_node + '\n\n'
+        else:
+            break
+
+    description = description.strip()
+    description = re.sub(r'[\t ]+', ' ', description)
+    description = re.sub(r'[\t \r\n]*[\r\n]+', '\n\n', description)
+
+        # If concatenating paragraphs to get above min_size took us
+        # over max_size, then we need to truncate mid-paragraph.
+    if len(description) > max_size:
+        new_desc = ""
+
+        # This splits the paragraph into words, but keeps the
+        # (preceding) whitespace intact so we can easily concat
+        # words back together.
+        for match in re.finditer(r"\s*\S+", description):
+            word = match.group()
+
+            # Keep adding words while the total length is less than
+            # max_size.
+            if len(word) + len(new_desc) < max_size:
+                new_desc += word
+            else:
+                # At this point the next word *will* take us over
+                # max_size, but we also want to ensure that it's not
+                # a huge word. If it is, add it anyway and we'll
+                # truncate later.
+                if len(new_desc) < min_size:
+                    new_desc += word
+                break
+
+        # Double check that we're not over the limit
+        if len(new_desc) > max_size:
+            new_desc = new_desc[:max_size]
+
+        # We always add an ellipsis because, at the very least,
+        # we chopped mid-paragraph.
+        description = new_desc.strip() + "…"
+    return description if description else None
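Illustrative behaviour of summarize_paragraphs with small limits (the text is made up; with the default 200/500 limits the same input would be returned unchanged, with no ellipsis):

    print summarize_paragraphs(
        ["First paragraph of the page, reasonably short."],
        min_size=20, max_size=30,
    )
    # -> "First paragraph of the page,…" (truncated at a word boundary,
    #    with the ellipsis appended)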
diff --git a/synapse/server.py b/synapse/server.py
index 01f828819f..6bb4988309 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -19,32 +19,38 @@
 # partial one for unit test mocking.
 
 # Imports required for the default HomeServer() implementation
-from twisted.web.client import BrowserLikePolicyForHTTPS
+import logging
+
 from twisted.enterprise import adbapi
+from twisted.web.client import BrowserLikePolicyForHTTPS
 
-from synapse.federation import initialize_http_replication
-from synapse.http.client import SimpleHttpClient, InsecureInterceptableContextFactory
-from synapse.notifier import Notifier
 from synapse.api.auth import Auth
+from synapse.api.filtering import Filtering
+from synapse.api.ratelimiting import Ratelimiter
+from synapse.appservice.api import ApplicationServiceApi
+from synapse.appservice.scheduler import ApplicationServiceScheduler
+from synapse.crypto.keyring import Keyring
+from synapse.events.builder import EventBuilderFactory
+from synapse.federation import initialize_http_replication
 from synapse.handlers import Handlers
+from synapse.handlers.appservice import ApplicationServicesHandler
+from synapse.handlers.auth import AuthHandler
+from synapse.handlers.device import DeviceHandler
+from synapse.handlers.e2e_keys import E2eKeysHandler
 from synapse.handlers.presence import PresenceHandler
+from synapse.handlers.room import RoomListHandler
 from synapse.handlers.sync import SyncHandler
 from synapse.handlers.typing import TypingHandler
+from synapse.http.client import SimpleHttpClient, InsecureInterceptableContextFactory
+from synapse.http.matrixfederationclient import MatrixFederationHttpClient
+from synapse.notifier import Notifier
+from synapse.push.pusherpool import PusherPool
+from synapse.rest.media.v1.media_repository import MediaRepository
 from synapse.state import StateHandler
 from synapse.storage import DataStore
+from synapse.streams.events import EventSources
 from synapse.util import Clock
 from synapse.util.distributor import Distributor
-from synapse.streams.events import EventSources
-from synapse.api.ratelimiting import Ratelimiter
-from synapse.crypto.keyring import Keyring
-from synapse.push.pusherpool import PusherPool
-from synapse.events.builder import EventBuilderFactory
-from synapse.api.filtering import Filtering
-
-from synapse.http.matrixfederationclient import MatrixFederationHttpClient
-
-import logging
-
 
 logger = logging.getLogger(__name__)
 
@@ -84,6 +90,13 @@ class HomeServer(object):
         'presence_handler',
         'sync_handler',
         'typing_handler',
+        'room_list_handler',
+        'auth_handler',
+        'device_handler',
+        'e2e_keys_handler',
+        'application_service_api',
+        'application_service_scheduler',
+        'application_service_handler',
         'notifier',
         'distributor',
         'client_resource',
@@ -103,6 +116,7 @@ class HomeServer(object):
         'filtering',
         'http_client_context_factory',
         'simple_http_client',
+        'media_repository',
     ]
 
     def __init__(self, hostname, **kwargs):
@@ -179,6 +193,27 @@ class HomeServer(object):
     def build_sync_handler(self):
         return SyncHandler(self)
 
+    def build_room_list_handler(self):
+        return RoomListHandler(self)
+
+    def build_auth_handler(self):
+        return AuthHandler(self)
+
+    def build_device_handler(self):
+        return DeviceHandler(self)
+
+    def build_e2e_keys_handler(self):
+        return E2eKeysHandler(self)
+
+    def build_application_service_api(self):
+        return ApplicationServiceApi(self)
+
+    def build_application_service_scheduler(self):
+        return ApplicationServiceScheduler(self)
+
+    def build_application_service_handler(self):
+        return ApplicationServicesHandler(self)
+
     def build_event_sources(self):
         return EventSources(self)
 
@@ -208,6 +243,9 @@ class HomeServer(object):
             **self.db_config.get("args", {})
         )
 
+    def build_media_repository(self):
+        return MediaRepository(self)
+
     def remove_pusher(self, app_id, push_key, user_id):
         return self.get_pusherpool().remove_pusher(app_id, push_key, user_id)
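The entries added to DEPENDENCIES are not mere documentation: HomeServer generates a lazy get_<name>() accessor for each one, which calls the matching build_<name>() once and caches the result (and which tests can override by passing <name>= to the constructor). A simplified sketch of that pattern, not the real implementation:

    class LazyServer(object):
        DEPENDENCIES = ["media_repository"]

        def __init__(self, **kwargs):
            # tests may inject pre-built dependencies here
            self._deps = dict(kwargs)

        def _get(self, name):
            if name not in self._deps:
                self._deps[name] = getattr(self, "build_" + name)()
            return self._deps[name]

        def get_media_repository(self):
            return self._get("media_repository")

        def build_media_repository(self):
            return object()  # stand-in for MediaRepository(self)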
 
diff --git a/synapse/server.pyi b/synapse/server.pyi
new file mode 100644
index 0000000000..9570df5537
--- /dev/null
+++ b/synapse/server.pyi
@@ -0,0 +1,29 @@
+import synapse.api.auth
+import synapse.handlers
+import synapse.handlers.auth
+import synapse.handlers.device
+import synapse.handlers.e2e_keys
+import synapse.storage
+import synapse.state
+
+class HomeServer(object):
+    def get_auth(self) -> synapse.api.auth.Auth:
+        pass
+
+    def get_auth_handler(self) -> synapse.handlers.auth.AuthHandler:
+        pass
+
+    def get_datastore(self) -> synapse.storage.DataStore:
+        pass
+
+    def get_device_handler(self) -> synapse.handlers.device.DeviceHandler:
+        pass
+
+    def get_e2e_keys_handler(self) -> synapse.handlers.e2e_keys.E2eKeysHandler:
+        pass
+
+    def get_handlers(self) -> synapse.handlers.Handlers:
+        pass
+
+    def get_state_handler(self) -> synapse.state.StateHandler:
+        pass
diff --git a/synapse/state.py b/synapse/state.py
index d0f76dc4f5..ef1bc470be 100644
--- a/synapse/state.py
+++ b/synapse/state.py
@@ -379,7 +379,8 @@ class StateHandler(object):
             try:
                 # FIXME: hs.get_auth() is bad style, but we need to do it to
                 # get around circular deps.
-                self.hs.get_auth().check(event, auth_events)
+                # The signatures have already been checked at this point
+                self.hs.get_auth().check(event, auth_events, do_sig_check=False)
                 prev_event = event
             except AuthError:
                 return prev_event
@@ -391,7 +392,8 @@ class StateHandler(object):
             try:
                 # FIXME: hs.get_auth() is bad style, but we need to do it to
                 # get around circular deps.
-                self.hs.get_auth().check(event, auth_events)
+                # The signatures have already been checked at this point
+                self.hs.get_auth().check(event, auth_events, do_sig_check=False)
                 return event
             except AuthError:
                 pass
diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py
index 49feb77779..73fb334dd6 100644
--- a/synapse/storage/__init__.py
+++ b/synapse/storage/__init__.py
@@ -14,10 +14,12 @@
 # limitations under the License.
 
 from twisted.internet import defer
+
+from synapse.storage.devices import DeviceStore
 from .appservice import (
     ApplicationServiceStore, ApplicationServiceTransactionStore
 )
-from ._base import Cache
+from ._base import LoggingTransaction
 from .directory import DirectoryStore
 from .events import EventsStore
 from .presence import PresenceStore, UserPresenceState
@@ -45,6 +47,7 @@ from .search import SearchStore
 from .tags import TagsStore
 from .account_data import AccountDataStore
 from .openid import OpenIdStore
+from .client_ips import ClientIpStore
 
 from .util.id_generators import IdGenerator, StreamIdGenerator, ChainedIdGenerator
 
@@ -58,12 +61,6 @@ import logging
 logger = logging.getLogger(__name__)
 
 
-# Number of msec of granularity to store the user IP 'last seen' time. Smaller
-# times give more inserts into the database even for readonly API hits
-# 120 seconds == 2 minutes
-LAST_SEEN_GRANULARITY = 120 * 1000
-
-
 class DataStore(RoomMemberStore, RoomStore,
                 RegistrationStore, StreamStore, ProfileStore,
                 PresenceStore, TransactionStore,
@@ -84,6 +81,8 @@ class DataStore(RoomMemberStore, RoomStore,
                 AccountDataStore,
                 EventPushActionsStore,
                 OpenIdStore,
+                ClientIpStore,
+                DeviceStore,
                 ):
 
     def __init__(self, db_conn, hs):
@@ -91,17 +90,13 @@ class DataStore(RoomMemberStore, RoomStore,
         self._clock = hs.get_clock()
         self.database_engine = hs.database_engine
 
-        self.client_ip_last_seen = Cache(
-            name="client_ip_last_seen",
-            keylen=4,
-        )
-
         self._stream_id_gen = StreamIdGenerator(
             db_conn, "events", "stream_ordering",
             extra_tables=[("local_invites", "stream_id")]
         )
         self._backfill_id_gen = StreamIdGenerator(
-            db_conn, "events", "stream_ordering", step=-1
+            db_conn, "events", "stream_ordering", step=-1,
+            extra_tables=[("ex_outlier_stream", "event_stream_ordering")]
         )
         self._receipts_id_gen = StreamIdGenerator(
             db_conn, "receipts_linearized", "stream_id"
@@ -149,7 +144,7 @@ class DataStore(RoomMemberStore, RoomStore,
             "AccountDataAndTagsChangeCache", account_max,
         )
 
-        self.__presence_on_startup = self._get_active_presence(db_conn)
+        self._presence_on_startup = self._get_active_presence(db_conn)
 
         presence_cache_prefill, min_presence_val = self._get_cache_dict(
             db_conn, "presence_stream",
@@ -174,7 +169,12 @@ class DataStore(RoomMemberStore, RoomStore,
             prefilled_cache=push_rules_prefill,
         )
 
-        cur = db_conn.cursor()
+        cur = LoggingTransaction(
+            db_conn.cursor(),
+            name="_find_stream_orderings_for_times_txn",
+            database_engine=self.database_engine,
+            after_callbacks=[]
+        )
         self._find_stream_orderings_for_times_txn(cur)
         cur.close()
 
@@ -185,8 +185,8 @@ class DataStore(RoomMemberStore, RoomStore,
         super(DataStore, self).__init__(hs)
 
     def take_presence_startup_info(self):
-        active_on_startup = self.__presence_on_startup
-        self.__presence_on_startup = None
+        active_on_startup = self._presence_on_startup
+        self._presence_on_startup = None
         return active_on_startup
 
     def _get_active_presence(self, db_conn):
@@ -212,39 +212,6 @@ class DataStore(RoomMemberStore, RoomStore,
         return [UserPresenceState(**row) for row in rows]
 
     @defer.inlineCallbacks
-    def insert_client_ip(self, user, access_token, ip, user_agent):
-        now = int(self._clock.time_msec())
-        key = (user.to_string(), access_token, ip)
-
-        try:
-            last_seen = self.client_ip_last_seen.get(key)
-        except KeyError:
-            last_seen = None
-
-        # Rate-limited inserts
-        if last_seen is not None and (now - last_seen) < LAST_SEEN_GRANULARITY:
-            defer.returnValue(None)
-
-        self.client_ip_last_seen.prefill(key, now)
-
-        # It's safe not to lock here: a) no unique constraint,
-        # b) LAST_SEEN_GRANULARITY makes concurrent updates incredibly unlikely
-        yield self._simple_upsert(
-            "user_ips",
-            keyvalues={
-                "user_id": user.to_string(),
-                "access_token": access_token,
-                "ip": ip,
-                "user_agent": user_agent,
-            },
-            values={
-                "last_seen": now,
-            },
-            desc="insert_client_ip",
-            lock=False,
-        )
-
-    @defer.inlineCallbacks
     def count_daily_users(self):
         """
         Counts the number of users who used this homeserver in the last 24 hours.
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index 56a0dd80f3..0117fdc639 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -152,6 +152,7 @@ class SQLBaseStore(object):
 
     def __init__(self, hs):
         self.hs = hs
+        self._clock = hs.get_clock()
         self._db_pool = hs.get_db_pool()
 
         self._previous_txn_total_time = 0
@@ -596,10 +597,13 @@ class SQLBaseStore(object):
         more rows, returning the result as a list of dicts.
 
         Args:
-            table : string giving the table name
-            keyvalues : dict of column names and values to select the rows with,
-            or None to not apply a WHERE clause.
-            retcols : list of strings giving the names of the columns to return
+            table (str): the table name
+            keyvalues (dict[str, Any] | None):
+                column names and values to select the rows with, or None to not
+                apply a WHERE clause.
+            retcols (iterable[str]): the names of the columns to return
+        Returns:
+            defer.Deferred: resolves to list[dict[str, Any]]
         """
         return self.runInteraction(
             desc,
@@ -614,9 +618,11 @@ class SQLBaseStore(object):
 
         Args:
             txn : Transaction object
-            table : string giving the table name
-            keyvalues : dict of column names and values to select the rows with
-            retcols : list of strings giving the names of the columns to return
+            table (str): the table name
+            keyvalues (dict[str, T] | None):
+                column names and values to select the rows with, or None to not
+                apply a WHERE clause.
+            retcols (iterable[str]): the names of the columns to return
         """
         if keyvalues:
             sql = "SELECT %s FROM %s WHERE %s" % (
@@ -806,6 +812,11 @@ class SQLBaseStore(object):
         if txn.rowcount > 1:
             raise StoreError(500, "more than one row matched")
 
+    def _simple_delete(self, table, keyvalues, desc):
+        return self.runInteraction(
+            desc, self._simple_delete_txn, table, keyvalues
+        )
+
     @staticmethod
     def _simple_delete_txn(txn, table, keyvalues):
         sql = "DELETE FROM %s WHERE %s" % (
diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py
index ec7e8d40d2..3fa226e92d 100644
--- a/synapse/storage/account_data.py
+++ b/synapse/storage/account_data.py
@@ -138,6 +138,9 @@ class AccountDataStore(SQLBaseStore):
             A deferred pair of lists of tuples of stream_id int, user_id string,
             room_id string, type string, and content string.
         """
+        if last_room_id == current_id and last_global_id == current_id:
+            return defer.succeed(([], []))
+
         def get_updated_account_data_txn(txn):
             sql = (
                 "SELECT stream_id, user_id, account_data_type, content"
diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py
index feb9d228ae..d1ee533fac 100644
--- a/synapse/storage/appservice.py
+++ b/synapse/storage/appservice.py
@@ -298,6 +298,7 @@ class ApplicationServiceTransactionStore(SQLBaseStore):
             dict(txn_id=txn_id, as_id=service.id)
         )
 
+    @defer.inlineCallbacks
     def get_oldest_unsent_txn(self, service):
         """Get the oldest transaction which has not been sent for this
         service.
@@ -308,12 +309,23 @@ class ApplicationServiceTransactionStore(SQLBaseStore):
             A Deferred which resolves to an AppServiceTransaction or
             None.
         """
-        return self.runInteraction(
+        entry = yield self.runInteraction(
             "get_oldest_unsent_appservice_txn",
             self._get_oldest_unsent_txn,
             service
         )
 
+        if not entry:
+            defer.returnValue(None)
+
+        event_ids = json.loads(entry["event_ids"])
+
+        events = yield self._get_events(event_ids)
+
+        defer.returnValue(AppServiceTransaction(
+            service=service, id=entry["txn_id"], events=events
+        ))
+
     def _get_oldest_unsent_txn(self, txn, service):
         # Monotonically increasing txn ids, so just select the smallest
         # one in the txns table (we delete them when they are sent)
@@ -328,12 +340,7 @@ class ApplicationServiceTransactionStore(SQLBaseStore):
 
         entry = rows[0]
 
-        event_ids = json.loads(entry["event_ids"])
-        events = self._get_events_txn(txn, event_ids)
-
-        return AppServiceTransaction(
-            service=service, id=entry["txn_id"], events=events
-        )
+        return entry
 
     def _get_last_txn(self, txn, service_id):
         txn.execute(
diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py
index 66a995157d..30d0e4c5dc 100644
--- a/synapse/storage/background_updates.py
+++ b/synapse/storage/background_updates.py
@@ -14,6 +14,7 @@
 # limitations under the License.
 
 from ._base import SQLBaseStore
+from . import engines
 
 from twisted.internet import defer
 
@@ -87,10 +88,12 @@ class BackgroundUpdateStore(SQLBaseStore):
 
     @defer.inlineCallbacks
     def start_doing_background_updates(self):
-        while True:
-            if self._background_update_timer is not None:
-                return
+        assert self._background_update_timer is None, \
+            "background updates already running"
+
+        logger.info("Starting background schema updates")
 
+        while True:
             sleep = defer.Deferred()
             self._background_update_timer = self._clock.call_later(
                 self.BACKGROUND_UPDATE_INTERVAL_MS / 1000., sleep.callback, None
@@ -101,22 +104,23 @@ class BackgroundUpdateStore(SQLBaseStore):
                 self._background_update_timer = None
 
             try:
-                result = yield self.do_background_update(
+                result = yield self.do_next_background_update(
                     self.BACKGROUND_UPDATE_DURATION_MS
                 )
             except:
                 logger.exception("Error doing update")
-
-            if result is None:
-                logger.info(
-                    "No more background updates to do."
-                    " Unscheduling background update task."
-                )
-                return
+            else:
+                if result is None:
+                    logger.info(
+                        "No more background updates to do."
+                        " Unscheduling background update task."
+                    )
+                    defer.returnValue(None)
 
     @defer.inlineCallbacks
-    def do_background_update(self, desired_duration_ms):
-        """Does some amount of work on a background update
+    def do_next_background_update(self, desired_duration_ms):
+        """Does some amount of work on the next queued background update
+
         Args:
             desired_duration_ms(float): How long we want to spend
                 updating.
@@ -135,11 +139,21 @@ class BackgroundUpdateStore(SQLBaseStore):
                 self._background_update_queue.append(update['update_name'])
 
         if not self._background_update_queue:
+            # no work left to do
             defer.returnValue(None)
 
+        # pop from the front, and add back to the back
         update_name = self._background_update_queue.pop(0)
         self._background_update_queue.append(update_name)
 
+        res = yield self._do_background_update(update_name, desired_duration_ms)
+        defer.returnValue(res)
+
+    @defer.inlineCallbacks
+    def _do_background_update(self, update_name, desired_duration_ms):
+        logger.info("Starting update batch on background update '%s'",
+                    update_name)
+
         update_handler = self._background_update_handlers[update_name]
 
         performance = self._background_update_performance.get(update_name)
@@ -202,6 +216,64 @@ class BackgroundUpdateStore(SQLBaseStore):
         """
         self._background_update_handlers[update_name] = update_handler
 
+    def register_background_index_update(self, update_name, index_name,
+                                         table, columns):
+        """Helper for store classes to do a background index addition
+
+        To use:
+
+        1. use a schema delta file to add a background update. Example:
+            INSERT INTO background_updates (update_name, progress_json) VALUES
+                ('my_new_index', '{}');
+
+        2. In the Store constructor, call this method
+
+        Args:
+            update_name (str): update_name to register for
+            index_name (str): name of index to add
+            table (str): table to add index to
+            columns (list[str]): columns/expressions to include in index
+        """
+
+        # if this is postgres, we add the indexes concurrently. Otherwise
+        # we fall back to doing it inline
+        if isinstance(self.database_engine, engines.PostgresEngine):
+            conc = True
+        else:
+            conc = False
+
+        sql = "CREATE INDEX %(conc)s %(name)s ON %(table)s (%(columns)s)" \
+              % {
+                  "conc": "CONCURRENTLY" if conc else "",
+                  "name": index_name,
+                  "table": table,
+                  "columns": ", ".join(columns),
+              }
+
+        def create_index_concurrently(conn):
+            conn.rollback()
+            # postgres insists on autocommit for the index
+            conn.set_session(autocommit=True)
+            c = conn.cursor()
+            c.execute(sql)
+            conn.set_session(autocommit=False)
+
+        def create_index(conn):
+            c = conn.cursor()
+            c.execute(sql)
+
+        @defer.inlineCallbacks
+        def updater(progress, batch_size):
+            logger.info("Adding index %s to %s", index_name, table)
+            if conc:
+                yield self.runWithConnection(create_index_concurrently)
+            else:
+                yield self.runWithConnection(create_index)
+            yield self._end_background_update(update_name)
+            defer.returnValue(1)
+
+        self.register_background_update_handler(update_name, updater)
+
     def start_background_update(self, update_name, progress):
         """Starts a background update running.
 
diff --git a/synapse/storage/client_ips.py b/synapse/storage/client_ips.py
new file mode 100644
index 0000000000..71e5ea112f
--- /dev/null
+++ b/synapse/storage/client_ips.py
@@ -0,0 +1,145 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from twisted.internet import defer
+
+from ._base import Cache
+from . import background_updates
+
+logger = logging.getLogger(__name__)
+
+# Number of msec of granularity to store the user IP 'last seen' time. Smaller
+# times give more inserts into the database even for readonly API hits
+# 120 seconds == 2 minutes
+LAST_SEEN_GRANULARITY = 120 * 1000
+
+
+class ClientIpStore(background_updates.BackgroundUpdateStore):
+    def __init__(self, hs):
+        self.client_ip_last_seen = Cache(
+            name="client_ip_last_seen",
+            keylen=4,
+        )
+
+        super(ClientIpStore, self).__init__(hs)
+
+        self.register_background_index_update(
+            "user_ips_device_index",
+            index_name="user_ips_device_id",
+            table="user_ips",
+            columns=["user_id", "device_id", "last_seen"],
+        )
+
+    @defer.inlineCallbacks
+    def insert_client_ip(self, user, access_token, ip, user_agent, device_id):
+        now = int(self._clock.time_msec())
+        key = (user.to_string(), access_token, ip)
+
+        try:
+            last_seen = self.client_ip_last_seen.get(key)
+        except KeyError:
+            last_seen = None
+
+        # Rate-limited inserts
+        if last_seen is not None and (now - last_seen) < LAST_SEEN_GRANULARITY:
+            defer.returnValue(None)
+
+        self.client_ip_last_seen.prefill(key, now)
+
+        # It's safe not to lock here: a) no unique constraint,
+        # b) LAST_SEEN_GRANULARITY makes concurrent updates incredibly unlikely
+        yield self._simple_upsert(
+            "user_ips",
+            keyvalues={
+                "user_id": user.to_string(),
+                "access_token": access_token,
+                "ip": ip,
+                "user_agent": user_agent,
+                "device_id": device_id,
+            },
+            values={
+                "last_seen": now,
+            },
+            desc="insert_client_ip",
+            lock=False,
+        )
+
+    @defer.inlineCallbacks
+    def get_last_client_ip_by_device(self, devices):
+        """For each device_id listed, give the user_ip it was last seen on
+
+        Args:
+            devices (iterable[(str, str)]):  list of (user_id, device_id) pairs
+
+        Returns:
+            defer.Deferred: resolves to a dict, where the keys
+            are (user_id, device_id) tuples. The values are also dicts, with
+            keys giving the column names
+        """
+
+        res = yield self.runInteraction(
+            "get_last_client_ip_by_device",
+            self._get_last_client_ip_by_device_txn,
+            retcols=(
+                "user_id",
+                "access_token",
+                "ip",
+                "user_agent",
+                "device_id",
+                "last_seen",
+            ),
+            devices=devices
+        )
+
+        ret = {(d["user_id"], d["device_id"]): d for d in res}
+        defer.returnValue(ret)
+
+    @classmethod
+    def _get_last_client_ip_by_device_txn(cls, txn, devices, retcols):
+        where_clauses = []
+        bindings = []
+        for (user_id, device_id) in devices:
+            if device_id is None:
+                where_clauses.append("(user_id = ? AND device_id IS NULL)")
+                bindings.extend((user_id, ))
+            else:
+                where_clauses.append("(user_id = ? AND device_id = ?)")
+                bindings.extend((user_id, device_id))
+
+        inner_select = (
+            "SELECT MAX(last_seen) mls, user_id, device_id FROM user_ips "
+            "WHERE %(where)s "
+            "GROUP BY user_id, device_id"
+        ) % {
+            "where": " OR ".join(where_clauses),
+        }
+
+        sql = (
+            "SELECT %(retcols)s FROM user_ips "
+            "JOIN (%(inner_select)s) ips ON"
+            "    user_ips.last_seen = ips.mls AND"
+            "    user_ips.user_id = ips.user_id AND"
+            "    (user_ips.device_id = ips.device_id OR"
+            "         (user_ips.device_id IS NULL AND ips.device_id IS NULL)"
+            "    )"
+        ) % {
+            "retcols": ",".join("user_ips." + c for c in retcols),
+            "inner_select": inner_select,
+        }
+
+        txn.execute(sql, bindings)
+        return cls.cursor_to_dict(txn)
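An illustrative call showing the shape of the result (user and device ids are made up):

    from twisted.internet import defer

    @defer.inlineCallbacks
    def show_last_seen(store):
        res = yield store.get_last_client_ip_by_device([
            ("@alice:example.com", "ALICEPHONE"),
            ("@bob:example.com", None),  # rows with a NULL device_id
        ])
        # keys are (user_id, device_id) tuples; values are the matching
        # user_ips rows:
        row = res[("@alice:example.com", "ALICEPHONE")]
        print row["ip"], row["user_agent"], row["last_seen"]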
diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py
new file mode 100644
index 0000000000..afd6530cab
--- /dev/null
+++ b/synapse/storage/devices.py
@@ -0,0 +1,137 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+
+from twisted.internet import defer
+
+from synapse.api.errors import StoreError
+from ._base import SQLBaseStore
+
+logger = logging.getLogger(__name__)
+
+
+class DeviceStore(SQLBaseStore):
+    @defer.inlineCallbacks
+    def store_device(self, user_id, device_id,
+                     initial_device_display_name,
+                     ignore_if_known=True):
+        """Ensure the given device is known; add it to the store if not
+
+        Args:
+            user_id (str): id of user associated with the device
+            device_id (str): id of device
+            initial_device_display_name (str): initial displayname of the
+               device
+            ignore_if_known (bool): ignore integrity errors which mean the
+               device is already known
+        Returns:
+            defer.Deferred
+        Raises:
+            StoreError: if ignore_if_known is False and the device was already
+               known
+        """
+        try:
+            yield self._simple_insert(
+                "devices",
+                values={
+                    "user_id": user_id,
+                    "device_id": device_id,
+                    "display_name": initial_device_display_name
+                },
+                desc="store_device",
+                or_ignore=ignore_if_known,
+            )
+        except Exception as e:
+            logger.error("store_device with device_id=%s failed: %s",
+                         device_id, e)
+            raise StoreError(500, "Problem storing device.")
+
+    def get_device(self, user_id, device_id):
+        """Retrieve a device.
+
+        Args:
+            user_id (str): The ID of the user which owns the device
+            device_id (str): The ID of the device to retrieve
+        Returns:
+            defer.Deferred for a dict containing the device information
+        Raises:
+            StoreError: if the device is not found
+        """
+        return self._simple_select_one(
+            table="devices",
+            keyvalues={"user_id": user_id, "device_id": device_id},
+            retcols=("user_id", "device_id", "display_name"),
+            desc="get_device",
+        )
+
+    def delete_device(self, user_id, device_id):
+        """Delete a device.
+
+        Args:
+            user_id (str): The ID of the user which owns the device
+            device_id (str): The ID of the device to delete
+        Returns:
+            defer.Deferred
+        """
+        return self._simple_delete_one(
+            table="devices",
+            keyvalues={"user_id": user_id, "device_id": device_id},
+            desc="delete_device",
+        )
+
+    def update_device(self, user_id, device_id, new_display_name=None):
+        """Update a device.
+
+        Args:
+            user_id (str): The ID of the user which owns the device
+            device_id (str): The ID of the device to update
+            new_display_name (str|None): new displayname for device; None
+               to leave unchanged
+        Raises:
+            StoreError: if the device is not found
+        Returns:
+            defer.Deferred
+        """
+        updates = {}
+        if new_display_name is not None:
+            updates["display_name"] = new_display_name
+        if not updates:
+            return defer.succeed(None)
+        return self._simple_update_one(
+            table="devices",
+            keyvalues={"user_id": user_id, "device_id": device_id},
+            updatevalues=updates,
+            desc="update_device",
+        )
+
+    @defer.inlineCallbacks
+    def get_devices_by_user(self, user_id):
+        """Retrieve all of a user's registered devices.
+
+        Args:
+            user_id (str): The ID of the user to fetch devices for
+        Returns:
+            defer.Deferred: resolves to a dict from device_id to a dict
+            containing "device_id", "user_id" and "display_name" for each
+            device.
+        """
+        devices = yield self._simple_select_list(
+            table="devices",
+            keyvalues={"user_id": user_id},
+            retcols=("user_id", "device_id", "display_name"),
+            desc="get_devices_by_user"
+        )
+
+        defer.returnValue({d["device_id"]: d for d in devices})
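
A sketch of how calling code might drive the new DeviceStore; the helper below is illustrative only and is not the actual synapse.handlers.device code:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def register_device(store, user_id, device_id, display_name):
        # ignore_if_known=True makes the insert idempotent: re-registering
        # an existing device is a no-op rather than a StoreError.
        yield store.store_device(
            user_id, device_id,
            initial_device_display_name=display_name,
            ignore_if_known=True,
        )
        devices = yield store.get_devices_by_user(user_id)
        # get_devices_by_user resolves to {device_id: {"user_id",
        # "device_id", "display_name"}}; pick out the one just stored.
        defer.returnValue(devices[device_id])
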
diff --git a/synapse/storage/end_to_end_keys.py b/synapse/storage/end_to_end_keys.py
index 2e89066515..385d607056 100644
--- a/synapse/storage/end_to_end_keys.py
+++ b/synapse/storage/end_to_end_keys.py
@@ -12,6 +12,9 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import collections
+
+import twisted.internet.defer
 
 from ._base import SQLBaseStore
 
@@ -36,24 +39,49 @@ class EndToEndKeyStore(SQLBaseStore):
             query_list(list): List of pairs of user_ids and device_ids.
         Returns:
             Dict mapping from user-id to dict mapping from device_id to
-            key json byte strings.
+            dict containing "key_json", "device_display_name".
         """
-        def _get_e2e_device_keys(txn):
-            result = {}
-            for user_id, device_id in query_list:
-                user_result = result.setdefault(user_id, {})
-                keyvalues = {"user_id": user_id}
-                if device_id:
-                    keyvalues["device_id"] = device_id
-                rows = self._simple_select_list_txn(
-                    txn, table="e2e_device_keys_json",
-                    keyvalues=keyvalues,
-                    retcols=["device_id", "key_json"]
-                )
-                for row in rows:
-                    user_result[row["device_id"]] = row["key_json"]
-            return result
-        return self.runInteraction("get_e2e_device_keys", _get_e2e_device_keys)
+        if not query_list:
+            return {}
+
+        return self.runInteraction(
+            "get_e2e_device_keys", self._get_e2e_device_keys_txn, query_list
+        )
+
+    def _get_e2e_device_keys_txn(self, txn, query_list):
+        query_clauses = []
+        query_params = []
+
+        for (user_id, device_id) in query_list:
+            query_clause = "k.user_id = ?"
+            query_params.append(user_id)
+
+            if device_id:
+                query_clause += " AND k.device_id = ?"
+                query_params.append(device_id)
+
+            query_clauses.append(query_clause)
+
+        sql = (
+            "SELECT k.user_id, k.device_id, "
+            "    d.display_name AS device_display_name, "
+            "    k.key_json"
+            " FROM e2e_device_keys_json k"
+            "    LEFT JOIN devices d ON d.user_id = k.user_id"
+            "      AND d.device_id = k.device_id"
+            " WHERE %s"
+        ) % (
+            " OR ".join("(" + q + ")" for q in query_clauses)
+        )
+
+        txn.execute(sql, query_params)
+        rows = self.cursor_to_dict(txn)
+
+        result = collections.defaultdict(dict)
+        for row in rows:
+            result[row["user_id"]][row["device_id"]] = row
+
+        return result
 
     def add_e2e_one_time_keys(self, user_id, device_id, time_now, key_list):
         def _add_e2e_one_time_keys(txn):
@@ -123,3 +151,16 @@ class EndToEndKeyStore(SQLBaseStore):
         return self.runInteraction(
             "claim_e2e_one_time_keys", _claim_e2e_one_time_keys
         )
+
+    @twisted.internet.defer.inlineCallbacks
+    def delete_e2e_keys_by_device(self, user_id, device_id):
+        yield self._simple_delete(
+            table="e2e_device_keys_json",
+            keyvalues={"user_id": user_id, "device_id": device_id},
+            desc="delete_e2e_device_keys_by_device"
+        )
+        yield self._simple_delete(
+            table="e2e_one_time_keys_json",
+            keyvalues={"user_id": user_id, "device_id": device_id},
+            desc="delete_e2e_one_time_keys_by_device"
+        )
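
The WHERE-clause assembly in _get_e2e_device_keys_txn can be checked in isolation. A standalone sketch of the same OR-of-ANDs construction, with illustrative Matrix IDs:

    def build_device_key_where(query_list):
        """Build the WHERE fragment and parameter list for a list of
        (user_id, device_id) pairs, mirroring _get_e2e_device_keys_txn."""
        clauses, params = [], []
        for user_id, device_id in query_list:
            clause = "k.user_id = ?"
            params.append(user_id)
            if device_id:
                clause += " AND k.device_id = ?"
                params.append(device_id)
            clauses.append(clause)
        return " OR ".join("(" + c + ")" for c in clauses), params

    # e.g. a wildcard query for all of one user's devices plus one
    # specific device of another user:
    where, params = build_device_key_where([
        ("@alice:example.com", None),
        ("@bob:example.com", "DEVICEID"),
    ])
    # where  == "(k.user_id = ?) OR (k.user_id = ? AND k.device_id = ?)"
    # params == ["@alice:example.com", "@bob:example.com", "DEVICEID"]
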
diff --git a/synapse/storage/engines/__init__.py b/synapse/storage/engines/__init__.py
index 7bb5de1fe7..338b495611 100644
--- a/synapse/storage/engines/__init__.py
+++ b/synapse/storage/engines/__init__.py
@@ -32,7 +32,7 @@ def create_engine(database_config):
 
     if engine_class:
         module = importlib.import_module(name)
-        return engine_class(module)
+        return engine_class(module, database_config)
 
     raise RuntimeError(
         "Unsupported database engine '%s'" % (name,)
diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py
index c2290943b4..a6ae79dfad 100644
--- a/synapse/storage/engines/postgres.py
+++ b/synapse/storage/engines/postgres.py
@@ -19,9 +19,10 @@ from ._base import IncorrectDatabaseSetup
 class PostgresEngine(object):
     single_threaded = False
 
-    def __init__(self, database_module):
+    def __init__(self, database_module, database_config):
         self.module = database_module
         self.module.extensions.register_type(self.module.extensions.UNICODE)
+        self.synchronous_commit = database_config.get("synchronous_commit", True)
 
     def check_database(self, txn):
         txn.execute("SHOW SERVER_ENCODING")
@@ -40,9 +41,19 @@ class PostgresEngine(object):
         db_conn.set_isolation_level(
             self.module.extensions.ISOLATION_LEVEL_REPEATABLE_READ
         )
+        # Asynchronous commit: don't wait for the server to call fsync before
+        # ending the transaction.
+        # https://www.postgresql.org/docs/current/static/wal-async-commit.html
+        if not self.synchronous_commit:
+            cursor = db_conn.cursor()
+            cursor.execute("SET synchronous_commit TO OFF")
+            cursor.close()
 
     def is_deadlock(self, error):
         if isinstance(error, self.module.DatabaseError):
+            # https://www.postgresql.org/docs/current/static/errcodes-appendix.html
+            # "40001" serialization_failure
+            # "40P01" deadlock_detected
             return error.pgcode in ["40001", "40P01"]
         return False
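
Since create_engine now receives the whole database config, the new option can sit alongside the engine name. A hedged sketch of wiring it up (whether the key belongs at this level of the homeserver's database section, rather than under "args", is determined by config code this diff does not show):

    from synapse.storage.engines import create_engine

    # Illustrative config of the shape create_engine() receives.
    database_config = {
        "name": "psycopg2",
        # Trade a little durability for commit latency: a crash may lose
        # the last few transactions, but cannot corrupt the database.
        "synchronous_commit": False,
    }
    engine = create_engine(database_config)
    # Each new connection the engine prepares will then run
    # "SET synchronous_commit TO OFF" (see the PostgresEngine hunk above).
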
 
diff --git a/synapse/storage/engines/sqlite3.py b/synapse/storage/engines/sqlite3.py
index 14203aa500..755c9a1f07 100644
--- a/synapse/storage/engines/sqlite3.py
+++ b/synapse/storage/engines/sqlite3.py
@@ -21,7 +21,7 @@ import struct
 class Sqlite3Engine(object):
     single_threaded = True
 
-    def __init__(self, database_module):
+    def __init__(self, database_module, database_config):
         self.module = database_module
 
     def check_database(self, txn):
diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py
index 5123072c44..0ba0310c0d 100644
--- a/synapse/storage/event_push_actions.py
+++ b/synapse/storage/event_push_actions.py
@@ -16,6 +16,8 @@
 from ._base import SQLBaseStore
 from twisted.internet import defer
 from synapse.util.caches.descriptors import cachedInlineCallbacks
+from synapse.types import RoomStreamToken
+from .stream import lower_bound
 
 import logging
 import ujson as json
@@ -73,6 +75,9 @@ class EventPushActionsStore(SQLBaseStore):
 
             stream_ordering = results[0][0]
             topological_ordering = results[0][1]
+            token = RoomStreamToken(
+                topological_ordering, stream_ordering
+            )
 
             sql = (
                 "SELECT sum(notif), sum(highlight)"
@@ -80,15 +85,10 @@ class EventPushActionsStore(SQLBaseStore):
                 " WHERE"
                 " user_id = ?"
                 " AND room_id = ?"
-                " AND ("
-                "       topological_ordering > ?"
-                "       OR (topological_ordering = ? AND stream_ordering > ?)"
-                ")"
-            )
-            txn.execute(sql, (
-                user_id, room_id,
-                topological_ordering, topological_ordering, stream_ordering
-            ))
+                " AND %s"
+            ) % (lower_bound(token, self.database_engine, inclusive=False),)
+
+            txn.execute(sql, (user_id, room_id))
             row = txn.fetchone()
             if row:
                 return {
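
lower_bound is imported from .stream and is not shown in this diff. A minimal sketch of what such a helper could emit, assuming a RoomStreamToken(topological, stream) of integers and eliding the engine argument; note the bound is interpolated into the SQL rather than parameterised, which is why txn.execute above binds only user_id and room_id:

    import collections

    RoomStreamToken = collections.namedtuple(
        "RoomStreamToken", ("topological", "stream")
    )

    def lower_bound_sketch(token, inclusive=False):
        # Events strictly (or weakly, if inclusive) after the token in
        # (topological_ordering, stream_ordering) lexicographic order.
        op = ">=" if inclusive else ">"
        return (
            "(topological_ordering > %d"
            " OR (topological_ordering = %d AND stream_ordering %s %d))"
        ) % (token.topological, token.topological, op, token.stream)

    # lower_bound_sketch(RoomStreamToken(5, 100)) ->
    # "(topological_ordering > 5
    #   OR (topological_ordering = 5 AND stream_ordering > 100))"
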
@@ -117,23 +117,42 @@ class EventPushActionsStore(SQLBaseStore):
         defer.returnValue(ret)
 
     @defer.inlineCallbacks
-    def get_unread_push_actions_for_user_in_range(self, user_id,
-                                                  min_stream_ordering,
-                                                  max_stream_ordering=None):
+    def get_unread_push_actions_for_user_in_range_for_http(
+        self, user_id, min_stream_ordering, max_stream_ordering, limit=20
+    ):
+        """Get a list of the most recent unread push actions for a given user,
+        within the given stream ordering range. Called by the httppusher.
+
+        Args:
+            user_id (str): The user to fetch push actions for.
+            min_stream_ordering(int): The exclusive lower bound on the
+                stream ordering of event push actions to fetch.
+            max_stream_ordering(int): The inclusive upper bound on the
+                stream ordering of event push actions to fetch.
+            limit (int): The maximum number of rows to return.
+        Returns:
+            A Deferred which resolves to a list of dicts with the keys "event_id",
+            "room_id", "stream_ordering", "actions".
+            The list will be ordered by ascending stream_ordering.
+            The list will have between 0 and `limit` entries.
+        """
         def get_after_receipt(txn):
+            # find rooms that have a read receipt in them and return the next
+            # push actions
             sql = (
-                "SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions, "
-                "e.received_ts "
-                "FROM ("
-                "   SELECT room_id, user_id, "
-                "       max(topological_ordering) as topological_ordering, "
-                "       max(stream_ordering) as stream_ordering "
-                "       FROM events"
-                "   NATURAL JOIN receipts_linearized WHERE receipt_type = 'm.read'"
-                "   GROUP BY room_id, user_id"
+                "SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions"
+                " FROM ("
+                "   SELECT room_id,"
+                "       MAX(topological_ordering) as topological_ordering,"
+                "       MAX(stream_ordering) as stream_ordering"
+                "   FROM events"
+                "   INNER JOIN receipts_linearized USING (room_id, event_id)"
+                "   WHERE receipt_type = 'm.read' AND user_id = ?"
+                "   GROUP BY room_id"
                 ") AS rl,"
                 " event_push_actions AS ep"
-                " INNER JOIN events AS e USING (room_id, event_id)"
                 " WHERE"
                 "   ep.room_id = rl.room_id"
                 "   AND ("
@@ -143,45 +162,163 @@ class EventPushActionsStore(SQLBaseStore):
                 "           AND ep.stream_ordering > rl.stream_ordering"
                 "       )"
                 "   )"
-                "   AND ep.stream_ordering > ?"
                 "   AND ep.user_id = ?"
-                "   AND ep.user_id = rl.user_id"
+                "   AND ep.stream_ordering > ?"
+                "   AND ep.stream_ordering <= ?"
+                " ORDER BY ep.stream_ordering ASC LIMIT ?"
             )
-            args = [min_stream_ordering, user_id]
-            if max_stream_ordering is not None:
-                sql += " AND ep.stream_ordering <= ?"
-                args.append(max_stream_ordering)
-            sql += " ORDER BY ep.stream_ordering ASC"
+            args = [
+                user_id, user_id,
+                min_stream_ordering, max_stream_ordering, limit,
+            ]
             txn.execute(sql, args)
             return txn.fetchall()
         after_read_receipt = yield self.runInteraction(
-            "get_unread_push_actions_for_user_in_range", get_after_receipt
+            "get_unread_push_actions_for_user_in_range_http_arr", get_after_receipt
         )
 
+        # There may be rooms with push actions in them but no read receipt for this
+        # user (e.g. rooms they have been invited to), so also fetch push actions
+        # for rooms without any read receipts.
         def get_no_receipt(txn):
             sql = (
                 "SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions,"
                 " e.received_ts"
                 " FROM event_push_actions AS ep"
-                " JOIN events e ON ep.room_id = e.room_id AND ep.event_id = e.event_id"
-                " WHERE ep.room_id not in ("
-                "   SELECT room_id FROM events NATURAL JOIN receipts_linearized"
+                " INNER JOIN events AS e USING (room_id, event_id)"
+                " WHERE"
+                "   ep.room_id NOT IN ("
+                "     SELECT room_id FROM receipts_linearized"
+                "       WHERE receipt_type = 'm.read' AND user_id = ?"
+                "       GROUP BY room_id"
+                "   )"
+                "   AND ep.user_id = ?"
+                "   AND ep.stream_ordering > ?"
+                "   AND ep.stream_ordering <= ?"
+                " ORDER BY ep.stream_ordering ASC LIMIT ?"
+            )
+            args = [
+                user_id, user_id,
+                min_stream_ordering, max_stream_ordering, limit,
+            ]
+            txn.execute(sql, args)
+            return txn.fetchall()
+        no_read_receipt = yield self.runInteraction(
+            "get_unread_push_actions_for_user_in_range_http_nrr", get_no_receipt
+        )
+
+        notifs = [
+            {
+                "event_id": row[0],
+                "room_id": row[1],
+                "stream_ordering": row[2],
+                "actions": json.loads(row[3]),
+            } for row in after_read_receipt + no_read_receipt
+        ]
+
+        # Now sort it so it's ordered correctly, since currently it will
+        # contain results from the first query, correctly ordered, followed
+        # by results from the second query, but we want them all ordered
+        # by stream_ordering, oldest first.
+        notifs.sort(key=lambda r: r['stream_ordering'])
+
+        # Truncate to the limit: each subquery was capped at `limit`, so the
+        # combined list can hold up to twice that many entries.
+        defer.returnValue(notifs[:limit])
+
+    @defer.inlineCallbacks
+    def get_unread_push_actions_for_user_in_range_for_email(
+        self, user_id, min_stream_ordering, max_stream_ordering, limit=20
+    ):
+        """Get a list of the most recent unread push actions for a given user,
+        within the given stream ordering range. Called by the emailpusher.
+
+        Args:
+            user_id (str): The user to fetch push actions for.
+            min_stream_ordering(int): The exclusive lower bound on the
+                stream ordering of event push actions to fetch.
+            max_stream_ordering(int): The inclusive upper bound on the
+                stream ordering of event push actions to fetch.
+            limit (int): The maximum number of rows to return.
+        Returns:
+            A Deferred which resolves to a list of dicts with the keys "event_id",
+            "room_id", "stream_ordering", "actions", "received_ts".
+            The list will be ordered by descending received_ts.
+            The list will have between 0 and `limit` entries.
+        """
+        # find rooms that have a read receipt in them and return the most recent
+        # push actions
+        def get_after_receipt(txn):
+            sql = (
+                "SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions,"
+                "  e.received_ts"
+                " FROM ("
+                "   SELECT room_id,"
+                "       MAX(topological_ordering) as topological_ordering,"
+                "       MAX(stream_ordering) as stream_ordering"
+                "   FROM events"
+                "   INNER JOIN receipts_linearized USING (room_id, event_id)"
                 "   WHERE receipt_type = 'm.read' AND user_id = ?"
                 "   GROUP BY room_id"
-                ") AND ep.user_id = ? AND ep.stream_ordering > ?"
+                ") AS rl,"
+                " event_push_actions AS ep"
+                " INNER JOIN events AS e USING (room_id, event_id)"
+                " WHERE"
+                "   ep.room_id = rl.room_id"
+                "   AND ("
+                "       ep.topological_ordering > rl.topological_ordering"
+                "       OR ("
+                "           ep.topological_ordering = rl.topological_ordering"
+                "           AND ep.stream_ordering > rl.stream_ordering"
+                "       )"
+                "   )"
+                "   AND ep.user_id = ?"
+                "   AND ep.stream_ordering > ?"
+                "   AND ep.stream_ordering <= ?"
+                " ORDER BY ep.stream_ordering DESC LIMIT ?"
             )
-            args = [user_id, user_id, min_stream_ordering]
-            if max_stream_ordering is not None:
-                sql += " AND ep.stream_ordering <= ?"
-                args.append(max_stream_ordering)
-            sql += " ORDER BY ep.stream_ordering ASC"
+            args = [
+                user_id, user_id,
+                min_stream_ordering, max_stream_ordering, limit,
+            ]
+            txn.execute(sql, args)
+            return txn.fetchall()
+        after_read_receipt = yield self.runInteraction(
+            "get_unread_push_actions_for_user_in_range_email_arr", get_after_receipt
+        )
+
+        # There may be rooms with push actions in them but no read receipt for this
+        # user (e.g. rooms they have been invited to), so also fetch push actions
+        # for rooms without any read receipts.
+        def get_no_receipt(txn):
+            sql = (
+                "SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions,"
+                " e.received_ts"
+                " FROM event_push_actions AS ep"
+                " INNER JOIN events AS e USING (room_id, event_id)"
+                " WHERE"
+                "   ep.room_id NOT IN ("
+                "     SELECT room_id FROM receipts_linearized"
+                "       WHERE receipt_type = 'm.read' AND user_id = ?"
+                "       GROUP BY room_id"
+                "   )"
+                "   AND ep.user_id = ?"
+                "   AND ep.stream_ordering > ?"
+                "   AND ep.stream_ordering <= ?"
+                " ORDER BY ep.stream_ordering DESC LIMIT ?"
+            )
+            args = [
+                user_id, user_id,
+                min_stream_ordering, max_stream_ordering, limit,
+            ]
             txn.execute(sql, args)
             return txn.fetchall()
         no_read_receipt = yield self.runInteraction(
-            "get_unread_push_actions_for_user_in_range", get_no_receipt
+            "get_unread_push_actions_for_user_in_range_email_nrr", get_no_receipt
         )
 
-        defer.returnValue([
+        # Make a list of dicts from the two sets of results.
+        notifs = [
             {
                 "event_id": row[0],
                 "room_id": row[1],
@@ -189,7 +326,16 @@ class EventPushActionsStore(SQLBaseStore):
                 "actions": json.loads(row[3]),
                 "received_ts": row[4],
             } for row in after_read_receipt + no_read_receipt
-        ])
+        ]
+
+        # Now sort it so it's ordered correctly, since currently it will
+        # contain results from the first query, correctly ordered, followed
+        # by results from the second query, but we want them all ordered
+        # by received_ts (most recent first)
+        notifs.sort(key=lambda r: -(r['received_ts'] or 0))
+
+        # Now return the first `limit`
+        defer.returnValue(notifs[:limit])
 
     @defer.inlineCallbacks
     def get_push_actions_for_user(self, user_id, before=None, limit=50):
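
Both the HTTP and email variants share the same tail: run two bounded queries, concatenate, re-sort globally, and truncate. A standalone sketch of that merge step, assuming each row is a dict carrying the sort key:

    def merge_push_action_batches(after_read_receipt, no_read_receipt,
                                  sort_key, limit, descending=False):
        # The two result sets are each correctly ordered and each capped
        # at `limit`, so the concatenation holds up to 2 * limit rows in
        # the wrong overall order; re-sort the lot and truncate.
        notifs = list(after_read_receipt) + list(no_read_receipt)
        notifs.sort(key=sort_key, reverse=descending)
        return notifs[:limit]

    # HTTP pusher: oldest first by stream_ordering.
    #   merge_push_action_batches(arr, nrr,
    #                             lambda r: r["stream_ordering"], 20)
    # Email pusher: most recent first by received_ts, which may be NULL.
    #   merge_push_action_batches(arr, nrr,
    #                             lambda r: r["received_ts"] or 0, 20,
    #                             descending=True)
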
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 4655669ba0..d2feee8dbb 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -23,9 +23,14 @@ from synapse.util.async import ObservableDeferred
 from synapse.util.logcontext import preserve_fn, PreserveLoggingContext
 from synapse.util.logutils import log_function
 from synapse.api.constants import EventTypes
+from synapse.api.errors import SynapseError
 
 from canonicaljson import encode_canonical_json
-from collections import deque, namedtuple
+from collections import deque, namedtuple, OrderedDict
+from functools import wraps
+
+import synapse
+import synapse.metrics
 
 
 import logging
@@ -35,6 +40,10 @@ import ujson as json
 logger = logging.getLogger(__name__)
 
 
+metrics = synapse.metrics.get_metrics_for(__name__)
+persist_event_counter = metrics.register_counter("persisted_events")
+
+
 def encode_json(json_object):
     if USE_FROZEN_DICTS:
         # ujson doesn't like frozen_dicts
@@ -139,8 +148,32 @@ class _EventPeristenceQueue(object):
             pass
 
 
+_EventCacheEntry = namedtuple("_EventCacheEntry", ("event", "redacted_event"))
+
+
+def _retry_on_integrity_error(func):
+    """Wraps a database function so that it gets retried on IntegrityError,
+    with `delete_existing=True` passed in.
+
+    Args:
+        func: function that returns a Deferred and accepts a `delete_existing` arg
+    """
+    @wraps(func)
+    @defer.inlineCallbacks
+    def f(self, *args, **kwargs):
+        try:
+            res = yield func(self, *args, **kwargs)
+        except self.database_engine.module.IntegrityError:
+            logger.exception("IntegrityError, retrying.")
+            res = yield func(self, *args, delete_existing=True, **kwargs)
+        defer.returnValue(res)
+
+    return f
+
+
 class EventsStore(SQLBaseStore):
     EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts"
+    EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url"
 
     def __init__(self, hs):
         super(EventsStore, self).__init__(hs)
@@ -148,6 +181,10 @@ class EventsStore(SQLBaseStore):
         self.register_background_update_handler(
             self.EVENT_ORIGIN_SERVER_TS_NAME, self._background_reindex_origin_server_ts
         )
+        self.register_background_update_handler(
+            self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME,
+            self._background_reindex_fields_sender,
+        )
 
         self._event_persist_queue = _EventPeristenceQueue()
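
A toy demonstration of the decorator's contract, assuming the _retry_on_integrity_error defined above is in scope; the FakeStore is illustrative only:

    import sqlite3

    from twisted.internet import defer

    class _FakeEngine(object):
        # The decorator only needs database_engine.module.IntegrityError.
        module = sqlite3

    class FakeStore(object):
        database_engine = _FakeEngine()

        def __init__(self):
            self.attempts = 0

        @_retry_on_integrity_error
        @defer.inlineCallbacks
        def write(self, delete_existing=False):
            self.attempts += 1
            if not delete_existing:
                # First attempt: simulate a duplicate-row failure.
                raise sqlite3.IntegrityError("duplicate key")
            yield defer.succeed(None)
            defer.returnValue("ok after %d attempts" % self.attempts)

    # FakeStore().write() fires with "ok after 2 attempts": the first
    # call raises, the decorator retries once with delete_existing=True.
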
 
@@ -213,8 +250,10 @@ class EventsStore(SQLBaseStore):
 
         self._event_persist_queue.handle_queue(room_id, persisting_queue)
 
+    @_retry_on_integrity_error
     @defer.inlineCallbacks
-    def _persist_events(self, events_and_contexts, backfilled=False):
+    def _persist_events(self, events_and_contexts, backfilled=False,
+                        delete_existing=False):
         if not events_and_contexts:
             return
 
@@ -257,11 +296,15 @@ class EventsStore(SQLBaseStore):
                         self._persist_events_txn,
                         events_and_contexts=chunk,
                         backfilled=backfilled,
+                        delete_existing=delete_existing,
                     )
+                    persist_event_counter.inc_by(len(chunk))
 
+    @_retry_on_integrity_error
     @defer.inlineCallbacks
     @log_function
-    def _persist_event(self, event, context, current_state=None, backfilled=False):
+    def _persist_event(self, event, context, current_state=None, backfilled=False,
+                       delete_existing=False):
         try:
             with self._stream_id_gen.get_next() as stream_ordering:
                 with self._state_groups_id_gen.get_next() as state_group_id:
@@ -274,7 +317,9 @@ class EventsStore(SQLBaseStore):
                         context=context,
                         current_state=current_state,
                         backfilled=backfilled,
+                        delete_existing=delete_existing,
                     )
+                    persist_event_counter.inc()
         except _RollbackButIsFineException:
             pass
 
@@ -305,7 +350,7 @@ class EventsStore(SQLBaseStore):
         )
 
         if not events and not allow_none:
-            raise RuntimeError("Could not find event %s" % (event_id,))
+            raise SynapseError(404, "Could not find event %s" % (event_id,))
 
         defer.returnValue(events[0] if events else None)
 
@@ -335,18 +380,15 @@ class EventsStore(SQLBaseStore):
         defer.returnValue({e.event_id: e for e in events})
 
     @log_function
-    def _persist_event_txn(self, txn, event, context, current_state, backfilled=False):
+    def _persist_event_txn(self, txn, event, context, current_state, backfilled=False,
+                           delete_existing=False):
         # We purposefully do this first since if we include a `current_state`
         # key, we *want* to update the `current_state_events` table
         if current_state:
             txn.call_after(self._get_current_state_for_key.invalidate_all)
             txn.call_after(self.get_rooms_for_user.invalidate_all)
             txn.call_after(self.get_users_in_room.invalidate, (event.room_id,))
-            txn.call_after(
-                self.get_users_with_pushers_in_room.invalidate, (event.room_id,)
-            )
             txn.call_after(self.get_joined_hosts_for_room.invalidate, (event.room_id,))
-            txn.call_after(self.get_room_name_and_aliases.invalidate, (event.room_id,))
 
             # Add an entry to the current_state_resets table to record the point
             # where we clobbered the current state
@@ -379,10 +421,38 @@ class EventsStore(SQLBaseStore):
             txn,
             [(event, context)],
             backfilled=backfilled,
+            delete_existing=delete_existing,
         )
 
     @log_function
-    def _persist_events_txn(self, txn, events_and_contexts, backfilled):
+    def _persist_events_txn(self, txn, events_and_contexts, backfilled,
+                            delete_existing=False):
+        """Insert some number of room events into the necessary database tables.
+
+        Rejected events are only inserted into the events table, the event_json table,
+        and the rejections table. Things reading from those tables will need to check
+        whether the event was rejected.
+
+        If delete_existing is True then existing events will be purged from the
+        database before insertion. This is useful when retrying due to IntegrityError.
+        """
+        # Ensure that we don't have the same event twice.
+        # Pick the earliest non-outlier if there is one, else the earliest one.
+        new_events_and_contexts = OrderedDict()
+        for event, context in events_and_contexts:
+            prev_event_context = new_events_and_contexts.get(event.event_id)
+            if prev_event_context:
+                if not event.internal_metadata.is_outlier():
+                    if prev_event_context[0].internal_metadata.is_outlier():
+                        # To ensure correct ordering we pop, as OrderedDict is
+                        # ordered by first insertion.
+                        new_events_and_contexts.pop(event.event_id, None)
+                        new_events_and_contexts[event.event_id] = (event, context)
+            else:
+                new_events_and_contexts[event.event_id] = (event, context)
+
+        events_and_contexts = new_events_and_contexts.values()
+
         depth_updates = {}
         for event, context in events_and_contexts:
             # Remove the any existing cache entries for the event_ids
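
The OrderedDict dedup at the top of _persist_events_txn can be exercised standalone. A sketch of the same preference rules, using a plain outlier flag in place of internal_metadata.is_outlier():

    from collections import OrderedDict, namedtuple

    Ev = namedtuple("Ev", ("event_id", "outlier"))

    def dedup_events(pairs):
        """Keep one (event, context) per event_id, preferring a
        non-outlier copy over an outlier one, and otherwise keeping the
        earliest copy seen."""
        out = OrderedDict()
        for event, context in pairs:
            prev = out.get(event.event_id)
            if prev and not event.outlier and prev[0].outlier:
                # Replace the outlier; pop first so the surviving copy
                # takes the later insertion position.
                out.pop(event.event_id, None)
                out[event.event_id] = (event, context)
            elif not prev:
                out[event.event_id] = (event, context)
        return list(out.values())

    # dedup_events([(Ev("$a", True), 1), (Ev("$a", False), 2)])
    #   -> [(Ev("$a", False), 2)]
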
@@ -393,21 +463,11 @@ class EventsStore(SQLBaseStore):
                     event.room_id, event.internal_metadata.stream_ordering,
                 )
 
-            if not event.internal_metadata.is_outlier():
+            if not event.internal_metadata.is_outlier() and not context.rejected:
                 depth_updates[event.room_id] = max(
                     event.depth, depth_updates.get(event.room_id, event.depth)
                 )
 
-            if context.push_actions:
-                self._set_push_actions_for_event_and_users_txn(
-                    txn, event, context.push_actions
-                )
-
-        if event.type == EventTypes.Redaction and event.redacts is not None:
-            self._remove_push_actions_for_event_id_txn(
-                txn, event.room_id, event.redacts
-            )
-
         for room_id, depth in depth_updates.items():
             self._update_min_depth_for_room_txn(txn, room_id, depth)
 
@@ -417,30 +477,21 @@ class EventsStore(SQLBaseStore):
             ),
             [event.event_id for event, _ in events_and_contexts]
         )
+
         have_persisted = {
             event_id: outlier
             for event_id, outlier in txn.fetchall()
         }
 
-        event_map = {}
         to_remove = set()
         for event, context in events_and_contexts:
-            # Handle the case of the list including the same event multiple
-            # times. The tricky thing here is when they differ by whether
-            # they are an outlier.
-            if event.event_id in event_map:
-                other = event_map[event.event_id]
-
-                if not other.internal_metadata.is_outlier():
-                    to_remove.add(event)
-                    continue
-                elif not event.internal_metadata.is_outlier():
+            if context.rejected:
+                # If the event is rejected then we don't care if the event
+                # was an outlier or not.
+                if event.event_id in have_persisted:
+                    # If we have already seen the event then ignore it.
                     to_remove.add(event)
-                    continue
-                else:
-                    to_remove.add(other)
-
-            event_map[event.event_id] = event
+                continue
 
             if event.event_id not in have_persisted:
                 continue
@@ -449,6 +500,12 @@ class EventsStore(SQLBaseStore):
 
             outlier_persisted = have_persisted[event.event_id]
             if not event.internal_metadata.is_outlier() and outlier_persisted:
+                # We received a copy of an event that we had already stored as
+                # an outlier in the database. We now have some state at that
+                # event, so we need to update the state_groups table with that state.
+
+                # insert into the state_group, state_groups_state and
+                # event_to_state_groups tables.
                 self._store_mult_state_groups_txn(txn, ((event, context),))
 
                 metadata_json = encode_json(
@@ -464,6 +521,8 @@ class EventsStore(SQLBaseStore):
                     (metadata_json, event.event_id,)
                 )
 
+                # Add an entry to the ex_outlier_stream table to replicate the
+                # change in outlier status to our workers.
                 stream_order = event.internal_metadata.stream_ordering
                 state_group_id = context.state_group or context.new_state_group_id
                 self._simple_insert_txn(
@@ -485,6 +544,8 @@ class EventsStore(SQLBaseStore):
                     (False, event.event_id,)
                 )
 
+                # Update the event_backward_extremities table now that this
+                # event isn't an outlier any more.
                 self._update_extremeties(txn, [event])
 
         events_and_contexts = [
@@ -492,38 +553,12 @@ class EventsStore(SQLBaseStore):
         ]
 
         if not events_and_contexts:
+            # Make sure we don't pass an empty list to functions that expect to
+            # be storing at least one element.
             return
 
-        self._store_mult_state_groups_txn(txn, events_and_contexts)
-
-        self._handle_mult_prev_events(
-            txn,
-            events=[event for event, _ in events_and_contexts],
-        )
-
-        for event, _ in events_and_contexts:
-            if event.type == EventTypes.Name:
-                self._store_room_name_txn(txn, event)
-            elif event.type == EventTypes.Topic:
-                self._store_room_topic_txn(txn, event)
-            elif event.type == EventTypes.Message:
-                self._store_room_message_txn(txn, event)
-            elif event.type == EventTypes.Redaction:
-                self._store_redaction(txn, event)
-            elif event.type == EventTypes.RoomHistoryVisibility:
-                self._store_history_visibility_txn(txn, event)
-            elif event.type == EventTypes.GuestAccess:
-                self._store_guest_access_txn(txn, event)
-
-        self._store_room_members_txn(
-            txn,
-            [
-                event
-                for event, _ in events_and_contexts
-                if event.type == EventTypes.Member
-            ],
-            backfilled=backfilled,
-        )
+        # From this point onwards the events are only events that we haven't
+        # seen before.
 
         def event_dict(event):
             return {
@@ -535,6 +570,43 @@ class EventsStore(SQLBaseStore):
                 ]
             }
 
+        if delete_existing:
+            # Out of paranoia, delete any existing entries for these events
+            # before reinserting them; this avoids problems where some tables
+            # already have rows for them.
+
+            logger.info("Deleting existing")
+
+            for table in (
+                "events",
+                "event_auth",
+                "event_json",
+                "event_content_hashes",
+                "event_destinations",
+                "event_edge_hashes",
+                "event_edges",
+                "event_forward_extremities",
+                "event_push_actions",
+                "event_reference_hashes",
+                "event_search",
+                "event_signatures",
+                "event_to_state_groups",
+                "guest_access",
+                "history_visibility",
+                "local_invites",
+                "room_names",
+                "state_events",
+                "rejections",
+                "redactions",
+                "room_memberships",
+                "state_events"
+            ):
+                txn.executemany(
+                    "DELETE FROM %s WHERE event_id = ?" % (table,),
+                    [(ev.event_id,) for ev, _ in events_and_contexts]
+                )
+
         self._simple_insert_many_txn(
             txn,
             table="event_json",
@@ -567,15 +639,51 @@ class EventsStore(SQLBaseStore):
                     "content": encode_json(event.content).decode("UTF-8"),
                     "origin_server_ts": int(event.origin_server_ts),
                     "received_ts": self._clock.time_msec(),
+                    "sender": event.sender,
+                    "contains_url": (
+                        "url" in event.content
+                        and isinstance(event.content["url"], basestring)
+                    ),
                 }
                 for event, _ in events_and_contexts
             ],
         )
 
-        if context.rejected:
-            self._store_rejections_txn(
-                txn, event.event_id, context.rejected
-            )
+        # Remove the rejected events from the list now that we've added them
+        # to the events table and the event_json table.
+        to_remove = set()
+        for event, context in events_and_contexts:
+            if context.rejected:
+                # Insert the event_id into the rejections table
+                self._store_rejections_txn(
+                    txn, event.event_id, context.rejected
+                )
+                to_remove.add(event)
+
+        events_and_contexts = [
+            ec for ec in events_and_contexts if ec[0] not in to_remove
+        ]
+
+        if not events_and_contexts:
+            # Make sure we don't pass an empty list to functions that expect to
+            # be storing at least one element.
+            return
+
+        # From this point onwards the events are only ones that weren't rejected.
+
+        for event, context in events_and_contexts:
+            # Insert all the push actions into the event_push_actions table.
+            if context.push_actions:
+                self._set_push_actions_for_event_and_users_txn(
+                    txn, event, context.push_actions
+                )
+
+            if event.type == EventTypes.Redaction and event.redacts is not None:
+                # Remove the entries in the event_push_actions table for the
+                # redacted event.
+                self._remove_push_actions_for_event_id_txn(
+                    txn, event.room_id, event.redacts
+                )
 
         self._simple_insert_many_txn(
             txn,
@@ -591,6 +699,49 @@ class EventsStore(SQLBaseStore):
             ],
         )
 
+        # Insert into the state_groups, state_groups_state, and
+        # event_to_state_groups tables.
+        self._store_mult_state_groups_txn(txn, events_and_contexts)
+
+        # Update the event_forward_extremities, event_backward_extremities and
+        # event_edges tables.
+        self._handle_mult_prev_events(
+            txn,
+            events=[event for event, _ in events_and_contexts],
+        )
+
+        for event, _ in events_and_contexts:
+            if event.type == EventTypes.Name:
+                # Insert into the room_names and event_search tables.
+                self._store_room_name_txn(txn, event)
+            elif event.type == EventTypes.Topic:
+                # Insert into the topics table and event_search table.
+                self._store_room_topic_txn(txn, event)
+            elif event.type == EventTypes.Message:
+                # Insert into the event_search table.
+                self._store_room_message_txn(txn, event)
+            elif event.type == EventTypes.Redaction:
+                # Insert into the redactions table.
+                self._store_redaction(txn, event)
+            elif event.type == EventTypes.RoomHistoryVisibility:
+                # Insert into the event_search table.
+                self._store_history_visibility_txn(txn, event)
+            elif event.type == EventTypes.GuestAccess:
+                # Insert into the event_search table.
+                self._store_guest_access_txn(txn, event)
+
+        # Insert into the room_memberships table.
+        self._store_room_members_txn(
+            txn,
+            [
+                event
+                for event, _ in events_and_contexts
+                if event.type == EventTypes.Member
+            ],
+            backfilled=backfilled,
+        )
+
+        # Insert into the event_reference_hashes table.
         self._store_event_reference_hashes_txn(
             txn, [event for event, _ in events_and_contexts]
         )
@@ -635,6 +786,9 @@ class EventsStore(SQLBaseStore):
             ],
         )
 
+        # Prefill the event cache
+        self._add_to_cache(txn, events_and_contexts)
+
         if backfilled:
             # Backfilled events come before the current state so we don't need
             # to update the current state table
@@ -645,22 +799,11 @@ class EventsStore(SQLBaseStore):
                 # Outlier events shouldn't clobber the current state.
                 continue
 
-            if context.rejected:
-                # If the event failed it's auth checks then it shouldn't
-                # clobbler the current state.
-                continue
-
             txn.call_after(
                 self._get_current_state_for_key.invalidate,
                 (event.room_id, event.type, event.state_key,)
             )
 
-            if event.type in [EventTypes.Name, EventTypes.Aliases]:
-                txn.call_after(
-                    self.get_room_name_and_aliases.invalidate,
-                    (event.room_id,)
-                )
-
             self._simple_upsert_txn(
                 txn,
                 "current_state_events",
@@ -676,6 +819,45 @@ class EventsStore(SQLBaseStore):
 
         return
 
+    def _add_to_cache(self, txn, events_and_contexts):
+        to_prefill = []
+
+        rows = []
+        N = 200
+        for i in range(0, len(events_and_contexts), N):
+            ev_map = {
+                e[0].event_id: e[0]
+                for e in events_and_contexts[i:i + N]
+            }
+            if not ev_map:
+                break
+
+            sql = (
+                "SELECT "
+                " e.event_id as event_id, "
+                " r.redacts as redacts,"
+                " rej.event_id as rejects "
+                " FROM events as e"
+                " LEFT JOIN rejections as rej USING (event_id)"
+                " LEFT JOIN redactions as r ON e.event_id = r.redacts"
+                " WHERE e.event_id IN (%s)"
+            ) % (",".join(["?"] * len(ev_map)),)
+
+            txn.execute(sql, ev_map.keys())
+            rows = self.cursor_to_dict(txn)
+            for row in rows:
+                event = ev_map[row["event_id"]]
+                if not row["rejects"] and not row["redacts"]:
+                    to_prefill.append(_EventCacheEntry(
+                        event=event,
+                        redacted_event=None,
+                    ))
+
+        def prefill():
+            for cache_entry in to_prefill:
+                self._get_event_cache.prefill((cache_entry[0].event_id,), cache_entry)
+        txn.call_after(prefill)
+
     def _store_redaction(self, txn, event):
         # invalidate the cache for the redacted event
         txn.call_after(self._invalidate_get_event_cache, event.redacts)
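
_add_to_cache batches its lookup 200 event_ids at a time so the IN (...) placeholder list stays bounded. The general pattern, sketched standalone:

    def chunked_in_query(txn, sql_template, ids, chunk_size=200):
        """Run a query with an IN (...) list in chunks of chunk_size,
        mirroring the batching in _add_to_cache. sql_template must
        contain a single %s where the placeholder list goes."""
        results = []
        for i in range(0, len(ids), chunk_size):
            chunk = ids[i:i + chunk_size]
            txn.execute(sql_template % (",".join(["?"] * len(chunk)),), chunk)
            results.extend(txn.fetchall())
        return results

    # e.g.:
    # rows = chunked_in_query(
    #     txn,
    #     "SELECT event_id, json FROM event_json WHERE event_id IN (%s)",
    #     all_event_ids,
    # )
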
@@ -741,100 +923,65 @@ class EventsStore(SQLBaseStore):
         event_id_list = event_ids
         event_ids = set(event_ids)
 
-        event_map = self._get_events_from_cache(
+        event_entry_map = self._get_events_from_cache(
             event_ids,
-            check_redacted=check_redacted,
-            get_prev_content=get_prev_content,
             allow_rejected=allow_rejected,
         )
 
-        missing_events_ids = [e for e in event_ids if e not in event_map]
+        missing_events_ids = [e for e in event_ids if e not in event_entry_map]
 
         if missing_events_ids:
             missing_events = yield self._enqueue_events(
                 missing_events_ids,
                 check_redacted=check_redacted,
-                get_prev_content=get_prev_content,
                 allow_rejected=allow_rejected,
             )
 
-            event_map.update(missing_events)
-
-        defer.returnValue([
-            event_map[e_id] for e_id in event_id_list
-            if e_id in event_map and event_map[e_id]
-        ])
+            event_entry_map.update(missing_events)
 
-    def _get_events_txn(self, txn, event_ids, check_redacted=True,
-                        get_prev_content=False, allow_rejected=False):
-        if not event_ids:
-            return []
-
-        event_map = self._get_events_from_cache(
-            event_ids,
-            check_redacted=check_redacted,
-            get_prev_content=get_prev_content,
-            allow_rejected=allow_rejected,
-        )
-
-        missing_events_ids = [e for e in event_ids if e not in event_map]
+        events = []
+        for event_id in event_id_list:
+            entry = event_entry_map.get(event_id, None)
+            if not entry:
+                continue
 
-        if not missing_events_ids:
-            return [
-                event_map[e_id] for e_id in event_ids
-                if e_id in event_map and event_map[e_id]
-            ]
+            if allow_rejected or not entry.event.rejected_reason:
+                if check_redacted and entry.redacted_event:
+                    event = entry.redacted_event
+                else:
+                    event = entry.event
 
-        missing_events = self._fetch_events_txn(
-            txn,
-            missing_events_ids,
-            check_redacted=check_redacted,
-            get_prev_content=get_prev_content,
-            allow_rejected=allow_rejected,
-        )
+                events.append(event)
 
-        event_map.update(missing_events)
+                if get_prev_content:
+                    if "replaces_state" in event.unsigned:
+                        prev = yield self.get_event(
+                            event.unsigned["replaces_state"],
+                            get_prev_content=False,
+                            allow_none=True,
+                        )
+                        if prev:
+                            event.unsigned = dict(event.unsigned)
+                            event.unsigned["prev_content"] = prev.content
+                            event.unsigned["prev_sender"] = prev.sender
 
-        return [
-            event_map[e_id] for e_id in event_ids
-            if e_id in event_map and event_map[e_id]
-        ]
+        defer.returnValue(events)
 
     def _invalidate_get_event_cache(self, event_id):
-        for check_redacted in (False, True):
-            for get_prev_content in (False, True):
-                self._get_event_cache.invalidate(
-                    (event_id, check_redacted, get_prev_content)
-                )
-
-    def _get_event_txn(self, txn, event_id, check_redacted=True,
-                       get_prev_content=False, allow_rejected=False):
+        self._get_event_cache.invalidate((event_id,))
 
-        events = self._get_events_txn(
-            txn, [event_id],
-            check_redacted=check_redacted,
-            get_prev_content=get_prev_content,
-            allow_rejected=allow_rejected,
-        )
-
-        return events[0] if events else None
-
-    def _get_events_from_cache(self, events, check_redacted, get_prev_content,
-                               allow_rejected):
+    def _get_events_from_cache(self, events, allow_rejected):
         event_map = {}
 
         for event_id in events:
-            try:
-                ret = self._get_event_cache.get(
-                    (event_id, check_redacted, get_prev_content,)
-                )
+            ret = self._get_event_cache.get((event_id,), None)
+            if not ret:
+                continue
 
-                if allow_rejected or not ret.rejected_reason:
-                    event_map[event_id] = ret
-                else:
-                    event_map[event_id] = None
-            except KeyError:
-                pass
+            if allow_rejected or not ret.event.rejected_reason:
+                event_map[event_id] = ret
+            else:
+                event_map[event_id] = None
 
         return event_map
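
With the cache keyed on event_id alone, redaction and rejection handling moves to lookup time. A sketch of that selection policy, assuming the _EventCacheEntry defined earlier in this diff and events exposing rejected_reason:

    def select_event(entry, check_redacted=True, allow_rejected=False):
        # entry is an _EventCacheEntry(event, redacted_event).
        if not allow_rejected and entry.event.rejected_reason:
            # Rejected events are cached but hidden unless asked for.
            return None
        if check_redacted and entry.redacted_event:
            # Serve the pruned copy when the event has been redacted.
            return entry.redacted_event
        return entry.event
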
 
@@ -905,8 +1052,7 @@ class EventsStore(SQLBaseStore):
                         reactor.callFromThread(fire, event_list)
 
     @defer.inlineCallbacks
-    def _enqueue_events(self, events, check_redacted=True,
-                        get_prev_content=False, allow_rejected=False):
+    def _enqueue_events(self, events, check_redacted=True, allow_rejected=False):
         """Fetches events from the database using the _event_fetch_list. This
         allows batch and bulk fetching of events - it allows us to fetch events
         without having to create a new transaction for each request for events.
@@ -944,8 +1090,6 @@ class EventsStore(SQLBaseStore):
             [
                 preserve_fn(self._get_event_from_row)(
                     row["internal_metadata"], row["json"], row["redacts"],
-                    check_redacted=check_redacted,
-                    get_prev_content=get_prev_content,
                     rejected_reason=row["rejects"],
                 )
                 for row in rows
@@ -954,7 +1098,7 @@ class EventsStore(SQLBaseStore):
         )
 
         defer.returnValue({
-            e.event_id: e
+            e.event.event_id: e
             for e in res if e
         })
 
@@ -984,37 +1128,8 @@ class EventsStore(SQLBaseStore):
 
         return rows
 
-    def _fetch_events_txn(self, txn, events, check_redacted=True,
-                          get_prev_content=False, allow_rejected=False):
-        if not events:
-            return {}
-
-        rows = self._fetch_event_rows(
-            txn, events,
-        )
-
-        if not allow_rejected:
-            rows[:] = [r for r in rows if not r["rejects"]]
-
-        res = [
-            self._get_event_from_row_txn(
-                txn,
-                row["internal_metadata"], row["json"], row["redacts"],
-                check_redacted=check_redacted,
-                get_prev_content=get_prev_content,
-                rejected_reason=row["rejects"],
-            )
-            for row in rows
-        ]
-
-        return {
-            r.event_id: r
-            for r in res
-        }
-
     @defer.inlineCallbacks
     def _get_event_from_row(self, internal_metadata, js, redacted,
-                            check_redacted=True, get_prev_content=False,
                             rejected_reason=None):
         d = json.loads(js)
         internal_metadata = json.loads(internal_metadata)
@@ -1024,26 +1139,27 @@ class EventsStore(SQLBaseStore):
                 table="rejections",
                 keyvalues={"event_id": rejected_reason},
                 retcol="reason",
-                desc="_get_event_from_row",
+                desc="_get_event_from_row_rejected_reason",
             )
 
-        ev = FrozenEvent(
+        original_ev = FrozenEvent(
             d,
             internal_metadata_dict=internal_metadata,
             rejected_reason=rejected_reason,
         )
 
-        if check_redacted and redacted:
-            ev = prune_event(ev)
+        redacted_event = None
+        if redacted:
+            redacted_event = prune_event(original_ev)
 
             redaction_id = yield self._simple_select_one_onecol(
                 table="redactions",
-                keyvalues={"redacts": ev.event_id},
+                keyvalues={"redacts": redacted_event.event_id},
                 retcol="event_id",
-                desc="_get_event_from_row",
+                desc="_get_event_from_row_redactions",
             )
 
-            ev.unsigned["redacted_by"] = redaction_id
+            redacted_event.unsigned["redacted_by"] = redaction_id
             # Get the redaction event.
 
             because = yield self.get_event(
@@ -1055,86 +1171,16 @@ class EventsStore(SQLBaseStore):
             if because:
                 # It's fine to do add the event directly, since get_pdu_json
                 # will serialise this field correctly
-                ev.unsigned["redacted_because"] = because
-
-        if get_prev_content and "replaces_state" in ev.unsigned:
-            prev = yield self.get_event(
-                ev.unsigned["replaces_state"],
-                get_prev_content=False,
-                allow_none=True,
-            )
-            if prev:
-                ev.unsigned["prev_content"] = prev.content
-                ev.unsigned["prev_sender"] = prev.sender
-
-        self._get_event_cache.prefill(
-            (ev.event_id, check_redacted, get_prev_content), ev
-        )
-
-        defer.returnValue(ev)
+                redacted_event.unsigned["redacted_because"] = because
 
-    def _get_event_from_row_txn(self, txn, internal_metadata, js, redacted,
-                                check_redacted=True, get_prev_content=False,
-                                rejected_reason=None):
-        d = json.loads(js)
-        internal_metadata = json.loads(internal_metadata)
-
-        if rejected_reason:
-            rejected_reason = self._simple_select_one_onecol_txn(
-                txn,
-                table="rejections",
-                keyvalues={"event_id": rejected_reason},
-                retcol="reason",
-            )
-
-        ev = FrozenEvent(
-            d,
-            internal_metadata_dict=internal_metadata,
-            rejected_reason=rejected_reason,
+        cache_entry = _EventCacheEntry(
+            event=original_ev,
+            redacted_event=redacted_event,
         )
 
-        if check_redacted and redacted:
-            ev = prune_event(ev)
-
-            redaction_id = self._simple_select_one_onecol_txn(
-                txn,
-                table="redactions",
-                keyvalues={"redacts": ev.event_id},
-                retcol="event_id",
-            )
+        self._get_event_cache.prefill((original_ev.event_id,), cache_entry)
 
-            ev.unsigned["redacted_by"] = redaction_id
-            # Get the redaction event.
-
-            because = self._get_event_txn(
-                txn,
-                redaction_id,
-                check_redacted=False
-            )
-
-            if because:
-                ev.unsigned["redacted_because"] = because
-
-        if get_prev_content and "replaces_state" in ev.unsigned:
-            prev = self._get_event_txn(
-                txn,
-                ev.unsigned["replaces_state"],
-                get_prev_content=False,
-            )
-            if prev:
-                ev.unsigned["prev_content"] = prev.content
-                ev.unsigned["prev_sender"] = prev.sender
-
-        self._get_event_cache.prefill(
-            (ev.event_id, check_redacted, get_prev_content), ev
-        )
-
-        return ev
-
-    def _parse_events_txn(self, txn, rows):
-        event_ids = [r["event_id"] for r in rows]
-
-        return self._get_events_txn(txn, event_ids)
+        defer.returnValue(cache_entry)
 
     @defer.inlineCallbacks
     def count_daily_messages(self):
@@ -1208,6 +1254,78 @@ class EventsStore(SQLBaseStore):
         defer.returnValue(ret)
 
     @defer.inlineCallbacks
+    def _background_reindex_fields_sender(self, progress, batch_size):
+        target_min_stream_id = progress["target_min_stream_id_inclusive"]
+        max_stream_id = progress["max_stream_id_exclusive"]
+        rows_inserted = progress.get("rows_inserted", 0)
+
+        INSERT_CLUMP_SIZE = 1000
+
+        def reindex_txn(txn):
+            sql = (
+                "SELECT stream_ordering, event_id, json FROM events"
+                " INNER JOIN event_json USING (event_id)"
+                " WHERE ? <= stream_ordering AND stream_ordering < ?"
+                " ORDER BY stream_ordering DESC"
+                " LIMIT ?"
+            )
+
+            txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size))
+
+            rows = txn.fetchall()
+            if not rows:
+                return 0
+
+            min_stream_id = rows[-1][0]
+
+            update_rows = []
+            for row in rows:
+                try:
+                    event_id = row[1]
+                    event_json = json.loads(row[2])
+                    sender = event_json["sender"]
+                    content = event_json["content"]
+
+                    contains_url = "url" in content
+                    if contains_url:
+                        contains_url &= isinstance(content["url"], basestring)
+                except (KeyError, AttributeError):
+                    # If the event is missing a necessary field then
+                    # skip over it.
+                    continue
+
+                update_rows.append((sender, contains_url, event_id))
+
+            sql = (
+                "UPDATE events SET sender = ?, contains_url = ? WHERE event_id = ?"
+            )
+
+            for index in range(0, len(update_rows), INSERT_CLUMP_SIZE):
+                clump = update_rows[index:index + INSERT_CLUMP_SIZE]
+                txn.executemany(sql, clump)
+
+            progress = {
+                "target_min_stream_id_inclusive": target_min_stream_id,
+                "max_stream_id_exclusive": min_stream_id,
+                "rows_inserted": rows_inserted + len(rows)
+            }
+
+            self._background_update_progress_txn(
+                txn, self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, progress
+            )
+
+            return len(rows)
+
+        result = yield self.runInteraction(
+            self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, reindex_txn
+        )
+
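+        # A zero result means the last batch was empty: the reindex has
+        # reached target_min_stream_id and can be marked finished.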
+        if not result:
+            yield self._end_background_update(self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME)
+
+        defer.returnValue(result)
+
+    @defer.inlineCallbacks
     def _background_reindex_origin_server_ts(self, progress, batch_size):
         target_min_stream_id = progress["target_min_stream_id_inclusive"]
         max_stream_id = progress["max_stream_id_exclusive"]
@@ -1374,6 +1492,162 @@ class EventsStore(SQLBaseStore):
             )
         return self.runInteraction("get_all_new_events", get_all_new_events_txn)
 
+    def delete_old_state(self, room_id, topological_ordering):
+        return self.runInteraction(
+            "delete_old_state",
+            self._delete_old_state_txn, room_id, topological_ordering
+        )
+
+    def _delete_old_state_txn(self, txn, room_id, topological_ordering):
+        """Deletes old room state
+        """
+
+        # Tables that should be pruned:
+        #     event_auth
+        #     event_backward_extremities
+        #     event_content_hashes
+        #     event_destinations
+        #     event_edge_hashes
+        #     event_edges
+        #     event_forward_extremities
+        #     event_json
+        #     event_push_actions
+        #     event_reference_hashes
+        #     event_search
+        #     event_signatures
+        #     event_to_state_groups
+        #     events
+        #     rejections
+        #     room_depth
+        #     state_groups
+        #     state_groups_state
+
+        # First ensure that we're not about to delete all the forward extremities
+        txn.execute(
+            "SELECT e.event_id, e.depth FROM events as e "
+            "INNER JOIN event_forward_extremities as f "
+            "ON e.event_id = f.event_id "
+            "AND e.room_id = f.room_id "
+            "WHERE f.room_id = ?",
+            (room_id,)
+        )
+        rows = txn.fetchall()
+        # depth is the second column of each returned row
+        max_depth = max(row[1] for row in rows)
+
+        if max_depth <= topological_ordering:
+            # We need to ensure we don't delete all the events from the database
+            # otherwise we wouldn't be able to send any events (due to not
+            # having any forward extremities)
+            raise SynapseError(
+                400, "topological_ordering is greater than forward extremities"
+            )
+
+        txn.execute(
+            "SELECT event_id, state_key FROM events"
+            " LEFT JOIN state_events USING (room_id, event_id)"
+            " WHERE room_id = ? AND topological_ordering < ?",
+            (room_id, topological_ordering,)
+        )
+        event_rows = txn.fetchall()
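+        # event_rows holds (event_id, state_key) for everything older than
+        # the cut-off; the LEFT JOIN leaves state_key as NULL for non-state
+        # events, which is how the two are told apart below.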
+
+        # We calculate the new entries for the backward extremities by finding
+        # all events that point to events that are to be purged
+        txn.execute(
+            "SELECT DISTINCT e.event_id FROM events as e"
+            " INNER JOIN event_edges as ed ON e.event_id = ed.prev_event_id"
+            " INNER JOIN events as e2 ON e2.event_id = ed.event_id"
+            " WHERE e.room_id = ? AND e.topological_ordering < ?"
+            " AND e2.topological_ordering >= ?",
+            (room_id, topological_ordering, topological_ordering)
+        )
+        new_backwards_extrems = txn.fetchall()
+
+        txn.execute(
+            "DELETE FROM event_backward_extremities WHERE room_id = ?",
+            (room_id,)
+        )
+
+        # Update backward extremities
+        txn.executemany(
+            "INSERT INTO event_backward_extremities (room_id, event_id)"
+            " VALUES (?, ?)",
+            [
+                (room_id, event_id) for event_id, in new_backwards_extrems
+            ]
+        )
+
+        # Get all state groups that are only referenced by events that are
+        # to be deleted.
+        txn.execute(
+            "SELECT state_group FROM event_to_state_groups"
+            " INNER JOIN events USING (event_id)"
+            " WHERE state_group IN ("
+            "   SELECT DISTINCT state_group FROM events"
+            "   INNER JOIN event_to_state_groups USING (event_id)"
+            "   WHERE room_id = ? AND topological_ordering < ?"
+            " )"
+            " GROUP BY state_group HAVING MAX(topological_ordering) < ?",
+            (room_id, topological_ordering, topological_ordering)
+        )
+        state_rows = txn.fetchall()
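+        # The HAVING clause keeps only state groups whose newest referencing
+        # event is itself older than the cut-off, i.e. groups that become
+        # unreferenced once the purge completes.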
+        txn.executemany(
+            "DELETE FROM state_groups_state WHERE state_group = ?",
+            state_rows
+        )
+        txn.executemany(
+            "DELETE FROM state_groups WHERE id = ?",
+            state_rows
+        )
+        # Remove the event-to-state-group mappings for all events before the
+        # cut-off
+        txn.executemany(
+            "DELETE FROM event_to_state_groups WHERE event_id = ?",
+            [(event_id,) for event_id, _ in event_rows]
+        )
+
+        txn.execute(
+            "UPDATE room_depth SET min_depth = ? WHERE room_id = ?",
+            (topological_ordering, room_id,)
+        )
+
+        # Delete all remote non-state events
+        to_delete = [
+            (event_id,) for event_id, state_key in event_rows
+            if state_key is None and not self.hs.is_mine_id(event_id)
+        ]
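+        # Only remote non-state events are deleted outright; local events and
+        # state events are retained and marked as outliers below, presumably
+        # so they can still be served over federation and used to rebuild
+        # room state.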
+        for table in (
+            "events",
+            "event_json",
+            "event_auth",
+            "event_content_hashes",
+            "event_destinations",
+            "event_edge_hashes",
+            "event_edges",
+            "event_forward_extremities",
+            "event_push_actions",
+            "event_reference_hashes",
+            "event_search",
+            "event_signatures",
+            "rejections",
+        ):
+            txn.executemany(
+                "DELETE FROM %s WHERE event_id = ?" % (table,),
+                to_delete
+            )
+
+        txn.executemany(
+            "DELETE FROM events WHERE event_id = ?",
+            to_delete
+        )
+        # Mark all state events and our own events as outliers
+        txn.executemany(
+            "UPDATE events SET outlier = ?"
+            " WHERE event_id = ?",
+            [
+                (True, event_id,) for event_id, state_key in event_rows
+                if state_key is not None or self.hs.is_mine_id(event_id)
+            ]
+        )
+
 
 AllNewEventsResult = namedtuple("AllNewEventsResult", [
     "new_forward_events", "new_backfill_events",
diff --git a/synapse/storage/keys.py b/synapse/storage/keys.py
index a495a8a7d9..86b37b9ddd 100644
--- a/synapse/storage/keys.py
+++ b/synapse/storage/keys.py
@@ -22,6 +22,10 @@ import OpenSSL
 from signedjson.key import decode_verify_key_bytes
 import hashlib
 
+import logging
+
+logger = logging.getLogger(__name__)
+
 
 class KeyStore(SQLBaseStore):
     """Persistence for signature verification keys and tls X.509 certificates
@@ -74,22 +78,22 @@ class KeyStore(SQLBaseStore):
         )
 
     @cachedInlineCallbacks()
-    def get_all_server_verify_keys(self, server_name):
-        rows = yield self._simple_select_list(
+    def _get_server_verify_key(self, server_name, key_id):
+        verify_key_bytes = yield self._simple_select_one_onecol(
             table="server_signature_keys",
             keyvalues={
                 "server_name": server_name,
+                "key_id": key_id,
             },
-            retcols=["key_id", "verify_key"],
-            desc="get_all_server_verify_keys",
+            retcol="verify_key",
+            desc="_get_server_verify_key",
+            allow_none=True,
         )
 
-        defer.returnValue({
-            row["key_id"]: decode_verify_key_bytes(
-                row["key_id"], str(row["verify_key"])
-            )
-            for row in rows
-        })
+        if verify_key_bytes:
+            defer.returnValue(decode_verify_key_bytes(
+                key_id, str(verify_key_bytes)
+            ))
 
     @defer.inlineCallbacks
     def get_server_verify_keys(self, server_name, key_ids):
@@ -101,12 +105,12 @@ class KeyStore(SQLBaseStore):
         Returns:
              dict[str, VerifyKey]: Map from key_id to verification key.
         """
-        keys = yield self.get_all_server_verify_keys(server_name)
-        defer.returnValue({
-            k: keys[k]
-            for k in key_ids
-            if k in keys and keys[k]
-        })
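+        # Look each key up individually so repeat lookups hit the
+        # per-(server_name, key_id) cache on _get_server_verify_key.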
+        keys = {}
+        for key_id in key_ids:
+            key = yield self._get_server_verify_key(server_name, key_id)
+            if key:
+                keys[key_id] = key
+        defer.returnValue(keys)
 
     @defer.inlineCallbacks
     def store_server_verify_key(self, server_name, from_server, time_now_ms,
@@ -133,8 +137,6 @@ class KeyStore(SQLBaseStore):
             desc="store_server_verify_key",
         )
 
-        self.get_all_server_verify_keys.invalidate((server_name,))
-
     def store_server_keys_json(self, server_name, key_id, from_server,
                                ts_now_ms, ts_expires_ms, key_json_bytes):
         """Stores the JSON bytes for a set of keys from a server
diff --git a/synapse/storage/media_repository.py b/synapse/storage/media_repository.py
index a820fcf07f..4c0f82353d 100644
--- a/synapse/storage/media_repository.py
+++ b/synapse/storage/media_repository.py
@@ -157,10 +157,25 @@ class MediaRepositoryStore(SQLBaseStore):
                 "created_ts": time_now_ms,
                 "upload_name": upload_name,
                 "filesystem_id": filesystem_id,
+                "last_access_ts": time_now_ms,
             },
             desc="store_cached_remote_media",
         )
 
+    def update_cached_last_access_time(self, origin_id_tuples, time_ts):
+        def update_cache_txn(txn):
+            sql = (
+                "UPDATE remote_media_cache SET last_access_ts = ?"
+                " WHERE media_origin = ? AND media_id = ?"
+            )
+
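+            # One UPDATE is issued per (origin, media_id) pair; executemany()
+            # consumes the generator directly.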
+            txn.executemany(sql, (
+                (time_ts, media_origin, media_id)
+                for media_origin, media_id in origin_id_tuples
+            ))
+
+        return self.runInteraction("update_cached_last_access_time", update_cache_txn)
+
     def get_remote_media_thumbnails(self, origin, media_id):
         return self._simple_select_list(
             "remote_media_cache_thumbnails",
@@ -190,3 +205,32 @@ class MediaRepositoryStore(SQLBaseStore):
             },
             desc="store_remote_media_thumbnail",
         )
+
+    def get_remote_media_before(self, before_ts):
+        sql = (
+            "SELECT media_origin, media_id, filesystem_id"
+            " FROM remote_media_cache"
+            " WHERE last_access_ts < ?"
+        )
+
+        return self._execute(
+            "get_remote_media_before", self.cursor_to_dict, sql, before_ts
+        )
+
+    def delete_remote_media(self, media_origin, media_id):
+        def delete_remote_media_txn(txn):
+            self._simple_delete_txn(
+                txn,
+                "remote_media_cache",
+                keyvalues={
+                    "media_origin": media_origin, "media_id": media_id
+                },
+            )
+            self._simple_delete_txn(
+                txn,
+                "remote_media_cache_thumbnails",
+                keyvalues={
+                    "media_origin": media_origin, "media_id": media_id
+                },
+            )
+        return self.runInteraction("delete_remote_media", delete_remote_media_txn)
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index c8487c8838..8801669a6b 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -25,7 +25,7 @@ logger = logging.getLogger(__name__)
 
 # Remember to update this number every time a change is made to database
 # schema files, so the users will be informed on server restarts.
-SCHEMA_VERSION = 32
+SCHEMA_VERSION = 33
 
 dir_path = os.path.abspath(os.path.dirname(__file__))
 
diff --git a/synapse/storage/presence.py b/synapse/storage/presence.py
index 3fab57a7e8..d03f7c541e 100644
--- a/synapse/storage/presence.py
+++ b/synapse/storage/presence.py
@@ -118,6 +118,9 @@ class PresenceStore(SQLBaseStore):
             )
 
     def get_all_presence_updates(self, last_id, current_id):
+        if last_id == current_id:
+            return defer.succeed([])
+
         def get_all_presence_updates_txn(txn):
             sql = (
                 "SELECT stream_id, user_id, state, last_active_ts,"
diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py
index d2bf7f2aec..8183b7f1b0 100644
--- a/synapse/storage/push_rule.py
+++ b/synapse/storage/push_rule.py
@@ -14,7 +14,8 @@
 # limitations under the License.
 
 from ._base import SQLBaseStore
-from synapse.util.caches.descriptors import cachedInlineCallbacks
+from synapse.util.caches.descriptors import cachedInlineCallbacks, cachedList
+from synapse.push.baserules import list_with_base_rules
 from twisted.internet import defer
 
 import logging
@@ -23,8 +24,31 @@ import simplejson as json
 logger = logging.getLogger(__name__)
 
 
+def _load_rules(rawrules, enabled_map):
+    ruleslist = []
+    for rawrule in rawrules:
+        rule = dict(rawrule)
+        rule["conditions"] = json.loads(rawrule["conditions"])
+        rule["actions"] = json.loads(rawrule["actions"])
+        ruleslist.append(rule)
+
+    # Take our own copy of the list; base rules are shared across users, and
+    # individual rules are copied below before they are mutated.
+    rules = list(list_with_base_rules(ruleslist))
+
+    for i, rule in enumerate(rules):
+        rule_id = rule['rule_id']
+        if rule_id in enabled_map:
+            if rule.get('enabled', True) != bool(enabled_map[rule_id]):
+                # Rules are cached across users.
+                rule = dict(rule)
+                rule['enabled'] = bool(enabled_map[rule_id])
+                rules[i] = rule
+
+    return rules
+
+
 class PushRuleStore(SQLBaseStore):
-    @cachedInlineCallbacks()
+    @cachedInlineCallbacks(lru=True)
     def get_push_rules_for_user(self, user_id):
         rows = yield self._simple_select_list(
             table="push_rules",
@@ -42,9 +66,13 @@ class PushRuleStore(SQLBaseStore):
             key=lambda row: (-int(row["priority_class"]), -int(row["priority"]))
         )
 
-        defer.returnValue(rows)
+        enabled_map = yield self.get_push_rules_enabled_for_user(user_id)
 
-    @cachedInlineCallbacks()
+        rules = _load_rules(rows, enabled_map)
+
+        defer.returnValue(rules)
+
+    @cachedInlineCallbacks(lru=True)
     def get_push_rules_enabled_for_user(self, user_id):
         results = yield self._simple_select_list(
             table="push_rules_enable",
@@ -60,12 +88,16 @@ class PushRuleStore(SQLBaseStore):
             r['rule_id']: False if r['enabled'] == 0 else True for r in results
         })
 
-    @defer.inlineCallbacks
+    @cachedList(cached_method_name="get_push_rules_for_user",
+                list_name="user_ids", num_args=1, inlineCallbacks=True)
     def bulk_get_push_rules(self, user_ids):
         if not user_ids:
             defer.returnValue({})
 
-        results = {}
+        results = {
+            user_id: []
+            for user_id in user_ids
+        }
 
         rows = yield self._simple_select_many_batch(
             table="push_rules",
@@ -75,18 +107,32 @@ class PushRuleStore(SQLBaseStore):
             desc="bulk_get_push_rules",
         )
 
-        rows.sort(key=lambda e: (-e["priority_class"], -e["priority"]))
+        rows.sort(
+            key=lambda row: (-int(row["priority_class"]), -int(row["priority"]))
+        )
 
         for row in rows:
             results.setdefault(row['user_name'], []).append(row)
+
+        enabled_map_by_user = yield self.bulk_get_push_rules_enabled(user_ids)
+
+        for user_id, rules in results.items():
+            results[user_id] = _load_rules(
+                rules, enabled_map_by_user.get(user_id, {})
+            )
+
         defer.returnValue(results)
 
-    @defer.inlineCallbacks
+    @cachedList(cached_method_name="get_push_rules_enabled_for_user",
+                list_name="user_ids", num_args=1, inlineCallbacks=True)
     def bulk_get_push_rules_enabled(self, user_ids):
         if not user_ids:
             defer.returnValue({})
 
-        results = {}
+        results = {
+            user_id: {}
+            for user_id in user_ids
+        }
 
         rows = yield self._simple_select_many_batch(
             table="push_rules_enable",
@@ -96,7 +142,8 @@ class PushRuleStore(SQLBaseStore):
             desc="bulk_get_push_rules_enabled",
         )
         for row in rows:
-            results.setdefault(row['user_name'], {})[row['rule_id']] = row['enabled']
+            enabled = bool(row['enabled'])
+            results.setdefault(row['user_name'], {})[row['rule_id']] = enabled
         defer.returnValue(results)
 
     @defer.inlineCallbacks
@@ -374,6 +421,9 @@ class PushRuleStore(SQLBaseStore):
 
     def get_all_push_rule_updates(self, last_id, current_id, limit):
         """Get all the push rules changes that have happend on the server"""
+        if last_id == current_id:
+            return defer.succeed([])
+
         def get_all_push_rule_updates_txn(txn):
             sql = (
                 "SELECT stream_id, event_stream_ordering, user_id, rule_id,"
diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py
index 9e8e2e2964..a7d7c54d7e 100644
--- a/synapse/storage/pusher.py
+++ b/synapse/storage/pusher.py
@@ -18,7 +18,7 @@ from twisted.internet import defer
 
 from canonicaljson import encode_canonical_json
 
-from synapse.util.caches.descriptors import cachedInlineCallbacks
+from synapse.util.caches.descriptors import cachedInlineCallbacks, cachedList
 
 import logging
 import simplejson as json
@@ -135,19 +135,35 @@ class PusherStore(SQLBaseStore):
             "get_all_updated_pushers", get_all_updated_pushers_txn
         )
 
-    @cachedInlineCallbacks(num_args=1)
-    def get_users_with_pushers_in_room(self, room_id):
-        users = yield self.get_users_in_room(room_id)
-
+    @cachedInlineCallbacks(lru=True, num_args=1, max_entries=15000)
+    def get_if_user_has_pusher(self, user_id):
+        # Any row for this user_name means they have at least one pusher.
+        result = yield self._simple_select_one_onecol(
+            table='pushers',
+            keyvalues={
+                'user_name': user_id,
+            },
+            retcol='user_name',
+            desc='get_if_user_has_pusher',
+            allow_none=True,
+        )
+
+        defer.returnValue(bool(result))
+
+    @cachedList(cached_method_name="get_if_user_has_pusher",
+                list_name="user_ids", num_args=1, inlineCallbacks=True)
+    def get_if_users_have_pushers(self, user_ids):
+        rows = yield self._simple_select_many_batch(
+            table='pushers',
             column='user_name',
-            iterable=users,
+            iterable=user_ids,
             retcols=['user_name'],
-            desc='get_users_with_pushers_in_room'
+            desc='get_if_users_have_pushers'
         )
 
-        defer.returnValue([r['user_name'] for r in result])
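+        # Default every requested user to False, then flip to True for those
+        # with at least one pusher row.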
+        result = {user_id: False for user_id in user_ids}
+        result.update({r['user_name']: True for r in rows})
+
+        defer.returnValue(result)
 
     @defer.inlineCallbacks
     def add_pusher(self, user_id, access_token, kind, app_id,
@@ -178,16 +194,16 @@ class PusherStore(SQLBaseStore):
                     },
                 )
                 if newly_inserted:
-                    # get_users_with_pushers_in_room only cares if the user has
+                    # get_if_user_has_pusher only cares if the user has
                     # at least *one* pusher.
-                    txn.call_after(self.get_users_with_pushers_in_room.invalidate_all)
+                    txn.call_after(self.get_if_user_has_pusher.invalidate, (user_id,))
 
             yield self.runInteraction("add_pusher", f)
 
     @defer.inlineCallbacks
     def delete_pusher_by_app_id_pushkey_user_id(self, app_id, pushkey, user_id):
         def delete_pusher_txn(txn, stream_id):
-            txn.call_after(self.get_users_with_pushers_in_room.invalidate_all)
+            txn.call_after(self.get_if_user_has_pusher.invalidate, (user_id,))
 
             self._simple_delete_one_txn(
                 txn,
diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py
index d147a60602..cb4e04a679 100644
--- a/synapse/storage/receipts.py
+++ b/synapse/storage/receipts.py
@@ -34,6 +34,26 @@ class ReceiptsStore(SQLBaseStore):
             "ReceiptsRoomChangeCache", self._receipts_id_gen.get_current_token()
         )
 
+    @cachedInlineCallbacks()
+    def get_users_with_read_receipts_in_room(self, room_id):
+        receipts = yield self.get_receipts_for_room(room_id, "m.read")
+        defer.returnValue(set(r['user_id'] for r in receipts))
+
+    def _invalidate_get_users_with_receipts_in_room(self, room_id, receipt_type,
+                                                    user_id):
+        if receipt_type != "m.read":
+            return
+
+        # Returns an ObservableDeferred
+        res = self.get_users_with_read_receipts_in_room.cache.get((room_id,), None)
+
+        if res and res.called and user_id in res.result:
+            # We'd only be adding to the set, so no point invalidating if the
+            # user is already there
+            return
+
+        self.get_users_with_read_receipts_in_room.invalidate((room_id,))
+
     @cached(num_args=2)
     def get_receipts_for_room(self, room_id, receipt_type):
         return self._simple_select_list(
@@ -254,6 +274,10 @@ class ReceiptsStore(SQLBaseStore):
             self.get_receipts_for_room.invalidate, (room_id, receipt_type)
         )
         txn.call_after(
+            self._invalidate_get_users_with_receipts_in_room,
+            room_id, receipt_type, user_id,
+        )
+        txn.call_after(
             self.get_receipts_for_user.invalidate, (user_id, receipt_type)
         )
         # FIXME: This shouldn't invalidate the whole cache
@@ -399,6 +423,10 @@ class ReceiptsStore(SQLBaseStore):
             self.get_receipts_for_room.invalidate, (room_id, receipt_type)
         )
         txn.call_after(
+            self._invalidate_get_users_with_receipts_in_room,
+            room_id, receipt_type, user_id,
+        )
+        txn.call_after(
             self.get_receipts_for_user.invalidate, (user_id, receipt_type)
         )
         # FIXME: This shouldn't invalidate the whole cache
diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py
index bda84a744a..7e7d32eb66 100644
--- a/synapse/storage/registration.py
+++ b/synapse/storage/registration.py
@@ -18,25 +18,40 @@ import re
 from twisted.internet import defer
 
 from synapse.api.errors import StoreError, Codes
-
-from ._base import SQLBaseStore
+from synapse.storage import background_updates
 from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
 
 
-class RegistrationStore(SQLBaseStore):
+class RegistrationStore(background_updates.BackgroundUpdateStore):
 
     def __init__(self, hs):
         super(RegistrationStore, self).__init__(hs)
 
         self.clock = hs.get_clock()
 
+        self.register_background_index_update(
+            "access_tokens_device_index",
+            index_name="access_tokens_device_id",
+            table="access_tokens",
+            columns=["user_id", "device_id"],
+        )
+
+        self.register_background_index_update(
+            "refresh_tokens_device_index",
+            index_name="refresh_tokens_device_id",
+            table="refresh_tokens",
+            columns=["user_id", "device_id"],
+        )
+
     @defer.inlineCallbacks
-    def add_access_token_to_user(self, user_id, token):
+    def add_access_token_to_user(self, user_id, token, device_id=None):
         """Adds an access token for the given user.
 
         Args:
             user_id (str): The user ID.
             token (str): The new access token to add.
+            device_id (str): ID of the device to associate with the access
+               token
         Raises:
             StoreError if there was a problem adding this.
         """
@@ -47,18 +62,21 @@ class RegistrationStore(SQLBaseStore):
             {
                 "id": next_id,
                 "user_id": user_id,
-                "token": token
+                "token": token,
+                "device_id": device_id,
             },
             desc="add_access_token_to_user",
         )
 
     @defer.inlineCallbacks
-    def add_refresh_token_to_user(self, user_id, token):
+    def add_refresh_token_to_user(self, user_id, token, device_id=None):
         """Adds a refresh token for the given user.
 
         Args:
             user_id (str): The user ID.
             token (str): The new refresh token to add.
+            device_id (str): ID of the device to associate with the refresh
+               token
         Raises:
             StoreError if there was a problem adding this.
         """
@@ -69,25 +87,31 @@ class RegistrationStore(SQLBaseStore):
             {
                 "id": next_id,
                 "user_id": user_id,
-                "token": token
+                "token": token,
+                "device_id": device_id,
             },
             desc="add_refresh_token_to_user",
         )
 
     @defer.inlineCallbacks
-    def register(self, user_id, token, password_hash,
-                 was_guest=False, make_guest=False, appservice_id=None):
+    def register(self, user_id, token=None, password_hash=None,
+                 was_guest=False, make_guest=False, appservice_id=None,
+                 create_profile_with_localpart=None, admin=False):
         """Attempts to register an account.
 
         Args:
             user_id (str): The desired user ID to register.
-            token (str): The desired access token to use for this user.
+            token (str): The desired access token to use for this user. If this
+                is not None, the given access token is associated with the user
+                id.
             password_hash (str): Optional. The password hash for this user.
             was_guest (bool): Optional. Whether this is a guest account being
                 upgraded to a non-guest account.
             make_guest (boolean): True if the new user should be a guest,
                 false to add a regular user account.
             appservice_id (str): The ID of the appservice registering the user.
+            create_profile_with_localpart (str): Optionally create a profile for
+                the given localpart.
+            admin (bool): True to register the user as a server admin.
         Raises:
             StoreError if the user_id could not be registered.
         """
@@ -99,7 +123,9 @@ class RegistrationStore(SQLBaseStore):
             password_hash,
             was_guest,
             make_guest,
-            appservice_id
+            appservice_id,
+            create_profile_with_localpart,
+            admin
         )
         self.get_user_by_id.invalidate((user_id,))
         self.is_guest.invalidate((user_id,))
@@ -112,7 +138,9 @@ class RegistrationStore(SQLBaseStore):
         password_hash,
         was_guest,
         make_guest,
-        appservice_id
+        appservice_id,
+        create_profile_with_localpart,
+        admin,
     ):
         now = int(self.clock.time())
 
@@ -120,29 +148,48 @@ class RegistrationStore(SQLBaseStore):
 
         try:
             if was_guest:
-                txn.execute("UPDATE users SET"
-                            " password_hash = ?,"
-                            " upgrade_ts = ?,"
-                            " is_guest = ?"
-                            " WHERE name = ?",
-                            [password_hash, now, 1 if make_guest else 0, user_id])
+                # Ensure that the guest user actually exists
+                # ``allow_none=False`` makes this raise an exception
+                # if the row isn't in the database.
+                self._simple_select_one_txn(
+                    txn,
+                    "users",
+                    keyvalues={
+                        "name": user_id,
+                        "is_guest": 1,
+                    },
+                    retcols=("name",),
+                    allow_none=False,
+                )
+
+                self._simple_update_one_txn(
+                    txn,
+                    "users",
+                    keyvalues={
+                        "name": user_id,
+                        "is_guest": 1,
+                    },
+                    updatevalues={
+                        "password_hash": password_hash,
+                        "upgrade_ts": now,
+                        "is_guest": 1 if make_guest else 0,
+                        "appservice_id": appservice_id,
+                        "admin": 1 if admin else 0,
+                    }
+                )
             else:
-                txn.execute("INSERT INTO users "
-                            "("
-                            "   name,"
-                            "   password_hash,"
-                            "   creation_ts,"
-                            "   is_guest,"
-                            "   appservice_id"
-                            ") "
-                            "VALUES (?,?,?,?,?)",
-                            [
-                                user_id,
-                                password_hash,
-                                now,
-                                1 if make_guest else 0,
-                                appservice_id,
-                            ])
+                self._simple_insert_txn(
+                    txn,
+                    "users",
+                    values={
+                        "name": user_id,
+                        "password_hash": password_hash,
+                        "creation_ts": now,
+                        "is_guest": 1 if make_guest else 0,
+                        "appservice_id": appservice_id,
+                        "admin": 1 if admin else 0,
+                    }
+                )
         except self.database_engine.module.IntegrityError:
             raise StoreError(
                 400, "User ID already taken.", errcode=Codes.USER_IN_USE
@@ -157,6 +204,12 @@ class RegistrationStore(SQLBaseStore):
                 (next_id, user_id, token,)
             )
 
+        if create_profile_with_localpart:
+            txn.execute(
+                "INSERT INTO profiles(user_id) VALUES (?)",
+                (create_profile_with_localpart,)
+            )
+
     @cached()
     def get_user_by_id(self, user_id):
         return self._simple_select_one(
@@ -198,16 +251,37 @@ class RegistrationStore(SQLBaseStore):
         self.get_user_by_id.invalidate((user_id,))
 
     @defer.inlineCallbacks
-    def user_delete_access_tokens(self, user_id, except_token_ids=[]):
-        def f(txn):
-            sql = "SELECT token FROM access_tokens WHERE user_id = ?"
+    def user_delete_access_tokens(self, user_id, except_token_ids=[],
+                                  device_id=None,
+                                  delete_refresh_tokens=False):
+        """
+        Invalidate access/refresh tokens belonging to a user
+
+        Args:
+            user_id (str):  ID of user the tokens belong to
+            except_token_ids (list[str]): list of access_tokens which should
+                *not* be deleted
+            device_id (str|None):  ID of device the tokens are associated with.
+                If None, tokens associated with any device (or no device) will
+                be deleted
+            delete_refresh_tokens (bool):  True to delete refresh tokens as
+                well as access tokens.
+        Returns:
+            defer.Deferred:
+        """
+        def f(txn, table, except_tokens, call_after_delete):
+            sql = "SELECT token FROM %s WHERE user_id = ?" % table
             clauses = [user_id]
 
-            if except_token_ids:
+            if device_id is not None:
+                sql += " AND device_id = ?"
+                clauses.append(device_id)
+
+            if except_tokens:
                 sql += " AND id NOT IN (%s)" % (
-                    ",".join(["?" for _ in except_token_ids]),
+                    ",".join(["?" for _ in except_tokens]),
                 )
-                clauses += except_token_ids
+                clauses += except_tokens
 
             txn.execute(sql, clauses)
 
@@ -216,16 +290,33 @@ class RegistrationStore(SQLBaseStore):
             n = 100
             chunks = [rows[i:i + n] for i in xrange(0, len(rows), n)]
             for chunk in chunks:
-                for row in chunk:
-                    txn.call_after(self.get_user_by_access_token.invalidate, (row[0],))
+                if call_after_delete:
+                    for row in chunk:
+                        txn.call_after(call_after_delete, (row[0],))
 
                 txn.execute(
-                    "DELETE FROM access_tokens WHERE token in (%s)" % (
+                    "DELETE FROM %s WHERE token in (%s)" % (
+                        table,
                         ",".join(["?" for _ in chunk]),
                     ), [r[0] for r in chunk]
                 )
 
-        yield self.runInteraction("user_delete_access_tokens", f)
+        # delete refresh tokens first, to stop new access tokens being
+        # allocated while our backs are turned
+        if delete_refresh_tokens:
+            yield self.runInteraction(
+                "user_delete_access_tokens", f,
+                table="refresh_tokens",
+                except_tokens=[],
+                call_after_delete=None,
+            )
+
+        yield self.runInteraction(
+            "user_delete_access_tokens", f,
+            table="access_tokens",
+            except_tokens=except_token_ids,
+            call_after_delete=self.get_user_by_access_token.invalidate,
+        )
 
     def delete_access_token(self, access_token):
         def f(txn):
@@ -248,9 +339,8 @@ class RegistrationStore(SQLBaseStore):
         Args:
             token (str): The access token of a user.
         Returns:
-            dict: Including the name (user_id) and the ID of their access token.
-        Raises:
-            StoreError if no user was found.
+            defer.Deferred: None, if the token did not match, otherwise dict
+                including the keys `name`, `is_guest`, `device_id`, `token_id`.
         """
         return self.runInteraction(
             "get_user_by_access_token",
@@ -259,18 +349,18 @@ class RegistrationStore(SQLBaseStore):
         )
 
     def exchange_refresh_token(self, refresh_token, token_generator):
-        """Exchange a refresh token for a new access token and refresh token.
+        """Exchange a refresh token for a new one.
 
         Doing so invalidates the old refresh token - refresh tokens are single
         use.
 
         Args:
-            token (str): The refresh token of a user.
+            refresh_token (str): The refresh token of a user.
             token_generator (fn: str -> str): Function which, when given a
                 user ID, returns a unique refresh token for that user. This
                 function must never return the same value twice.
         Returns:
-            tuple of (user_id, refresh_token)
+            tuple of (user_id, new_refresh_token, device_id)
         Raises:
             StoreError if no user was found with that refresh token.
         """
@@ -282,12 +372,13 @@ class RegistrationStore(SQLBaseStore):
         )
 
     def _exchange_refresh_token(self, txn, old_token, token_generator):
-        sql = "SELECT user_id FROM refresh_tokens WHERE token = ?"
+        sql = "SELECT user_id, device_id FROM refresh_tokens WHERE token = ?"
         txn.execute(sql, (old_token,))
         rows = self.cursor_to_dict(txn)
         if not rows:
             raise StoreError(403, "Did not recognize refresh token")
         user_id = rows[0]["user_id"]
+        device_id = rows[0]["device_id"]
 
         # TODO(danielwh): Maybe perform a validation on the macaroon that
         # macaroon.user_id == user_id.
@@ -296,7 +387,7 @@ class RegistrationStore(SQLBaseStore):
         sql = "UPDATE refresh_tokens SET token = ? WHERE token = ?"
         txn.execute(sql, (new_token, old_token,))
 
-        return user_id, new_token
+        return user_id, new_token, device_id
 
     @defer.inlineCallbacks
     def is_server_admin(self, user):
@@ -324,7 +415,8 @@ class RegistrationStore(SQLBaseStore):
 
     def _query_for_auth(self, txn, token):
         sql = (
-            "SELECT users.name, users.is_guest, access_tokens.id as token_id"
+            "SELECT users.name, users.is_guest, access_tokens.id as token_id,"
+            " access_tokens.device_id"
             " FROM users"
             " INNER JOIN access_tokens on users.name = access_tokens.user_id"
             " WHERE token = ?"
@@ -373,6 +465,15 @@ class RegistrationStore(SQLBaseStore):
             defer.returnValue(ret['user_id'])
         defer.returnValue(None)
 
+    def user_delete_threepids(self, user_id):
+        return self._simple_delete(
+            "user_threepids",
+            keyvalues={
+                "user_id": user_id,
+            },
+            desc="user_delete_threepids",
+        )
+
     @defer.inlineCallbacks
     def count_all_users(self):
         """Counts all users registered on the homeserver."""
diff --git a/synapse/storage/room.py b/synapse/storage/room.py
index 26933e593a..8251f58670 100644
--- a/synapse/storage/room.py
+++ b/synapse/storage/room.py
@@ -18,7 +18,6 @@ from twisted.internet import defer
 from synapse.api.errors import StoreError
 
 from ._base import SQLBaseStore
-from synapse.util.caches.descriptors import cachedInlineCallbacks
 from .engines import PostgresEngine, Sqlite3Engine
 
 import collections
@@ -192,37 +191,6 @@ class RoomStore(SQLBaseStore):
             # This should be unreachable.
             raise Exception("Unrecognized database engine")
 
-    @cachedInlineCallbacks()
-    def get_room_name_and_aliases(self, room_id):
-        def f(txn):
-            sql = (
-                "SELECT event_id FROM current_state_events "
-                "WHERE room_id = ? "
-            )
-
-            sql += " AND ((type = 'm.room.name' AND state_key = '')"
-            sql += " OR type = 'm.room.aliases')"
-
-            txn.execute(sql, (room_id,))
-            results = self.cursor_to_dict(txn)
-
-            return self._parse_events_txn(txn, results)
-
-        events = yield self.runInteraction("get_room_name_and_aliases", f)
-
-        name = None
-        aliases = []
-
-        for e in events:
-            if e.type == 'm.room.name':
-                if 'name' in e.content:
-                    name = e.content['name']
-            elif e.type == 'm.room.aliases':
-                if 'aliases' in e.content:
-                    aliases.extend(e.content['aliases'])
-
-        defer.returnValue((name, aliases))
-
     def add_event_report(self, room_id, event_id, user_id, reason, content,
                          received_ts):
         next_id = self._event_reports_id_gen.get_next()
diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py
index face685ed2..8bd693be72 100644
--- a/synapse/storage/roommember.py
+++ b/synapse/storage/roommember.py
@@ -59,9 +59,6 @@ class RoomMemberStore(SQLBaseStore):
             txn.call_after(self.get_joined_hosts_for_room.invalidate, (event.room_id,))
             txn.call_after(self.get_users_in_room.invalidate, (event.room_id,))
             txn.call_after(
-                self.get_users_with_pushers_in_room.invalidate, (event.room_id,)
-            )
-            txn.call_after(
                 self._membership_stream_cache.entity_has_changed,
                 event.state_key, event.internal_metadata.stream_ordering
             )
@@ -241,30 +238,10 @@ class RoomMemberStore(SQLBaseStore):
 
         return results
 
-    @cached(max_entries=5000)
+    @cachedInlineCallbacks(max_entries=5000)
     def get_joined_hosts_for_room(self, room_id):
-        return self.runInteraction(
-            "get_joined_hosts_for_room",
-            self._get_joined_hosts_for_room_txn,
-            room_id,
-        )
-
-    def _get_joined_hosts_for_room_txn(self, txn, room_id):
-        rows = self._get_members_rows_txn(
-            txn,
-            room_id, membership=Membership.JOIN
-        )
-
-        joined_domains = set(get_domain_from_id(r["user_id"]) for r in rows)
-
-        return joined_domains
-
-    def _get_members_events_txn(self, txn, room_id, membership=None, user_id=None):
-        rows = self._get_members_rows_txn(
-            txn,
-            room_id, membership, user_id,
-        )
-        return [r["event_id"] for r in rows]
+        user_ids = yield self.get_users_in_room(room_id)
+        defer.returnValue(set(get_domain_from_id(uid) for uid in user_ids))
 
     def _get_members_rows_txn(self, txn, room_id, membership=None, user_id=None):
         where_clause = "c.room_id = ?"
diff --git a/synapse/storage/schema/delta/30/as_users.py b/synapse/storage/schema/delta/30/as_users.py
index b417e3ac08..5b7d8d1ab5 100644
--- a/synapse/storage/schema/delta/30/as_users.py
+++ b/synapse/storage/schema/delta/30/as_users.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
-from synapse.storage.appservice import ApplicationServiceStore
+from synapse.config.appservice import load_appservices
 
 
 logger = logging.getLogger(__name__)
@@ -38,7 +38,7 @@ def run_upgrade(cur, database_engine, config, *args, **kwargs):
         logger.warning("Could not get app_service_config_files from config")
         pass
 
-    appservices = ApplicationServiceStore.load_appservices(
+    appservices = load_appservices(
         config.server_name, config_files
     )
 
diff --git a/synapse/storage/schema/delta/33/access_tokens_device_index.sql b/synapse/storage/schema/delta/33/access_tokens_device_index.sql
new file mode 100644
index 0000000000..61ad3fe3e8
--- /dev/null
+++ b/synapse/storage/schema/delta/33/access_tokens_device_index.sql
@@ -0,0 +1,17 @@
+/* Copyright 2016 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+INSERT INTO background_updates (update_name, progress_json) VALUES
+  ('access_tokens_device_index', '{}');
diff --git a/synapse/storage/schema/delta/33/devices.sql b/synapse/storage/schema/delta/33/devices.sql
new file mode 100644
index 0000000000..eca7268d82
--- /dev/null
+++ b/synapse/storage/schema/delta/33/devices.sql
@@ -0,0 +1,21 @@
+/* Copyright 2016 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE devices (
+    user_id TEXT NOT NULL,
+    device_id TEXT NOT NULL,
+    display_name TEXT,
+    CONSTRAINT device_uniqueness UNIQUE (user_id, device_id)
+);
diff --git a/synapse/storage/schema/delta/33/devices_for_e2e_keys.sql b/synapse/storage/schema/delta/33/devices_for_e2e_keys.sql
new file mode 100644
index 0000000000..aa4a3b9f2f
--- /dev/null
+++ b/synapse/storage/schema/delta/33/devices_for_e2e_keys.sql
@@ -0,0 +1,19 @@
+/* Copyright 2016 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- make sure that we have a device record for each set of E2E keys, so that the
+-- user can delete them if they like.
+INSERT INTO devices
+    SELECT user_id, device_id, NULL FROM e2e_device_keys_json;
diff --git a/synapse/storage/schema/delta/33/devices_for_e2e_keys_clear_unknown_device.sql b/synapse/storage/schema/delta/33/devices_for_e2e_keys_clear_unknown_device.sql
new file mode 100644
index 0000000000..6671573398
--- /dev/null
+++ b/synapse/storage/schema/delta/33/devices_for_e2e_keys_clear_unknown_device.sql
@@ -0,0 +1,20 @@
+/* Copyright 2016 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- a previous version of the "devices_for_e2e_keys" delta set all the device
+-- names to "unknown device". This wasn't terribly helpful
+UPDATE devices
+    SET display_name = NULL
+    WHERE display_name = 'unknown device';
diff --git a/synapse/storage/schema/delta/33/event_fields.py b/synapse/storage/schema/delta/33/event_fields.py
new file mode 100644
index 0000000000..83066cccc9
--- /dev/null
+++ b/synapse/storage/schema/delta/33/event_fields.py
@@ -0,0 +1,60 @@
+# Copyright 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.storage.prepare_database import get_statements
+
+import logging
+import ujson
+
+logger = logging.getLogger(__name__)
+
+
+ALTER_TABLE = """
+ALTER TABLE events ADD COLUMN sender TEXT;
+ALTER TABLE events ADD COLUMN contains_url BOOLEAN;
+"""
+
+
+def run_create(cur, database_engine, *args, **kwargs):
+    for statement in get_statements(ALTER_TABLE.splitlines()):
+        cur.execute(statement)
+
+    cur.execute("SELECT MIN(stream_ordering) FROM events")
+    rows = cur.fetchall()
+    min_stream_id = rows[0][0]
+
+    cur.execute("SELECT MAX(stream_ordering) FROM events")
+    rows = cur.fetchall()
+    max_stream_id = rows[0][0]
+
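+    # Seed the background update over the full existing range of events; the
+    # reindex in EventsStore then walks it from newest to oldest in batches.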
+    if min_stream_id is not None and max_stream_id is not None:
+        progress = {
+            "target_min_stream_id_inclusive": min_stream_id,
+            "max_stream_id_exclusive": max_stream_id + 1,
+            "rows_inserted": 0,
+        }
+        progress_json = ujson.dumps(progress)
+
+        sql = (
+            "INSERT into background_updates (update_name, progress_json)"
+            " VALUES (?, ?)"
+        )
+
+        sql = database_engine.convert_param_style(sql)
+
+        cur.execute(sql, ("event_fields_sender_url", progress_json))
+
+
+def run_upgrade(cur, database_engine, *args, **kwargs):
+    pass
diff --git a/synapse/storage/schema/delta/33/refreshtoken_device.sql b/synapse/storage/schema/delta/33/refreshtoken_device.sql
new file mode 100644
index 0000000000..290bd6da86
--- /dev/null
+++ b/synapse/storage/schema/delta/33/refreshtoken_device.sql
@@ -0,0 +1,16 @@
+/* Copyright 2016 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ALTER TABLE refresh_tokens ADD COLUMN device_id TEXT;
diff --git a/synapse/storage/schema/delta/33/refreshtoken_device_index.sql b/synapse/storage/schema/delta/33/refreshtoken_device_index.sql
new file mode 100644
index 0000000000..bb225dafbf
--- /dev/null
+++ b/synapse/storage/schema/delta/33/refreshtoken_device_index.sql
@@ -0,0 +1,17 @@
+/* Copyright 2016 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+INSERT INTO background_updates (update_name, progress_json) VALUES
+  ('refresh_tokens_device_index', '{}');
diff --git a/synapse/storage/schema/delta/33/remote_media_ts.py b/synapse/storage/schema/delta/33/remote_media_ts.py
new file mode 100644
index 0000000000..55ae43f395
--- /dev/null
+++ b/synapse/storage/schema/delta/33/remote_media_ts.py
@@ -0,0 +1,31 @@
+# Copyright 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import time
+
+
+ALTER_TABLE = "ALTER TABLE remote_media_cache ADD COLUMN last_access_ts BIGINT"
+
+
+def run_create(cur, database_engine, *args, **kwargs):
+    cur.execute(ALTER_TABLE)
+
+
+def run_upgrade(cur, database_engine, *args, **kwargs):
+    cur.execute(
+        database_engine.convert_param_style(
+            "UPDATE remote_media_cache SET last_access_ts = ?"
+        ),
+        (int(time.time() * 1000),)
+    )
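+    # Back-fill the new column with "now" so existing cached media isn't
+    # treated as never accessed (and thus immediately eligible for purging).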
diff --git a/synapse/storage/schema/delta/33/user_ips_index.sql b/synapse/storage/schema/delta/33/user_ips_index.sql
new file mode 100644
index 0000000000..473f75a78e
--- /dev/null
+++ b/synapse/storage/schema/delta/33/user_ips_index.sql
@@ -0,0 +1,17 @@
+/* Copyright 2016 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+INSERT INTO background_updates (update_name, progress_json) VALUES
+  ('user_ips_device_index', '{}');
diff --git a/synapse/storage/search.py b/synapse/storage/search.py
index 0224299625..12941d1775 100644
--- a/synapse/storage/search.py
+++ b/synapse/storage/search.py
@@ -21,6 +21,7 @@ from synapse.storage.engines import PostgresEngine, Sqlite3Engine
 
 import logging
 import re
+import ujson as json
 
 
 logger = logging.getLogger(__name__)
@@ -52,7 +53,7 @@ class SearchStore(BackgroundUpdateStore):
 
         def reindex_search_txn(txn):
             sql = (
-                "SELECT stream_ordering, event_id FROM events"
+                "SELECT stream_ordering, event_id, room_id, type, content FROM events"
                 " WHERE ? <= stream_ordering AND stream_ordering < ?"
                 " AND (%s)"
                 " ORDER BY stream_ordering DESC"
@@ -61,28 +62,30 @@ class SearchStore(BackgroundUpdateStore):
 
             txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size))
 
-            rows = txn.fetchall()
+            rows = self.cursor_to_dict(txn)
             if not rows:
                 return 0
 
-            min_stream_id = rows[-1][0]
-            event_ids = [row[1] for row in rows]
-
-            events = self._get_events_txn(txn, event_ids)
+            min_stream_id = rows[-1]["stream_ordering"]
 
             event_search_rows = []
-            for event in events:
+            for row in rows:
                 try:
-                    event_id = event.event_id
-                    room_id = event.room_id
-                    content = event.content
-                    if event.type == "m.room.message":
+                    event_id = row["event_id"]
+                    room_id = row["room_id"]
+                    etype = row["type"]
+                    try:
+                        content = json.loads(row["content"])
+                    except Exception:
+                        continue
+
+                    if etype == "m.room.message":
                         key = "content.body"
                         value = content["body"]
-                    elif event.type == "m.room.topic":
+                    elif etype == "m.room.topic":
                         key = "content.topic"
                         value = content["topic"]
-                    elif event.type == "m.room.name":
+                    elif etype == "m.room.name":
                         key = "content.name"
                         value = content["name"]
                 except (KeyError, AttributeError):
diff --git a/synapse/storage/signatures.py b/synapse/storage/signatures.py
index b10f2a5787..ea6823f18d 100644
--- a/synapse/storage/signatures.py
+++ b/synapse/storage/signatures.py
@@ -19,17 +19,24 @@ from ._base import SQLBaseStore
 
 from unpaddedbase64 import encode_base64
 from synapse.crypto.event_signing import compute_event_reference_hash
+from synapse.util.caches.descriptors import cached, cachedList
 
 
 class SignatureStore(SQLBaseStore):
     """Persistence for event signatures and hashes"""
 
+    @cached(lru=True)
+    def get_event_reference_hash(self, event_id):
+        # _get_event_reference_hashes_txn needs a txn, so run it inside an
+        # interaction rather than calling it directly.
+        return self.runInteraction(
+            "get_event_reference_hash",
+            self._get_event_reference_hashes_txn,
+            event_id,
+        )
+
+    @cachedList(cached_method_name="get_event_reference_hash",
+                list_name="event_ids", num_args=1)
     def get_event_reference_hashes(self, event_ids):
         def f(txn):
-            return [
-                self._get_event_reference_hashes_txn(txn, ev)
-                for ev in event_ids
-            ]
+            return {
+                event_id: self._get_event_reference_hashes_txn(txn, event_id)
+                for event_id in event_ids
+            }
 
         return self.runInteraction(
             "get_event_reference_hashes",
@@ -41,15 +48,15 @@ class SignatureStore(SQLBaseStore):
         hashes = yield self.get_event_reference_hashes(
             event_ids
         )
-        hashes = [
-            {
+        hashes = {
+            e_id: {
                 k: encode_base64(v) for k, v in h.items()
                 if k == "sha256"
             }
-            for h in hashes
-        ]
+            for e_id, h in hashes.items()
+        }
 
-        defer.returnValue(zip(event_ids, hashes))
+        defer.returnValue(hashes.items())
 
     def _get_event_reference_hashes_txn(self, txn, event_id):
         """Get all the hashes for a given PDU.
diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py
index 95b12559a6..862c5c3ea1 100644
--- a/synapse/storage/stream.py
+++ b/synapse/storage/stream.py
@@ -40,6 +40,7 @@ from synapse.util.caches.descriptors import cached
 from synapse.api.constants import EventTypes
 from synapse.types import RoomStreamToken
 from synapse.util.logcontext import preserve_fn
+from synapse.storage.engines import PostgresEngine, Sqlite3Engine
 
 import logging
 
@@ -54,26 +55,92 @@ _STREAM_TOKEN = "stream"
 _TOPOLOGICAL_TOKEN = "topological"
 
 
-def lower_bound(token):
+def lower_bound(token, engine, inclusive=False):
+    inclusive = "=" if inclusive else ""
     if token.topological is None:
-        return "(%d < %s)" % (token.stream, "stream_ordering")
+        return "(%d <%s %s)" % (token.stream, inclusive, "stream_ordering")
     else:
-        return "(%d < %s OR (%d = %s AND %d < %s))" % (
+        if isinstance(engine, PostgresEngine):
+            # Postgres doesn't optimise ``(x < a) OR (x=a AND y<b)`` as well
+            # as it optimises ``(x,y) < (a,b)`` on multicolumn indexes. So we
+            # use the latter form when running against postgres.
+            return "((%d,%d) <%s (%s,%s))" % (
+                token.topological, token.stream, inclusive,
+                "topological_ordering", "stream_ordering",
+            )
+        return "(%d < %s OR (%d = %s AND %d <%s %s))" % (
             token.topological, "topological_ordering",
             token.topological, "topological_ordering",
-            token.stream, "stream_ordering",
+            token.stream, inclusive, "stream_ordering",
         )
 
 
-def upper_bound(token):
+def upper_bound(token, engine, inclusive=True):
+    inclusive = "=" if inclusive else ""
     if token.topological is None:
-        return "(%d >= %s)" % (token.stream, "stream_ordering")
+        return "(%d >%s %s)" % (token.stream, inclusive, "stream_ordering")
     else:
-        return "(%d > %s OR (%d = %s AND %d >= %s))" % (
+        if isinstance(engine, PostgresEngine):
+            # Postgres doesn't optimise ``(x > a) OR (x=a AND y>b)`` as well
+            # as it optimises ``(x,y) > (a,b)`` on multicolumn indexes. So we
+            # use the latter form when running against postgres.
+            return "((%d,%d) >%s (%s,%s))" % (
+                token.topological, token.stream, inclusive,
+                "topological_ordering", "stream_ordering",
+            )
+        return "(%d > %s OR (%d = %s AND %d >%s %s))" % (
             token.topological, "topological_ordering",
             token.topological, "topological_ordering",
-            token.stream, "stream_ordering",
+            token.stream, inclusive, "stream_ordering",
+        )
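+
+# Illustrative output (sketch, not executed): for a RoomStreamToken with
+# topological=5 and stream=3, lower_bound(token, engine) yields, on Postgres,
+#     ((5,3) < (topological_ordering,stream_ordering))
+# and on other engines the expanded form
+#     (5 < topological_ordering
+#      OR (5 = topological_ordering AND 3 < stream_ordering))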
+
+
+def filter_to_clause(event_filter):
+    # NB: This may create SQL clauses that don't optimise well (and we don't
+    # have indices on all possible clauses). E.g. it may create
+    # "room_id == X AND room_id != X", which postgres doesn't optimise.
+
+    if not event_filter:
+        return "", []
+
+    clauses = []
+    args = []
+
+    if event_filter.types:
+        clauses.append(
+            "(%s)" % " OR ".join("type = ?" for _ in event_filter.types)
         )
+        args.extend(event_filter.types)
+
+    for typ in event_filter.not_types:
+        clauses.append("type != ?")
+        args.append(typ)
+
+    if event_filter.senders:
+        clauses.append(
+            "(%s)" % " OR ".join("sender = ?" for _ in event_filter.senders)
+        )
+        args.extend(event_filter.senders)
+
+    for sender in event_filter.not_senders:
+        clauses.append("sender != ?")
+        args.append(sender)
+
+    if event_filter.rooms:
+        clauses.append(
+            "(%s)" % " OR ".join("room_id = ?" for _ in event_filter.rooms)
+        )
+        args.extend(event_filter.rooms)
+
+    for room_id in event_filter.not_rooms:
+        clauses.append("room_id != ?")
+        args.append(room_id)
+
+    if event_filter.contains_url:
+        clauses.append("contains_url = ?")
+        args.append(event_filter.contains_url)
+
+    return " AND ".join(clauses), args
 
 
 class StreamStore(SQLBaseStore):
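
A quick sketch of what filter_to_clause produces for a simple filter (the stub
below is hypothetical, standing in for the real filter object):

    class _StubFilter(object):
        types = ["m.room.message"]
        not_types = []
        senders = []
        not_senders = []
        rooms = ["!abc:hs"]
        not_rooms = []
        contains_url = None

    clause, args = filter_to_clause(_StubFilter())
    # clause == "(type = ?) AND (room_id = ?)"
    # args == ["m.room.message", "!abc:hs"]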
@@ -132,29 +199,25 @@ class StreamStore(SQLBaseStore):
                         return True
                 return False
 
-            ret = self._get_events_txn(
-                txn,
-                # apply the filter on the room id list
-                [
-                    r["event_id"] for r in rows
-                    if app_service_interested(r)
-                ],
-                get_prev_content=True
-            )
+            return [r for r in rows if app_service_interested(r)]
 
-            self._set_before_and_after(ret, rows)
+        rows = yield self.runInteraction("get_appservice_room_stream", f)
 
-            if rows:
-                key = "s%d" % max(r["stream_ordering"] for r in rows)
-            else:
-                # Assume we didn't get anything because there was nothing to
-                # get.
-                key = to_key
+        ret = yield self._get_events(
+            [r["event_id"] for r in rows],
+            get_prev_content=True
+        )
 
-            return ret, key
+        self._set_before_and_after(ret, rows, topo_order=from_id is None)
 
-        results = yield self.runInteraction("get_appservice_room_stream", f)
-        defer.returnValue(results)
+        if rows:
+            key = "s%d" % max(r["stream_ordering"] for r in rows)
+        else:
+            # Assume we didn't get anything because there was nothing to
+            # get.
+            key = to_key
+
+        defer.returnValue((ret, key))
 
     @defer.inlineCallbacks
     def get_room_events_stream_for_rooms(self, room_ids, from_key, to_key, limit=0,
@@ -305,25 +368,35 @@ class StreamStore(SQLBaseStore):
 
     @defer.inlineCallbacks
     def paginate_room_events(self, room_id, from_key, to_key=None,
-                             direction='b', limit=-1):
+                             direction='b', limit=-1, event_filter=None):
         # Tokens really represent positions between elements, but we use
         # the convention of pointing to the event before the gap. Hence
         # we have a bit of asymmetry when it comes to equalities.
         args = [False, room_id]
         if direction == 'b':
             order = "DESC"
-            bounds = upper_bound(RoomStreamToken.parse(from_key))
+            bounds = upper_bound(
+                RoomStreamToken.parse(from_key), self.database_engine
+            )
             if to_key:
-                bounds = "%s AND %s" % (
-                    bounds, lower_bound(RoomStreamToken.parse(to_key))
-                )
+                bounds = "%s AND %s" % (bounds, lower_bound(
+                    RoomStreamToken.parse(to_key), self.database_engine
+                ))
         else:
             order = "ASC"
-            bounds = lower_bound(RoomStreamToken.parse(from_key))
+            bounds = lower_bound(
+                RoomStreamToken.parse(from_key), self.database_engine
+            )
             if to_key:
-                bounds = "%s AND %s" % (
-                    bounds, upper_bound(RoomStreamToken.parse(to_key))
-                )
+                bounds = "%s AND %s" % (bounds, upper_bound(
+                    RoomStreamToken.parse(to_key), self.database_engine
+                ))
+
+        filter_clause, filter_args = filter_to_clause(event_filter)
+
+        if filter_clause:
+            bounds += " AND " + filter_clause
+            args.extend(filter_args)
 
         if int(limit) > 0:
             args.append(int(limit))
@@ -491,13 +564,13 @@ class StreamStore(SQLBaseStore):
             row["topological_ordering"], row["stream_ordering"],)
         )
 
-    def get_max_topological_token_for_stream_and_room(self, room_id, stream_key):
+    def get_max_topological_token(self, room_id, stream_key):
         sql = (
             "SELECT max(topological_ordering) FROM events"
             " WHERE room_id = ? AND stream_ordering < ?"
         )
         return self._execute(
-            "get_max_topological_token_for_stream_and_room", None,
+            "get_max_topological_token", None,
             sql, room_id, stream_key,
         ).addCallback(
             lambda r: r[0][0] if r else 0
@@ -590,32 +663,60 @@ class StreamStore(SQLBaseStore):
             retcols=["stream_ordering", "topological_ordering"],
         )
 
-        stream_ordering = results["stream_ordering"]
-        topological_ordering = results["topological_ordering"]
-
-        query_before = (
-            "SELECT topological_ordering, stream_ordering, event_id FROM events"
-            " WHERE room_id = ? AND (topological_ordering < ?"
-            " OR (topological_ordering = ? AND stream_ordering < ?))"
-            " ORDER BY topological_ordering DESC, stream_ordering DESC"
-            " LIMIT ?"
+        token = RoomStreamToken(
+            results["topological_ordering"],
+            results["stream_ordering"],
         )
 
-        query_after = (
-            "SELECT topological_ordering, stream_ordering, event_id FROM events"
-            " WHERE room_id = ? AND (topological_ordering > ?"
-            " OR (topological_ordering = ? AND stream_ordering > ?))"
-            " ORDER BY topological_ordering ASC, stream_ordering ASC"
-            " LIMIT ?"
-        )
+        if isinstance(self.database_engine, Sqlite3Engine):
+            # SQLite3 doesn't optimise ``(x < a) OR (x = a AND y < b)``,
+            # so we pass it to SQLite3 as the UNION ALL of the two queries.
+
+            query_before = (
+                "SELECT topological_ordering, stream_ordering, event_id FROM events"
+                " WHERE room_id = ? AND topological_ordering < ?"
+                " UNION ALL"
+                " SELECT topological_ordering, stream_ordering, event_id FROM events"
+                " WHERE room_id = ? AND topological_ordering = ? AND stream_ordering < ?"
+                " ORDER BY topological_ordering DESC, stream_ordering DESC LIMIT ?"
+            )
+            before_args = (
+                room_id, token.topological,
+                room_id, token.topological, token.stream,
+                before_limit,
+            )
 
-        txn.execute(
-            query_before,
-            (
-                room_id, topological_ordering, topological_ordering,
-                stream_ordering, before_limit,
+            query_after = (
+                "SELECT topological_ordering, stream_ordering, event_id FROM events"
+                " WHERE room_id = ? AND topological_ordering > ?"
+                " UNION ALL"
+                " SELECT topological_ordering, stream_ordering, event_id FROM events"
+                " WHERE room_id = ? AND topological_ordering = ? AND stream_ordering > ?"
+                " ORDER BY topological_ordering ASC, stream_ordering ASC LIMIT ?"
             )
-        )
+            after_args = (
+                room_id, token.topological,
+                room_id, token.topological, token.stream,
+                after_limit,
+            )
+        else:
+            query_before = (
+                "SELECT topological_ordering, stream_ordering, event_id FROM events"
+                " WHERE room_id = ? AND %s"
+                " ORDER BY topological_ordering DESC, stream_ordering DESC LIMIT ?"
+            ) % (upper_bound(token, self.database_engine, inclusive=False),)
+
+            before_args = (room_id, before_limit)
+
+            query_after = (
+                "SELECT topological_ordering, stream_ordering, event_id FROM events"
+                " WHERE room_id = ? AND %s"
+                " ORDER BY topological_ordering ASC, stream_ordering ASC LIMIT ?"
+            ) % (lower_bound(token, self.database_engine, inclusive=False),)
+
+            after_args = (room_id, after_limit)
+
+        txn.execute(query_before, before_args)
 
         rows = self.cursor_to_dict(txn)
         events_before = [r["event_id"] for r in rows]
@@ -627,17 +728,11 @@ class StreamStore(SQLBaseStore):
             ))
         else:
             start_token = str(RoomStreamToken(
-                topological_ordering,
-                stream_ordering - 1,
+                token.topological,
+                token.stream - 1,
             ))
 
-        txn.execute(
-            query_after,
-            (
-                room_id, topological_ordering, topological_ordering,
-                stream_ordering, after_limit,
-            )
-        )
+        txn.execute(query_after, after_args)
 
         rows = self.cursor_to_dict(txn)
         events_after = [r["event_id"] for r in rows]
@@ -648,10 +743,7 @@ class StreamStore(SQLBaseStore):
                 rows[-1]["stream_ordering"],
             ))
         else:
-            end_token = str(RoomStreamToken(
-                topological_ordering,
-                stream_ordering,
-            ))
+            end_token = str(token)
 
         return {
             "before": {
diff --git a/synapse/storage/tags.py b/synapse/storage/tags.py
index 9da23f34cb..5a2c1aa59b 100644
--- a/synapse/storage/tags.py
+++ b/synapse/storage/tags.py
@@ -68,6 +68,9 @@ class TagsStore(SQLBaseStore):
             A deferred list of tuples of stream_id int, user_id string,
             room_id string, tag string and content string.
         """
+        if last_id == current_id:
+            defer.returnValue([])
+
         def get_all_updated_tags_txn(txn):
             sql = (
                 "SELECT stream_id, user_id, room_id"
diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py
index 6c7481a728..6258ff1725 100644
--- a/synapse/storage/transactions.py
+++ b/synapse/storage/transactions.py
@@ -24,6 +24,7 @@ from collections import namedtuple
 
 import itertools
 import logging
+import ujson as json
 
 logger = logging.getLogger(__name__)
 
@@ -101,7 +102,7 @@ class TransactionStore(SQLBaseStore):
         )
 
         if result and result["response_code"]:
-            return result["response_code"], result["response_json"]
+            return result["response_code"], json.loads(str(result["response_json"]))
         else:
             return None
 
diff --git a/synapse/types.py b/synapse/types.py
index 7b6ae44bdd..5349b0c450 100644
--- a/synapse/types.py
+++ b/synapse/types.py
@@ -18,11 +18,45 @@ from synapse.api.errors import SynapseError
 from collections import namedtuple
 
 
-Requester = namedtuple("Requester", ["user", "access_token_id", "is_guest"])
+Requester = namedtuple("Requester",
+                       ["user", "access_token_id", "is_guest", "device_id"])
+"""
+Represents the user making a request
+
+Attributes:
+    user (UserID):  id of the user making the request
+    access_token_id (int|None):  *ID* of the access token used for this
+        request, or None if it came via the appservice API or similar
+    is_guest (bool):  True if the user making this request is a guest user
+    device_id (str|None):  device_id which was set at authentication time
+"""
+
+
+def create_requester(user_id, access_token_id=None, is_guest=False,
+                     device_id=None):
+    """
+    Create a new ``Requester`` object
+
+    Args:
+        user_id (str|UserID):  id of the user making the request
+        access_token_id (int|None):  *ID* of the access token used for this
+            request, or None if it came via the appservice API or similar
+        is_guest (bool):  True if the user making this request is a guest user
+        device_id (str|None):  device_id which was set at authentication time
+
+    Returns:
+        Requester
+    """
+    if not isinstance(user_id, UserID):
+        user_id = UserID.from_string(user_id)
+    return Requester(user_id, access_token_id, is_guest, device_id)
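+
+# Usage sketch (IDs hypothetical):
+#
+#   requester = create_requester("@alice:example.com", device_id="ABCDEF")
+#   requester.user.to_string()  # "@alice:example.com"
+#   requester.is_guest          # False
+#   requester.device_id         # "ABCDEF"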
 
 
 def get_domain_from_id(string):
-    return string.split(":", 1)[1]
+    try:
+        return string.split(":", 1)[1]
+    except IndexError:
+        raise SynapseError(400, "Invalid ID: %r" % (string,))
 
 
 class DomainSpecificString(
diff --git a/synapse/util/async.py b/synapse/util/async.py
index 0d6f48e2d8..c84b23ff46 100644
--- a/synapse/util/async.py
+++ b/synapse/util/async.py
@@ -102,6 +102,15 @@ class ObservableDeferred(object):
     def observers(self):
         return self._observers
 
+    def has_called(self):
+        return self._result is not None
+
+    def has_succeeded(self):
+        return self._result is not None and self._result[0] is True
+
+    def get_result(self):
+        return self._result[1]
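+
+    # Usage sketch (names assumed):
+    #   od = ObservableDeferred(d)
+    #   if od.has_called() and od.has_succeeded():
+    #       value = od.get_result()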
+
     def __getattr__(self, name):
         return getattr(self._deferred, name)
 
@@ -185,3 +194,85 @@ class Linearizer(object):
                     self.key_to_defer.pop(key, None)
 
         defer.returnValue(_ctx_manager())
+
+
+class ReadWriteLock(object):
+    """A deferred style read write lock.
+
+    Example:
+
+        with (yield read_write_lock.read("test_key")):
+            # do some work
+    """
+
+    # IMPLEMENTATION NOTES
+    #
+    # We track the most recent queued reader and writer deferreds (which get
+    # resolved when they release the lock).
+    #
+    # Read: We know it's safe to acquire a read lock when the latest writer has
+    # been resolved. The new reader is appended to the list of latest readers.
+    #
+    # Write: We know it's safe to acquire the write lock when both the latest
+    # writer and readers have been resolved. The new writer replaces the latest
+    # writer.
+
+    def __init__(self):
+        # Latest readers queued
+        self.key_to_current_readers = {}
+
+        # Latest writer queued
+        self.key_to_current_writer = {}
+
+    @defer.inlineCallbacks
+    def read(self, key):
+        new_defer = defer.Deferred()
+
+        curr_readers = self.key_to_current_readers.setdefault(key, set())
+        curr_writer = self.key_to_current_writer.get(key, None)
+
+        curr_readers.add(new_defer)
+
+        # We wait for the latest writer to finish writing. We can safely ignore
+        # any existing readers... as they're readers.
+        yield curr_writer
+
+        @contextmanager
+        def _ctx_manager():
+            try:
+                yield
+            finally:
+                new_defer.callback(None)
+                self.key_to_current_readers.get(key, set()).discard(new_defer)
+
+        defer.returnValue(_ctx_manager())
+
+    @defer.inlineCallbacks
+    def write(self, key):
+        new_defer = defer.Deferred()
+
+        curr_readers = self.key_to_current_readers.get(key, set())
+        curr_writer = self.key_to_current_writer.get(key, None)
+
+        # We wait on all latest readers and writer.
+        to_wait_on = list(curr_readers)
+        if curr_writer:
+            to_wait_on.append(curr_writer)
+
+        # We can clear the list of current readers since the new writer waits
+        # for them to finish.
+        curr_readers.clear()
+        self.key_to_current_writer[key] = new_defer
+
+        yield defer.gatherResults(to_wait_on)
+
+        @contextmanager
+        def _ctx_manager():
+            try:
+                yield
+            finally:
+                new_defer.callback(None)
+                if self.key_to_current_writer[key] == new_defer:
+                    self.key_to_current_writer.pop(key)
+
+        defer.returnValue(_ctx_manager())
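
A usage sketch matching the class docstring (persist/load are hypothetical
helpers; imports as in this module):

    @defer.inlineCallbacks
    def append_events(lock, room_id, events):
        # a writer waits for the previous writer and all queued readers
        with (yield lock.write(room_id)):
            yield persist(room_id, events)

    @defer.inlineCallbacks
    def read_events(lock, room_id):
        # a reader only waits for the previous writer
        with (yield lock.read(room_id)):
            events = yield load(room_id)
        defer.returnValue(events)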
diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py
index d53569ca49..ebd715c5dc 100644
--- a/synapse/util/caches/__init__.py
+++ b/synapse/util/caches/__init__.py
@@ -24,11 +24,21 @@ DEBUG_CACHES = False
 metrics = synapse.metrics.get_metrics_for("synapse.util.caches")
 
 caches_by_name = {}
-cache_counter = metrics.register_cache(
-    "cache",
-    lambda: {(name,): len(caches_by_name[name]) for name in caches_by_name.keys()},
-    labels=["name"],
-)
+# cache_counter = metrics.register_cache(
+#     "cache",
+#     lambda: {(name,): len(caches_by_name[name]) for name in caches_by_name.keys()},
+#     labels=["name"],
+# )
+
+
+def register_cache(name, cache):
+    caches_by_name[name] = cache
+    return metrics.register_cache(
+        "cache",
+        lambda: len(cache),
+        name,
+    )
+
 
 _string_cache = LruCache(int(5000 * CACHE_SIZE_FACTOR))
 caches_by_name["string_cache"] = _string_cache
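
A sketch of how a cache owner registers itself against the new helper (name
and size illustrative):

    from synapse.util.caches.lrucache import LruCache

    my_cache = LruCache(100)
    my_metrics = register_cache("my_cache", my_cache)

    # on lookups the owner reports hits/misses itself:
    #   my_metrics.inc_hits() / my_metrics.inc_misses()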
diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py
index 758f5982b0..f31dfb22b7 100644
--- a/synapse/util/caches/descriptors.py
+++ b/synapse/util/caches/descriptors.py
@@ -22,7 +22,7 @@ from synapse.util.logcontext import (
     PreserveLoggingContext, preserve_context_over_deferred, preserve_context_over_fn
 )
 
-from . import caches_by_name, DEBUG_CACHES, cache_counter
+from . import DEBUG_CACHES, register_cache
 
 from twisted.internet import defer
 
@@ -33,6 +33,7 @@ import functools
 import inspect
 import threading
 
+
 logger = logging.getLogger(__name__)
 
 
@@ -43,6 +44,15 @@ CACHE_SIZE_FACTOR = float(os.environ.get("SYNAPSE_CACHE_FACTOR", 0.1))
 
 
 class Cache(object):
+    __slots__ = (
+        "cache",
+        "max_entries",
+        "name",
+        "keylen",
+        "sequence",
+        "thread",
+        "metrics",
+    )
 
     def __init__(self, name, max_entries=1000, keylen=1, lru=True, tree=False):
         if lru:
@@ -59,7 +69,7 @@ class Cache(object):
         self.keylen = keylen
         self.sequence = 0
         self.thread = None
-        caches_by_name[name] = self.cache
+        self.metrics = register_cache(name, self.cache)
 
     def check_thread(self):
         expected_thread = self.thread
@@ -74,10 +84,10 @@ class Cache(object):
     def get(self, key, default=_CacheSentinel):
         val = self.cache.get(key, _CacheSentinel)
         if val is not _CacheSentinel:
-            cache_counter.inc_hits(self.name)
+            self.metrics.inc_hits()
             return val
 
-        cache_counter.inc_misses(self.name)
+        self.metrics.inc_misses()
 
         if default is _CacheSentinel:
             raise KeyError()
@@ -293,16 +303,21 @@ class CacheListDescriptor(object):
 
-            # cached is a dict arg -> deferred, where deferred results in a
-            # 2-tuple (`arg`, `result`)
+            # results holds arg -> result for entries already in the cache;
+            # cached_defers holds arg -> deferred, where each deferred
+            # results in a 2-tuple (`arg`, `result`)
-            cached = {}
+            results = {}
+            cached_defers = {}
             missing = []
             for arg in list_args:
                 key = list(keyargs)
                 key[self.list_pos] = arg
 
                 try:
-                    res = cache.get(tuple(key)).observe()
-                    res.addCallback(lambda r, arg: (arg, r), arg)
-                    cached[arg] = res
+                    res = cache.get(tuple(key))
+                    if not res.has_succeeded():
+                        res = res.observe()
+                        res.addCallback(lambda r, arg: (arg, r), arg)
+                        cached_defers[arg] = res
+                    else:
+                        results[arg] = res.get_result()
                 except KeyError:
                     missing.append(arg)
 
@@ -340,12 +355,21 @@ class CacheListDescriptor(object):
                     res = observer.observe()
                     res.addCallback(lambda r, arg: (arg, r), arg)
 
-                    cached[arg] = res
-
-            return preserve_context_over_deferred(defer.gatherResults(
-                cached.values(),
-                consumeErrors=True,
-            ).addErrback(unwrapFirstError).addCallback(lambda res: dict(res)))
+                    cached_defers[arg] = res
+
+            if cached_defers:
+                def update_results_dict(res):
+                    results.update(res)
+                    return results
+
+                return preserve_context_over_deferred(defer.gatherResults(
+                    cached_defers.values(),
+                    consumeErrors=True,
+                ).addCallback(update_results_dict).addErrback(
+                    unwrapFirstError
+                ))
+            else:
+                return results
 
         obj.__dict__[self.orig.__name__] = wrapped
 
diff --git a/synapse/util/caches/dictionary_cache.py b/synapse/util/caches/dictionary_cache.py
index f92d80542b..b0ca1bb79d 100644
--- a/synapse/util/caches/dictionary_cache.py
+++ b/synapse/util/caches/dictionary_cache.py
@@ -15,7 +15,7 @@
 
 from synapse.util.caches.lrucache import LruCache
 from collections import namedtuple
-from . import caches_by_name, cache_counter
+from . import register_cache
 import threading
 import logging
 
@@ -43,7 +43,7 @@ class DictionaryCache(object):
             __slots__ = []
 
         self.sentinel = Sentinel()
-        caches_by_name[name] = self.cache
+        self.metrics = register_cache(name, self.cache)
 
     def check_thread(self):
         expected_thread = self.thread
@@ -58,7 +58,7 @@ class DictionaryCache(object):
     def get(self, key, dict_keys=None):
         entry = self.cache.get(key, self.sentinel)
         if entry is not self.sentinel:
-            cache_counter.inc_hits(self.name)
+            self.metrics.inc_hits()
 
             if dict_keys is None:
                 return DictionaryEntry(entry.full, dict(entry.value))
@@ -69,7 +69,7 @@ class DictionaryCache(object):
                     if k in entry.value
                 })
 
-        cache_counter.inc_misses(self.name)
+        self.metrics.inc_misses()
         return DictionaryEntry(False, {})
 
     def invalidate(self, key):
diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py
index 2b68c1ac93..080388958f 100644
--- a/synapse/util/caches/expiringcache.py
+++ b/synapse/util/caches/expiringcache.py
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.util.caches import cache_counter, caches_by_name
+from synapse.util.caches import register_cache
 
 import logging
 
@@ -49,7 +49,7 @@ class ExpiringCache(object):
 
         self._cache = {}
 
-        caches_by_name[cache_name] = self._cache
+        self.metrics = register_cache(cache_name, self._cache)
 
     def start(self):
         if not self._expiry_ms:
@@ -78,9 +78,9 @@ class ExpiringCache(object):
     def __getitem__(self, key):
         try:
             entry = self._cache[key]
-            cache_counter.inc_hits(self._cache_name)
+            self.metrics.inc_hits()
         except KeyError:
-            cache_counter.inc_misses(self._cache_name)
+            self.metrics.inc_misses()
             raise
 
         if self._reset_expiry_on_get:
diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py
index 36686b479e..00af539880 100644
--- a/synapse/util/caches/response_cache.py
+++ b/synapse/util/caches/response_cache.py
@@ -24,9 +24,12 @@ class ResponseCache(object):
     used rather than trying to compute a new response.
     """
 
-    def __init__(self):
+    def __init__(self, hs, timeout_ms=0):
         self.pending_result_cache = {}  # Requests that haven't finished yet.
 
+        self.clock = hs.get_clock()
+        self.timeout_sec = timeout_ms / 1000.
+
     def get(self, key):
         result = self.pending_result_cache.get(key)
         if result is not None:
@@ -39,7 +42,13 @@ class ResponseCache(object):
         self.pending_result_cache[key] = result
 
         def remove(r):
-            self.pending_result_cache.pop(key, None)
+            if self.timeout_sec:
+                self.clock.call_later(
+                    self.timeout_sec,
+                    self.pending_result_cache.pop, key, None,
+                )
+            else:
+                self.pending_result_cache.pop(key, None)
             return r
 
         result.addBoth(remove)
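
A usage sketch of the timeout behaviour (hs, request_key and handle_request
are assumptions, not part of the patch):

    cache = ResponseCache(hs, timeout_ms=30 * 1000)

    result = cache.get(request_key)
    if result is None:
        result = cache.set(request_key, handle_request(request))
    # with a non-zero timeout_ms, a finished entry lingers for 30s, so
    # duplicate requests inside that window share the cached response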
diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py
index ea8a74ca69..3c051dabc4 100644
--- a/synapse/util/caches/stream_change_cache.py
+++ b/synapse/util/caches/stream_change_cache.py
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.util.caches import cache_counter, caches_by_name
+from synapse.util.caches import register_cache
 
 
 from blist import sorteddict
@@ -42,7 +42,7 @@ class StreamChangeCache(object):
         self._cache = sorteddict()
         self._earliest_known_stream_pos = current_stream_pos
         self.name = name
-        caches_by_name[self.name] = self._cache
+        self.metrics = register_cache(self.name, self._cache)
 
         for entity, stream_pos in prefilled_cache.items():
             self.entity_has_changed(entity, stream_pos)
@@ -53,19 +53,19 @@ class StreamChangeCache(object):
         assert type(stream_pos) is int
 
         if stream_pos < self._earliest_known_stream_pos:
-            cache_counter.inc_misses(self.name)
+            self.metrics.inc_misses()
             return True
 
         latest_entity_change_pos = self._entity_to_key.get(entity, None)
         if latest_entity_change_pos is None:
-            cache_counter.inc_hits(self.name)
+            self.metrics.inc_hits()
             return False
 
         if stream_pos < latest_entity_change_pos:
-            cache_counter.inc_misses(self.name)
+            self.metrics.inc_misses()
             return True
 
-        cache_counter.inc_hits(self.name)
+        self.metrics.inc_hits()
         return False
 
     def get_entities_changed(self, entities, stream_pos):
@@ -82,10 +82,10 @@ class StreamChangeCache(object):
                 self._cache[k] for k in keys[i:]
             ).intersection(entities)
 
-            cache_counter.inc_hits(self.name)
+            self.metrics.inc_hits()
         else:
             result = entities
-            cache_counter.inc_misses(self.name)
+            self.metrics.inc_misses()
 
         return result
 
diff --git a/synapse/util/distributor.py b/synapse/util/distributor.py
index d7cccc06b1..e68f94ce77 100644
--- a/synapse/util/distributor.py
+++ b/synapse/util/distributor.py
@@ -27,10 +27,6 @@ import logging
 logger = logging.getLogger(__name__)
 
 
-def registered_user(distributor, user):
-    return distributor.fire("registered_user", user)
-
-
 def user_left_room(distributor, user, room_id):
     return preserve_context_over_fn(
         distributor.fire,
diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py
index 5316259d15..7a87045f87 100644
--- a/synapse/util/logcontext.py
+++ b/synapse/util/logcontext.py
@@ -317,7 +317,6 @@ def preserve_fn(f):
     def g(*args, **kwargs):
         with PreserveLoggingContext(current):
             return f(*args, **kwargs)
-
     return g
 
 
diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py
index e1f374807e..76f301f549 100644
--- a/synapse/util/metrics.py
+++ b/synapse/util/metrics.py
@@ -13,10 +13,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from twisted.internet import defer
 
 from synapse.util.logcontext import LoggingContext
 import synapse.metrics
 
+from functools import wraps
 import logging
 
 
@@ -47,6 +49,18 @@ block_db_txn_duration = metrics.register_distribution(
 )
 
 
+def measure_func(name):
+    def wrapper(func):
+        @wraps(func)
+        @defer.inlineCallbacks
+        def measured_func(self, *args, **kwargs):
+            with Measure(self.clock, name):
+                r = yield func(self, *args, **kwargs)
+            defer.returnValue(r)
+        return measured_func
+    return wrapper
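+
+# Usage sketch (handler hypothetical; note measure_func assumes the
+# decorated method's instance has a ``self.clock``):
+#
+#   class SomeHandler(object):
+#       def __init__(self, hs):
+#           self.clock = hs.get_clock()
+#
+#       @measure_func("some_handler.do_work")
+#       @defer.inlineCallbacks
+#       def do_work(self):
+#           ...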
+
+
 class Measure(object):
     __slots__ = [
         "clock", "name", "start_context", "start", "new_context", "ru_utime",
@@ -64,7 +78,6 @@ class Measure(object):
         self.start = self.clock.time_msec()
         self.start_context = LoggingContext.current_context()
         if not self.start_context:
-            logger.warn("Entered Measure without log context: %s", self.name)
             self.start_context = LoggingContext("Measure")
             self.start_context.__enter__()
             self.created_context = True
@@ -84,8 +97,8 @@ class Measure(object):
 
         if context != self.start_context:
             logger.warn(
-                "Context have unexpectedly changed from '%s' to '%s'. (%r)",
-                context, self.start_context, self.name
+                "Context has unexpectedly changed from '%s' to '%s'. (%r)",
+                self.start_context, context, self.name
             )
             return
 
diff --git a/synapse/util/presentable_names.py b/synapse/util/presentable_names.py
index 3efa8a8206..f68676e9e7 100644
--- a/synapse/util/presentable_names.py
+++ b/synapse/util/presentable_names.py
@@ -14,6 +14,9 @@
 # limitations under the License.
 
 import re
+import logging
+
+logger = logging.getLogger(__name__)
 
 # intentionally looser than what aliases we allow to be registered since
 # other HSes may allow aliases that we would not
@@ -22,7 +25,8 @@ ALIAS_RE = re.compile(r"^#.*:.+$")
 ALL_ALONE = "Empty Room"
 
 
-def calculate_room_name(room_state, user_id, fallback_to_members=True):
+def calculate_room_name(room_state, user_id, fallback_to_members=True,
+                        fallback_to_single_member=True):
     """
     Works out a user-facing name for the given room as per Matrix
     spec recommendations.
@@ -79,7 +83,10 @@ def calculate_room_name(room_state, user_id, fallback_to_members=True):
     ):
         if ("m.room.member", my_member_event.sender) in room_state:
             inviter_member_event = room_state[("m.room.member", my_member_event.sender)]
-            return "Invite from %s" % (name_from_member_event(inviter_member_event),)
+            if fallback_to_single_member:
+                return "Invite from %s" % (name_from_member_event(inviter_member_event),)
+            else:
+                return None
         else:
             return "Room Invite"
 
@@ -105,19 +112,29 @@ def calculate_room_name(room_state, user_id, fallback_to_members=True):
             # or inbound invite, or outbound 3PID invite.
             if all_members[0].sender == user_id:
                 if "m.room.third_party_invite" in room_state_bytype:
-                    third_party_invites = room_state_bytype["m.room.third_party_invite"]
+                    third_party_invites = (
+                        room_state_bytype["m.room.third_party_invite"].values()
+                    )
+
                     if len(third_party_invites) > 0:
                         # technically third party invite events are not member
                         # events, but they are close enough
-                        return "Inviting %s" (
-                            descriptor_from_member_events(third_party_invites)
-                        )
+
+                        # FIXME: no they're not - they look nothing like a member;
+                        # they have a great big encrypted thing as their name to
+                        # prevent leaking the 3PID name...
+                        # return "Inviting %s" % (
+                        #     descriptor_from_member_events(third_party_invites)
+                        # )
+                        return "Inviting email address"
                     else:
                         return ALL_ALONE
             else:
                 return name_from_member_event(all_members[0])
         else:
             return ALL_ALONE
+    elif len(other_members) == 1 and not fallback_to_single_member:
+        return None
     else:
         return descriptor_from_member_events(other_members)
 
diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py
index 43cf11f3f6..49527f4d21 100644
--- a/synapse/util/retryutils.py
+++ b/synapse/util/retryutils.py
@@ -128,7 +128,7 @@ class RetryDestinationLimiter(object):
             )
 
         valid_err_code = False
-        if exc_type is CodeMessageException:
+        if exc_type is not None and issubclass(exc_type, CodeMessageException):
             valid_err_code = 0 <= exc_val.code < 500
 
         if exc_type is None or valid_err_code:
diff --git a/synapse/util/versionstring.py b/synapse/util/versionstring.py
index a4f156cb3b..52086df465 100644
--- a/synapse/util/versionstring.py
+++ b/synapse/util/versionstring.py
@@ -21,7 +21,7 @@ import logging
 logger = logging.getLogger(__name__)
 
 
-def get_version_string(name, module):
+def get_version_string(module):
     try:
         null = open(os.devnull, 'w')
         cwd = os.path.dirname(os.path.abspath(module.__file__))
@@ -74,11 +74,11 @@ def get_version_string(name, module):
             )
 
             return (
-                "%s/%s (%s)" % (
-                    name, module.__version__, git_version,
+                "%s (%s)" % (
+                    module.__version__, git_version,
                 )
             ).encode("ascii")
     except Exception as e:
         logger.info("Failed to check for git repository: %s", e)
 
-    return ("%s/%s" % (name, module.__version__,)).encode("ascii")
+    return module.__version__.encode("ascii")
diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py
index ad269af0ec..e91723ca3d 100644
--- a/tests/api/test_auth.py
+++ b/tests/api/test_auth.py
@@ -45,6 +45,7 @@ class AuthTestCase(unittest.TestCase):
         user_info = {
             "name": self.test_user,
             "token_id": "ditto",
+            "device_id": "device",
         }
         self.store.get_user_by_access_token = Mock(return_value=user_info)
 
@@ -143,7 +144,10 @@ class AuthTestCase(unittest.TestCase):
         # TODO(danielwh): Remove this mock when we remove the
         # get_user_by_access_token fallback.
         self.store.get_user_by_access_token = Mock(
-            return_value={"name": "@baldrick:matrix.org"}
+            return_value={
+                "name": "@baldrick:matrix.org",
+                "device_id": "device",
+            }
         )
 
         user_id = "@baldrick:matrix.org"
@@ -158,6 +162,10 @@ class AuthTestCase(unittest.TestCase):
         user = user_info["user"]
         self.assertEqual(UserID.from_string(user_id), user)
 
+        # TODO: device_id should come from the macaroon, but currently comes
+        # from the db.
+        self.assertEqual(user_info["device_id"], "device")
+
     @defer.inlineCallbacks
     def test_get_guest_user_from_macaroon(self):
         user_id = "@baldrick:matrix.org"
@@ -281,7 +289,7 @@ class AuthTestCase(unittest.TestCase):
         macaroon.add_first_party_caveat("gen = 1")
         macaroon.add_first_party_caveat("type = access")
         macaroon.add_first_party_caveat("user_id = %s" % (user,))
-        macaroon.add_first_party_caveat("time < 1")  # ms
+        macaroon.add_first_party_caveat("time < -2000")  # ms
 
         self.hs.clock.now = 5000  # seconds
         self.hs.config.expire_access_token = True
@@ -293,3 +301,32 @@ class AuthTestCase(unittest.TestCase):
             yield self.auth.get_user_from_macaroon(macaroon.serialize())
         self.assertEqual(401, cm.exception.code)
         self.assertIn("Invalid macaroon", cm.exception.msg)
+
+    @defer.inlineCallbacks
+    def test_get_user_from_macaroon_with_valid_duration(self):
+        # TODO(danielwh): Remove this mock when we remove the
+        # get_user_by_access_token fallback.
+        self.store.get_user_by_access_token = Mock(
+            return_value={"name": "@baldrick:matrix.org"}
+        )
+
+        user_id = "@baldrick:matrix.org"
+        macaroon = pymacaroons.Macaroon(
+            location=self.hs.config.server_name,
+            identifier="key",
+            key=self.hs.config.macaroon_secret_key)
+        macaroon.add_first_party_caveat("gen = 1")
+        macaroon.add_first_party_caveat("type = access")
+        macaroon.add_first_party_caveat("user_id = %s" % (user_id,))
+        macaroon.add_first_party_caveat("time < 900000000")  # ms
+
+        self.hs.clock.now = 5000  # seconds
+        self.hs.config.expire_access_token = True
+
+        user_info = yield self.auth.get_user_from_macaroon(macaroon.serialize())
+        user = user_info["user"]
+        self.assertEqual(UserID.from_string(user_id), user)
diff --git a/tests/config/test_generate.py b/tests/config/test_generate.py
index 4329d73974..8f57fbeb23 100644
--- a/tests/config/test_generate.py
+++ b/tests/config/test_generate.py
@@ -30,7 +30,7 @@ class ConfigGenerationTestCase(unittest.TestCase):
         shutil.rmtree(self.dir)
 
     def test_generate_config_generates_files(self):
-        HomeServerConfig.load_config("", [
+        HomeServerConfig.load_or_generate_config("", [
             "--generate-config",
             "-c", self.file,
             "--report-stats=yes",
diff --git a/tests/config/test_load.py b/tests/config/test_load.py
index bf46233c5c..161a87d7e3 100644
--- a/tests/config/test_load.py
+++ b/tests/config/test_load.py
@@ -34,6 +34,8 @@ class ConfigLoadingTestCase(unittest.TestCase):
         self.generate_config_and_remove_lines_containing("server_name")
         with self.assertRaises(Exception):
             HomeServerConfig.load_config("", ["-c", self.file])
+        with self.assertRaises(Exception):
+            HomeServerConfig.load_or_generate_config("", ["-c", self.file])
 
     def test_generates_and_loads_macaroon_secret_key(self):
         self.generate_config()
@@ -54,11 +56,24 @@ class ConfigLoadingTestCase(unittest.TestCase):
                 "was: %r" % (config.macaroon_secret_key,)
             )
 
+        config = HomeServerConfig.load_or_generate_config("", ["-c", self.file])
+        self.assertTrue(
+            hasattr(config, "macaroon_secret_key"),
+            "Want config to have attr macaroon_secret_key"
+        )
+        if len(config.macaroon_secret_key) < 5:
+            self.fail(
+                "Want macaroon secret key to be string of at least length 5,"
+                "was: %r" % (config.macaroon_secret_key,)
+            )
+
     def test_load_succeeds_if_macaroon_secret_key_missing(self):
         self.generate_config_and_remove_lines_containing("macaroon")
         config1 = HomeServerConfig.load_config("", ["-c", self.file])
         config2 = HomeServerConfig.load_config("", ["-c", self.file])
+        config3 = HomeServerConfig.load_or_generate_config("", ["-c", self.file])
         self.assertEqual(config1.macaroon_secret_key, config2.macaroon_secret_key)
+        self.assertEqual(config1.macaroon_secret_key, config3.macaroon_secret_key)
 
     def test_disable_registration(self):
         self.generate_config()
@@ -70,14 +85,17 @@ class ConfigLoadingTestCase(unittest.TestCase):
         config = HomeServerConfig.load_config("", ["-c", self.file])
         self.assertFalse(config.enable_registration)
 
+        config = HomeServerConfig.load_or_generate_config("", ["-c", self.file])
+        self.assertFalse(config.enable_registration)
+
         # Check that either config value is clobbered by the command line.
-        config = HomeServerConfig.load_config("", [
+        config = HomeServerConfig.load_or_generate_config("", [
             "-c", self.file, "--enable-registration"
         ])
         self.assertTrue(config.enable_registration)
 
     def generate_config(self):
-        HomeServerConfig.load_config("", [
+        HomeServerConfig.load_or_generate_config("", [
             "--generate-config",
             "-c", self.file,
             "--report-stats=yes",
diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py
index 7ddbbb9b4a..a884c95f8d 100644
--- a/tests/handlers/test_appservice.py
+++ b/tests/handlers/test_appservice.py
@@ -30,9 +30,9 @@ class AppServiceHandlerTestCase(unittest.TestCase):
         self.mock_scheduler = Mock()
         hs = Mock()
         hs.get_datastore = Mock(return_value=self.mock_store)
-        self.handler = ApplicationServicesHandler(
-            hs, self.mock_as_api, self.mock_scheduler
-        )
+        hs.get_application_service_api = Mock(return_value=self.mock_as_api)
+        hs.get_application_service_scheduler = Mock(return_value=self.mock_scheduler)
+        self.handler = ApplicationServicesHandler(hs)
 
     @defer.inlineCallbacks
     def test_notify_interested_services(self):
diff --git a/tests/handlers/test_auth.py b/tests/handlers/test_auth.py
index 21077cbe9a..4a8cd19acf 100644
--- a/tests/handlers/test_auth.py
+++ b/tests/handlers/test_auth.py
@@ -14,11 +14,13 @@
 # limitations under the License.
 
 import pymacaroons
+from twisted.internet import defer
 
+import synapse
+import synapse.api.errors
 from synapse.handlers.auth import AuthHandler
 from tests import unittest
 from tests.utils import setup_test_homeserver
-from twisted.internet import defer
 
 
 class AuthHandlers(object):
@@ -31,11 +33,12 @@ class AuthTestCase(unittest.TestCase):
     def setUp(self):
         self.hs = yield setup_test_homeserver(handlers=None)
         self.hs.handlers = AuthHandlers(self.hs)
+        self.auth_handler = self.hs.handlers.auth_handler
 
     def test_token_is_a_macaroon(self):
         self.hs.config.macaroon_secret_key = "this key is a huge secret"
 
-        token = self.hs.handlers.auth_handler.generate_access_token("some_user")
+        token = self.auth_handler.generate_access_token("some_user")
         # Check that we can parse the thing with pymacaroons
         macaroon = pymacaroons.Macaroon.deserialize(token)
         # The most basic of sanity checks
@@ -46,7 +49,7 @@ class AuthTestCase(unittest.TestCase):
         self.hs.config.macaroon_secret_key = "this key is a massive secret"
         self.hs.clock.now = 5000
 
-        token = self.hs.handlers.auth_handler.generate_access_token("a_user")
+        token = self.auth_handler.generate_access_token("a_user")
         macaroon = pymacaroons.Macaroon.deserialize(token)
 
         def verify_gen(caveat):
@@ -67,3 +70,46 @@ class AuthTestCase(unittest.TestCase):
         v.satisfy_general(verify_type)
         v.satisfy_general(verify_expiry)
         v.verify(macaroon, self.hs.config.macaroon_secret_key)
+
+    def test_short_term_login_token_gives_user_id(self):
+        self.hs.clock.now = 1000
+
+        token = self.auth_handler.generate_short_term_login_token(
+            "a_user", 5000
+        )
+
+        self.assertEqual(
+            "a_user",
+            self.auth_handler.validate_short_term_login_token_and_get_user_id(
+                token
+            )
+        )
+
+        # when we advance the clock, the token should be rejected
+        self.hs.clock.now = 6000
+        with self.assertRaises(synapse.api.errors.AuthError):
+            self.auth_handler.validate_short_term_login_token_and_get_user_id(
+                token
+            )
+
+    def test_short_term_login_token_cannot_replace_user_id(self):
+        token = self.auth_handler.generate_short_term_login_token(
+            "a_user", 5000
+        )
+        macaroon = pymacaroons.Macaroon.deserialize(token)
+
+        self.assertEqual(
+            "a_user",
+            self.auth_handler.validate_short_term_login_token_and_get_user_id(
+                macaroon.serialize()
+            )
+        )
+
+        # add another "user_id" caveat, which might allow us to override the
+        # user_id.
+        macaroon.add_first_party_caveat("user_id = b_user")
+
+        with self.assertRaises(synapse.api.errors.AuthError):
+            self.auth_handler.validate_short_term_login_token_and_get_user_id(
+                macaroon.serialize()
+            )
diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py
new file mode 100644
index 0000000000..85a970a6c9
--- /dev/null
+++ b/tests/handlers/test_device.py
@@ -0,0 +1,184 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+import synapse.api.errors
+import synapse.handlers.device
+
+import synapse.storage
+from synapse import types
+from tests import unittest, utils
+
+user1 = "@boris:aaa"
+user2 = "@theresa:bbb"
+
+
+class DeviceTestCase(unittest.TestCase):
+    def __init__(self, *args, **kwargs):
+        super(DeviceTestCase, self).__init__(*args, **kwargs)
+        self.store = None    # type: synapse.storage.DataStore
+        self.handler = None  # type: synapse.handlers.device.DeviceHandler
+        self.clock = None    # type: utils.MockClock
+
+    @defer.inlineCallbacks
+    def setUp(self):
+        hs = yield utils.setup_test_homeserver(handlers=None)
+        self.handler = synapse.handlers.device.DeviceHandler(hs)
+        self.store = hs.get_datastore()
+        self.clock = hs.get_clock()
+
+    @defer.inlineCallbacks
+    def test_device_is_created_if_doesnt_exist(self):
+        res = yield self.handler.check_device_registered(
+            user_id="boris",
+            device_id="fco",
+            initial_device_display_name="display name"
+        )
+        self.assertEqual(res, "fco")
+
+        dev = yield self.handler.store.get_device("boris", "fco")
+        self.assertEqual(dev["display_name"], "display name")
+
+    @defer.inlineCallbacks
+    def test_device_is_preserved_if_exists(self):
+        res1 = yield self.handler.check_device_registered(
+            user_id="boris",
+            device_id="fco",
+            initial_device_display_name="display name"
+        )
+        self.assertEqual(res1, "fco")
+
+        res2 = yield self.handler.check_device_registered(
+            user_id="boris",
+            device_id="fco",
+            initial_device_display_name="new display name"
+        )
+        self.assertEqual(res2, "fco")
+
+        dev = yield self.handler.store.get_device("boris", "fco")
+        self.assertEqual(dev["display_name"], "display name")
+
+    @defer.inlineCallbacks
+    def test_device_id_is_made_up_if_unspecified(self):
+        device_id = yield self.handler.check_device_registered(
+            user_id="theresa",
+            device_id=None,
+            initial_device_display_name="display"
+        )
+
+        dev = yield self.handler.store.get_device("theresa", device_id)
+        self.assertEqual(dev["display_name"], "display")
+
+    @defer.inlineCallbacks
+    def test_get_devices_by_user(self):
+        yield self._record_users()
+
+        res = yield self.handler.get_devices_by_user(user1)
+        self.assertEqual(3, len(res))
+        device_map = {
+            d["device_id"]: d for d in res
+        }
+        self.assertDictContainsSubset({
+            "user_id": user1,
+            "device_id": "xyz",
+            "display_name": "display 0",
+            "last_seen_ip": None,
+            "last_seen_ts": None,
+        }, device_map["xyz"])
+        self.assertDictContainsSubset({
+            "user_id": user1,
+            "device_id": "fco",
+            "display_name": "display 1",
+            "last_seen_ip": "ip1",
+            "last_seen_ts": 1000000,
+        }, device_map["fco"])
+        self.assertDictContainsSubset({
+            "user_id": user1,
+            "device_id": "abc",
+            "display_name": "display 2",
+            "last_seen_ip": "ip3",
+            "last_seen_ts": 3000000,
+        }, device_map["abc"])
+
+    @defer.inlineCallbacks
+    def test_get_device(self):
+        yield self._record_users()
+
+        res = yield self.handler.get_device(user1, "abc")
+        self.assertDictContainsSubset({
+            "user_id": user1,
+            "device_id": "abc",
+            "display_name": "display 2",
+            "last_seen_ip": "ip3",
+            "last_seen_ts": 3000000,
+        }, res)
+
+    @defer.inlineCallbacks
+    def test_delete_device(self):
+        yield self._record_users()
+
+        # delete the device
+        yield self.handler.delete_device(user1, "abc")
+
+        # check the device was deleted
+        with self.assertRaises(synapse.api.errors.NotFoundError):
+            yield self.handler.get_device(user1, "abc")
+
+        # we'd like to check the access token was invalidated, but that's a
+        # bit of a PITA.
+
+    @defer.inlineCallbacks
+    def test_update_device(self):
+        yield self._record_users()
+
+        update = {"display_name": "new display"}
+        yield self.handler.update_device(user1, "abc", update)
+
+        res = yield self.handler.get_device(user1, "abc")
+        self.assertEqual(res["display_name"], "new display")
+
+    @defer.inlineCallbacks
+    def test_update_unknown_device(self):
+        update = {"display_name": "new_display"}
+        with self.assertRaises(synapse.api.errors.NotFoundError):
+            yield self.handler.update_device("user_id", "unknown_device_id",
+                                             update)
+
+    @defer.inlineCallbacks
+    def _record_users(self):
+        # check this works for both devices which have a recorded client_ip,
+        # and those which don't.
+        yield self._record_user(user1, "xyz", "display 0")
+        yield self._record_user(user1, "fco", "display 1", "token1", "ip1")
+        yield self._record_user(user1, "abc", "display 2", "token2", "ip2")
+        yield self._record_user(user1, "abc", "display 2", "token3", "ip3")
+
+        yield self._record_user(user2, "def", "display", "token4", "ip4")
+
+    @defer.inlineCallbacks
+    def _record_user(self, user_id, device_id, display_name,
+                     access_token=None, ip=None):
+        device_id = yield self.handler.check_device_registered(
+            user_id=user_id,
+            device_id=device_id,
+            initial_device_display_name=display_name
+        )
+
+        if ip is not None:
+            yield self.store.insert_client_ip(
+                types.UserID.from_string(user_id),
+                access_token, ip, "user_agent", device_id)
+            self.clock.advance_time(1000)
diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py
new file mode 100644
index 0000000000..878a54dc34
--- /dev/null
+++ b/tests/handlers/test_e2e_keys.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import mock
+from twisted.internet import defer
+
+import synapse.api.errors
+import synapse.handlers.e2e_keys
+
+import synapse.storage
+from tests import unittest, utils
+
+
+class E2eKeysHandlerTestCase(unittest.TestCase):
+    def __init__(self, *args, **kwargs):
+        super(E2eKeysHandlerTestCase, self).__init__(*args, **kwargs)
+        self.hs = None       # type: synapse.server.HomeServer
+        self.handler = None  # type: synapse.handlers.e2e_keys.E2eKeysHandler
+
+    @defer.inlineCallbacks
+    def setUp(self):
+        self.hs = yield utils.setup_test_homeserver(
+            handlers=None,
+            replication_layer=mock.Mock(),
+        )
+        self.handler = synapse.handlers.e2e_keys.E2eKeysHandler(self.hs)
+
+    @defer.inlineCallbacks
+    def test_query_local_devices_no_devices(self):
+        """If the user has no devices, we expect an empty list.
+        """
+        local_user = "@boris:" + self.hs.hostname
+        res = yield self.handler.query_local_devices({local_user: None})
+        self.assertDictEqual(res, {local_user: {}})
diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py
index 87c795fcfa..b531ba8540 100644
--- a/tests/handlers/test_presence.py
+++ b/tests/handlers/test_presence.py
@@ -264,7 +264,7 @@ class PresenceTimeoutTestCase(unittest.TestCase):
         )
 
         new_state = handle_timeout(
-            state, is_mine=True, user_to_num_current_syncs={}, now=now
+            state, is_mine=True, syncing_user_ids=set(), now=now
         )
 
         self.assertIsNotNone(new_state)
@@ -282,7 +282,7 @@ class PresenceTimeoutTestCase(unittest.TestCase):
         )
 
         new_state = handle_timeout(
-            state, is_mine=True, user_to_num_current_syncs={}, now=now
+            state, is_mine=True, syncing_user_ids=set(), now=now
         )
 
         self.assertIsNotNone(new_state)
@@ -300,9 +300,7 @@ class PresenceTimeoutTestCase(unittest.TestCase):
         )
 
         new_state = handle_timeout(
-            state, is_mine=True, user_to_num_current_syncs={
-                user_id: 1,
-            }, now=now
+            state, is_mine=True, syncing_user_ids=set([user_id]), now=now
         )
 
         self.assertIsNotNone(new_state)
@@ -321,7 +319,7 @@ class PresenceTimeoutTestCase(unittest.TestCase):
         )
 
         new_state = handle_timeout(
-            state, is_mine=True, user_to_num_current_syncs={}, now=now
+            state, is_mine=True, syncing_user_ids=set(), now=now
         )
 
         self.assertIsNotNone(new_state)
@@ -340,7 +338,7 @@ class PresenceTimeoutTestCase(unittest.TestCase):
         )
 
         new_state = handle_timeout(
-            state, is_mine=True, user_to_num_current_syncs={}, now=now
+            state, is_mine=True, syncing_user_ids=set(), now=now
         )
 
         self.assertIsNone(new_state)
@@ -358,7 +356,7 @@ class PresenceTimeoutTestCase(unittest.TestCase):
         )
 
         new_state = handle_timeout(
-            state, is_mine=False, user_to_num_current_syncs={}, now=now
+            state, is_mine=False, syncing_user_ids=set(), now=now
         )
 
         self.assertIsNotNone(new_state)
@@ -377,7 +375,7 @@ class PresenceTimeoutTestCase(unittest.TestCase):
         )
 
         new_state = handle_timeout(
-            state, is_mine=True, user_to_num_current_syncs={}, now=now
+            state, is_mine=True, syncing_user_ids=set(), now=now
         )
 
         self.assertIsNotNone(new_state)
diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py
index 4f2c14e4ff..f1f664275f 100644
--- a/tests/handlers/test_profile.py
+++ b/tests/handlers/test_profile.py
@@ -19,11 +19,12 @@ from twisted.internet import defer
 
 from mock import Mock, NonCallableMock
 
+import synapse.types
 from synapse.api.errors import AuthError
 from synapse.handlers.profile import ProfileHandler
 from synapse.types import UserID
 
-from tests.utils import setup_test_homeserver, requester_for_user
+from tests.utils import setup_test_homeserver
 
 
 class ProfileHandlers(object):
@@ -86,7 +87,7 @@ class ProfileTestCase(unittest.TestCase):
     def test_set_my_name(self):
         yield self.handler.set_displayname(
             self.frank,
-            requester_for_user(self.frank),
+            synapse.types.create_requester(self.frank),
             "Frank Jr."
         )
 
@@ -99,7 +100,7 @@ class ProfileTestCase(unittest.TestCase):
     def test_set_my_name_noauth(self):
         d = self.handler.set_displayname(
             self.frank,
-            requester_for_user(self.bob),
+            synapse.types.create_requester(self.bob),
             "Frank Jr."
         )
 
@@ -144,7 +145,8 @@ class ProfileTestCase(unittest.TestCase):
     @defer.inlineCallbacks
     def test_set_my_avatar(self):
         yield self.handler.set_avatar_url(
-            self.frank, requester_for_user(self.frank), "http://my.server/pic.gif"
+            self.frank, synapse.types.create_requester(self.frank),
+            "http://my.server/pic.gif"
         )
 
         self.assertEquals(
diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py
index 8b7be96bd9..a7de3c7c17 100644
--- a/tests/handlers/test_register.py
+++ b/tests/handlers/test_register.py
@@ -17,6 +17,7 @@ from twisted.internet import defer
 from .. import unittest
 
 from synapse.handlers.register import RegistrationHandler
+from synapse.types import UserID
 
 from tests.utils import setup_test_homeserver
 
@@ -36,31 +37,43 @@ class RegistrationTestCase(unittest.TestCase):
         self.mock_distributor = Mock()
         self.mock_distributor.declare("registered_user")
         self.mock_captcha_client = Mock()
-        hs = yield setup_test_homeserver(
+        self.hs = yield setup_test_homeserver(
             handlers=None,
             http_client=None,
             expire_access_token=True)
-        hs.handlers = RegistrationHandlers(hs)
-        self.handler = hs.get_handlers().registration_handler
-        hs.get_handlers().profile_handler = Mock()
+        self.auth_handler = Mock(
+            generate_access_token=Mock(return_value='secret'))
+        self.hs.handlers = RegistrationHandlers(self.hs)
+        self.handler = self.hs.get_handlers().registration_handler
+        self.hs.get_handlers().profile_handler = Mock()
         self.mock_handler = Mock(spec=[
-            "generate_short_term_login_token",
+            "generate_access_token",
         ])
-
-        hs.get_handlers().auth_handler = self.mock_handler
+        self.hs.get_auth_handler = Mock(return_value=self.auth_handler)
 
     @defer.inlineCallbacks
     def test_user_is_created_and_logged_in_if_doesnt_exist(self):
-        """
-        Returns:
-            The user doess not exist in this case so it will register and log it in
-        """
         duration_ms = 200
         local_part = "someone"
         display_name = "someone"
         user_id = "@someone:test"
-        mock_token = self.mock_handler.generate_short_term_login_token
-        mock_token.return_value = 'secret'
+        result_user_id, result_token = yield self.handler.get_or_create_user(
+            local_part, display_name, duration_ms)
+        self.assertEquals(result_user_id, user_id)
+        self.assertEquals(result_token, 'secret')
+
+    @defer.inlineCallbacks
+    def test_if_user_exists(self):
+        store = self.hs.get_datastore()
+        frank = UserID.from_string("@frank:test")
+        yield store.register(
+            user_id=frank.to_string(),
+            token="jkv;g498752-43gj['eamb!-5",
+            password_hash=None)
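+        # frank is already registered, so this should just log him in again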
+        duration_ms = 200
+        local_part = "frank"
+        display_name = "Frank"
+        user_id = "@frank:test"
         result_user_id, result_token = yield self.handler.get_or_create_user(
             local_part, display_name, duration_ms)
         self.assertEquals(result_user_id, user_id)
diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py
index abb739ae52..ab9899b7d5 100644
--- a/tests/handlers/test_typing.py
+++ b/tests/handlers/test_typing.py
@@ -251,12 +251,12 @@ class TypingNotificationsTestCase(unittest.TestCase):
 
         # Gut-wrenching
         from synapse.handlers.typing import RoomMember
-        member = RoomMember(self.room_id, self.u_apple)
+        member = RoomMember(self.room_id, self.u_apple.to_string())
         self.handler._member_typing_until[member] = 1002000
         self.handler._member_typing_timer[member] = (
             self.clock.call_later(1002, lambda: 0)
         )
-        self.handler._room_typing[self.room_id] = set((self.u_apple,))
+        self.handler._room_typing[self.room_id] = set((self.u_apple.to_string(),))
 
         self.assertEquals(self.event_source.get_current_key(), 0)
 
diff --git a/tests/metrics/test_metric.py b/tests/metrics/test_metric.py
index f3c1927ce1..f85455a5af 100644
--- a/tests/metrics/test_metric.py
+++ b/tests/metrics/test_metric.py
@@ -61,9 +61,6 @@ class CounterMetricTestCase(unittest.TestCase):
             'vector{method="PUT"} 1',
         ])
 
-        # Check that passing too few values errors
-        self.assertRaises(ValueError, counter.inc)
-
 
 class CallbackMetricTestCase(unittest.TestCase):
 
@@ -138,27 +135,27 @@ class CacheMetricTestCase(unittest.TestCase):
     def test_cache(self):
         d = dict()
 
-        metric = CacheMetric("cache", lambda: len(d))
+        metric = CacheMetric("cache", lambda: len(d), "cache_name")
 
         self.assertEquals(metric.render(), [
-            'cache:hits 0',
-            'cache:total 0',
-            'cache:size 0',
+            'cache:hits{name="cache_name"} 0',
+            'cache:total{name="cache_name"} 0',
+            'cache:size{name="cache_name"} 0',
         ])
 
         metric.inc_misses()
         d["key"] = "value"
 
         self.assertEquals(metric.render(), [
-            'cache:hits 0',
-            'cache:total 1',
-            'cache:size 1',
+            'cache:hits{name="cache_name"} 0',
+            'cache:total{name="cache_name"} 1',
+            'cache:size{name="cache_name"} 1',
         ])
 
         metric.inc_hits()
 
         self.assertEquals(metric.render(), [
-            'cache:hits 1',
-            'cache:total 2',
-            'cache:size 1',
+            'cache:hits{name="cache_name"} 1',
+            'cache:total{name="cache_name"} 2',
+            'cache:size{name="cache_name"} 1',
         ])
diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py
index 17587fda00..f33e6f60fb 100644
--- a/tests/replication/slave/storage/test_events.py
+++ b/tests/replication/slave/storage/test_events.py
@@ -59,47 +59,6 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase):
         [unpatch() for unpatch in self.unpatches]
 
     @defer.inlineCallbacks
-    def test_room_name_and_aliases(self):
-        create = yield self.persist(type="m.room.create", key="", creator=USER_ID)
-        yield self.persist(type="m.room.member", key=USER_ID, membership="join")
-        yield self.persist(type="m.room.name", key="", name="name1")
-        yield self.persist(
-            type="m.room.aliases", key="blue", aliases=["#1:blue"]
-        )
-        yield self.replicate()
-        yield self.check(
-            "get_room_name_and_aliases", (ROOM_ID,), ("name1", ["#1:blue"])
-        )
-
-        # Set the room name.
-        yield self.persist(type="m.room.name", key="", name="name2")
-        yield self.replicate()
-        yield self.check(
-            "get_room_name_and_aliases", (ROOM_ID,), ("name2", ["#1:blue"])
-        )
-
-        # Set the room aliases.
-        yield self.persist(
-            type="m.room.aliases", key="blue", aliases=["#2:blue"]
-        )
-        yield self.replicate()
-        yield self.check(
-            "get_room_name_and_aliases", (ROOM_ID,), ("name2", ["#2:blue"])
-        )
-
-        # Leave and join the room clobbering the state.
-        yield self.persist(type="m.room.member", key=USER_ID, membership="leave")
-        yield self.persist(
-            type="m.room.member", key=USER_ID, membership="join",
-            reset_state=[create]
-        )
-        yield self.replicate()
-
-        yield self.check(
-            "get_room_name_and_aliases", (ROOM_ID,), (None, [])
-        )
-
-    @defer.inlineCallbacks
     def test_room_members(self):
         create = yield self.persist(type="m.room.create", key="", creator=USER_ID)
         yield self.replicate()
diff --git a/tests/replication/test_resource.py b/tests/replication/test_resource.py
index 842e3d29d7..e70ac6f14d 100644
--- a/tests/replication/test_resource.py
+++ b/tests/replication/test_resource.py
@@ -13,15 +13,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.replication.resource import ReplicationResource
-from synapse.types import Requester, UserID
+import contextlib
+import json
 
+from mock import Mock, NonCallableMock
 from twisted.internet import defer
+
+import synapse.types
+from synapse.replication.resource import ReplicationResource
+from synapse.types import UserID
 from tests import unittest
-from tests.utils import setup_test_homeserver, requester_for_user
-from mock import Mock, NonCallableMock
-import json
-import contextlib
+from tests.utils import setup_test_homeserver
 
 
 class ReplicationResourceCase(unittest.TestCase):
@@ -61,7 +63,7 @@ class ReplicationResourceCase(unittest.TestCase):
     def test_events_and_state(self):
         get = self.get(events="-1", state="-1", timeout="0")
         yield self.hs.get_handlers().room_creation_handler.create_room(
-            Requester(self.user, "", False), {}
+            synapse.types.create_requester(self.user), {}
         )
         code, body = yield get
         self.assertEquals(code, 200)
@@ -144,7 +146,7 @@ class ReplicationResourceCase(unittest.TestCase):
     def send_text_message(self, room_id, message):
         handler = self.hs.get_handlers().message_handler
         event = yield handler.create_and_send_nonmember_event(
-            requester_for_user(self.user),
+            synapse.types.create_requester(self.user),
             {
                 "type": "m.room.message",
                 "content": {"body": "message", "msgtype": "m.text"},
@@ -157,7 +159,7 @@ class ReplicationResourceCase(unittest.TestCase):
     @defer.inlineCallbacks
     def create_room(self):
         result = yield self.hs.get_handlers().room_creation_handler.create_room(
-            Requester(self.user, "", False), {}
+            synapse.types.create_requester(self.user), {}
         )
         defer.returnValue(result["room_id"])
 
diff --git a/tests/rest/client/v1/test_profile.py b/tests/rest/client/v1/test_profile.py
index af02fce8fb..1e95e97538 100644
--- a/tests/rest/client/v1/test_profile.py
+++ b/tests/rest/client/v1/test_profile.py
@@ -14,17 +14,14 @@
 # limitations under the License.
 
 """Tests REST events for /profile paths."""
-from tests import unittest
-from twisted.internet import defer
-
 from mock import Mock
+from twisted.internet import defer
 
-from ....utils import MockHttpResource, setup_test_homeserver
-
+import synapse.types
 from synapse.api.errors import SynapseError, AuthError
-from synapse.types import Requester, UserID
-
 from synapse.rest.client.v1 import profile
+from tests import unittest
+from ....utils import MockHttpResource, setup_test_homeserver
 
 myid = "@1234ABCD:test"
 PATH_PREFIX = "/_matrix/client/api/v1"
@@ -52,7 +49,7 @@ class ProfileTestCase(unittest.TestCase):
         )
 
         def _get_user_by_req(request=None, allow_guest=False):
-            return Requester(UserID.from_string(myid), "", False)
+            return synapse.types.create_requester(myid)
 
         hs.get_v1auth().get_user_by_req = _get_user_by_req
 
diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py
index affd42c015..8ac56a1fb2 100644
--- a/tests/rest/client/v2_alpha/test_register.py
+++ b/tests/rest/client/v2_alpha/test_register.py
@@ -30,10 +30,10 @@ class RegisterRestServletTestCase(unittest.TestCase):
         self.registration_handler = Mock()
         self.identity_handler = Mock()
         self.login_handler = Mock()
+        self.device_handler = Mock()
 
         # do the dance to hook it up to the hs global
         self.handlers = Mock(
-            auth_handler=self.auth_handler,
             registration_handler=self.registration_handler,
             identity_handler=self.identity_handler,
             login_handler=self.login_handler
@@ -42,6 +42,8 @@ class RegisterRestServletTestCase(unittest.TestCase):
         self.hs.hostname = "superbig~testing~thing.com"
         self.hs.get_auth = Mock(return_value=self.auth)
         self.hs.get_handlers = Mock(return_value=self.handlers)
+        self.hs.get_auth_handler = Mock(return_value=self.auth_handler)
+        self.hs.get_device_handler = Mock(return_value=self.device_handler)
         self.hs.config.enable_registration = True
 
         # init the thing we're testing
@@ -61,13 +63,18 @@ class RegisterRestServletTestCase(unittest.TestCase):
             "id": "1234"
         }
         self.registration_handler.appservice_register = Mock(
-            return_value=(user_id, token)
+            return_value=user_id
         )
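+        # registration no longer returns a token: the servlet is expected to
+        # fetch the login tuple from the auth handler instead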
+        self.auth_handler.get_login_tuple_for_user_id = Mock(
+            return_value=(token, "kermits_refresh_token")
+        )
+
         (code, result) = yield self.servlet.on_POST(self.request)
         self.assertEquals(code, 200)
         det_data = {
             "user_id": user_id,
             "access_token": token,
+            "refresh_token": "kermits_refresh_token",
             "home_server": self.hs.hostname
         }
         self.assertDictContainsSubset(det_data, result)
@@ -105,26 +112,37 @@ class RegisterRestServletTestCase(unittest.TestCase):
     def test_POST_user_valid(self):
         user_id = "@kermit:muppet"
         token = "kermits_access_token"
+        device_id = "frogfone"
         self.request_data = json.dumps({
             "username": "kermit",
-            "password": "monkey"
+            "password": "monkey",
+            "device_id": device_id,
         })
         self.registration_handler.check_username = Mock(return_value=True)
         self.auth_result = (True, None, {
             "username": "kermit",
             "password": "monkey"
         }, None)
-        self.registration_handler.register = Mock(return_value=(user_id, token))
+        self.registration_handler.register = Mock(return_value=(user_id, None))
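+        # register() no longer mints an access token itself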
+        self.auth_handler.get_login_tuple_for_user_id = Mock(
+            return_value=(token, "kermits_refresh_token")
+        )
+        self.device_handler.check_device_registered = \
+            Mock(return_value=device_id)
 
         (code, result) = yield self.servlet.on_POST(self.request)
         self.assertEquals(code, 200)
         det_data = {
             "user_id": user_id,
             "access_token": token,
-            "home_server": self.hs.hostname
+            "refresh_token": "kermits_refresh_token",
+            "home_server": self.hs.hostname,
+            "device_id": device_id,
         }
         self.assertDictContainsSubset(det_data, result)
         self.assertIn("refresh_token", result)
+        # check that the login tuple was requested for the right device
+        self.auth_handler.get_login_tuple_for_user_id.assert_called_once_with(
+            user_id, device_id=device_id, initial_device_display_name=None)
 
     def test_POST_disabled_registration(self):
         self.hs.config.enable_registration = False
diff --git a/tests/storage/event_injector.py b/tests/storage/event_injector.py
index f22ba8db89..38556da9a7 100644
--- a/tests/storage/event_injector.py
+++ b/tests/storage/event_injector.py
@@ -30,6 +30,7 @@ class EventInjector:
     def create_room(self, room):
         builder = self.event_builder_factory.new({
             "type": EventTypes.Create,
+            "sender": "",
             "room_id": room.to_string(),
             "content": {},
         })
diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py
index 5734198121..3e2862daae 100644
--- a/tests/storage/test_appservice.py
+++ b/tests/storage/test_appservice.py
@@ -357,7 +357,7 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase):
         other_events = [Mock(event_id="e5"), Mock(event_id="e6")]
 
         # we aren't testing store._base stuff here, so mock this out
-        self.store._get_events_txn = Mock(return_value=events)
+        self.store._get_events = Mock(return_value=events)
 
         yield self._insert_txn(self.as_list[1]["id"], 9, other_events)
         yield self._insert_txn(service.id, 10, events)
diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py
index 6e4d9b1373..1286b4ce2d 100644
--- a/tests/storage/test_background_update.py
+++ b/tests/storage/test_background_update.py
@@ -10,7 +10,7 @@ class BackgroundUpdateTestCase(unittest.TestCase):
 
     @defer.inlineCallbacks
     def setUp(self):
-        hs = yield setup_test_homeserver()
+        hs = yield setup_test_homeserver()  # type: synapse.server.HomeServer
         self.store = hs.get_datastore()
         self.clock = hs.get_clock()
 
@@ -20,11 +20,20 @@ class BackgroundUpdateTestCase(unittest.TestCase):
             "test_update", self.update_handler
         )
 
+        # run the real background updates, to get them out the way
+        # (perhaps we should run them as part of the test HS setup, since we
+        # run all of the other schema setup stuff there?)
+        while True:
+            res = yield self.store.do_next_background_update(1000)
+            if res is None:
+                break
+
     @defer.inlineCallbacks
     def test_do_background_update(self):
         desired_count = 1000
         duration_ms = 42
 
+        # first step: make a bit of progress
         @defer.inlineCallbacks
         def update(progress, count):
             self.clock.advance_time_msec(count * duration_ms)
@@ -42,7 +51,7 @@ class BackgroundUpdateTestCase(unittest.TestCase):
         yield self.store.start_background_update("test_update", {"my_key": 1})
 
         self.update_handler.reset_mock()
-        result = yield self.store.do_background_update(
+        result = yield self.store.do_next_background_update(
             duration_ms * desired_count
         )
         self.assertIsNotNone(result)
@@ -50,15 +59,15 @@ class BackgroundUpdateTestCase(unittest.TestCase):
             {"my_key": 1}, self.store.DEFAULT_BACKGROUND_BATCH_SIZE
         )
 
+        # second step: complete the update
         @defer.inlineCallbacks
         def update(progress, count):
             yield self.store._end_background_update("test_update")
             defer.returnValue(count)
 
         self.update_handler.side_effect = update
-
         self.update_handler.reset_mock()
-        result = yield self.store.do_background_update(
+        result = yield self.store.do_next_background_update(
             duration_ms * desired_count
         )
         self.assertIsNotNone(result)
@@ -66,8 +75,9 @@ class BackgroundUpdateTestCase(unittest.TestCase):
             {"my_key": 2}, desired_count
         )
 
+        # third step: we don't expect to be called any more
         self.update_handler.reset_mock()
-        result = yield self.store.do_background_update(
+        result = yield self.store.do_next_background_update(
             duration_ms * desired_count
         )
         self.assertIsNone(result)
diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py
new file mode 100644
index 0000000000..1f0c0e7c37
--- /dev/null
+++ b/tests/storage/test_client_ips.py
@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+import synapse.server
+import synapse.storage
+import synapse.types
+import tests.unittest
+import tests.utils
+
+
+class ClientIpStoreTestCase(tests.unittest.TestCase):
+    def __init__(self, *args, **kwargs):
+        super(ClientIpStoreTestCase, self).__init__(*args, **kwargs)
+        self.store = None  # type: synapse.storage.DataStore
+        self.clock = None  # type: tests.utils.MockClock
+
+    @defer.inlineCallbacks
+    def setUp(self):
+        hs = yield tests.utils.setup_test_homeserver()
+        self.store = hs.get_datastore()
+        self.clock = hs.get_clock()
+
+    @defer.inlineCallbacks
+    def test_insert_new_client_ip(self):
+        self.clock.now = 12345678
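+        # MockClock.now is in seconds; last_seen is reported in milliseconds,
+        # hence the 12345678000 asserted below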
+        user_id = "@user:id"
+        yield self.store.insert_client_ip(
+            synapse.types.UserID.from_string(user_id),
+            "access_token", "ip", "user_agent", "device_id",
+        )
+
+        # deliberately use an iterable here to make sure that the lookup
+        # method doesn't iterate it twice
+        device_list = iter(((user_id, "device_id"),))
+        result = yield self.store.get_last_client_ip_by_device(device_list)
+
+        r = result[(user_id, "device_id")]
+        self.assertDictContainsSubset(
+            {
+                "user_id": user_id,
+                "device_id": "device_id",
+                "access_token": "access_token",
+                "ip": "ip",
+                "user_agent": "user_agent",
+                "last_seen": 12345678000,
+            },
+            r
+        )
diff --git a/tests/storage/test_devices.py b/tests/storage/test_devices.py
new file mode 100644
index 0000000000..f8725acea0
--- /dev/null
+++ b/tests/storage/test_devices.py
@@ -0,0 +1,105 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+import synapse.api.errors
+import tests.unittest
+import tests.utils
+
+
+class DeviceStoreTestCase(tests.unittest.TestCase):
+    def __init__(self, *args, **kwargs):
+        super(DeviceStoreTestCase, self).__init__(*args, **kwargs)
+        self.store = None  # type: synapse.storage.DataStore
+
+    @defer.inlineCallbacks
+    def setUp(self):
+        hs = yield tests.utils.setup_test_homeserver()
+
+        self.store = hs.get_datastore()
+
+    @defer.inlineCallbacks
+    def test_store_new_device(self):
+        yield self.store.store_device(
+            "user_id", "device_id", "display_name"
+        )
+
+        res = yield self.store.get_device("user_id", "device_id")
+        self.assertDictContainsSubset({
+            "user_id": "user_id",
+            "device_id": "device_id",
+            "display_name": "display_name",
+        }, res)
+
+    @defer.inlineCallbacks
+    def test_get_devices_by_user(self):
+        yield self.store.store_device(
+            "user_id", "device1", "display_name 1"
+        )
+        yield self.store.store_device(
+            "user_id", "device2", "display_name 2"
+        )
+        yield self.store.store_device(
+            "user_id2", "device3", "display_name 3"
+        )
+
+        res = yield self.store.get_devices_by_user("user_id")
+        self.assertEqual(2, len(res.keys()))
+        self.assertDictContainsSubset({
+            "user_id": "user_id",
+            "device_id": "device1",
+            "display_name": "display_name 1",
+        }, res["device1"])
+        self.assertDictContainsSubset({
+            "user_id": "user_id",
+            "device_id": "device2",
+            "display_name": "display_name 2",
+        }, res["device2"])
+
+    @defer.inlineCallbacks
+    def test_update_device(self):
+        yield self.store.store_device(
+            "user_id", "device_id", "display_name 1"
+        )
+
+        res = yield self.store.get_device("user_id", "device_id")
+        self.assertEqual("display_name 1", res["display_name"])
+
+        # do a no-op first
+        yield self.store.update_device(
+            "user_id", "device_id",
+        )
+        res = yield self.store.get_device("user_id", "device_id")
+        self.assertEqual("display_name 1", res["display_name"])
+
+        # do the update
+        yield self.store.update_device(
+            "user_id", "device_id",
+            new_display_name="display_name 2",
+        )
+
+        # check it worked
+        res = yield self.store.get_device("user_id", "device_id")
+        self.assertEqual("display_name 2", res["display_name"])
+
+    @defer.inlineCallbacks
+    def test_update_unknown_device(self):
+        with self.assertRaises(synapse.api.errors.StoreError) as cm:
+            yield self.store.update_device(
+                "user_id", "unknown_device_id",
+                new_display_name="display_name 2",
+            )
+        self.assertEqual(404, cm.exception.code)
diff --git a/tests/storage/test_end_to_end_keys.py b/tests/storage/test_end_to_end_keys.py
new file mode 100644
index 0000000000..453bc61438
--- /dev/null
+++ b/tests/storage/test_end_to_end_keys.py
@@ -0,0 +1,90 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+import tests.unittest
+import tests.utils
+
+
+class EndToEndKeyStoreTestCase(tests.unittest.TestCase):
+    def __init__(self, *args, **kwargs):
+        super(EndToEndKeyStoreTestCase, self).__init__(*args, **kwargs)
+        self.store = None  # type: synapse.storage.DataStore
+
+    @defer.inlineCallbacks
+    def setUp(self):
+        hs = yield tests.utils.setup_test_homeserver()
+
+        self.store = hs.get_datastore()
+
+    @defer.inlineCallbacks
+    def test_key_without_device_name(self):
+        now = 1470174257070
+        json = '{ "key": "value" }'
+
+        yield self.store.set_e2e_device_keys(
+            "user", "device", now, json)
+
+        res = yield self.store.get_e2e_device_keys((("user", "device"),))
+        self.assertIn("user", res)
+        self.assertIn("device", res["user"])
+        dev = res["user"]["device"]
+        self.assertDictContainsSubset({
+            "key_json": json,
+            "device_display_name": None,
+        }, dev)
+
+    @defer.inlineCallbacks
+    def test_get_key_with_device_name(self):
+        now = 1470174257070
+        json = '{ "key": "value" }'
+
+        yield self.store.set_e2e_device_keys(
+            "user", "device", now, json)
+        yield self.store.store_device(
+            "user", "device", "display_name"
+        )
+
+        res = yield self.store.get_e2e_device_keys((("user", "device"),))
+        self.assertIn("user", res)
+        self.assertIn("device", res["user"])
+        dev = res["user"]["device"]
+        self.assertDictContainsSubset({
+            "key_json": json,
+            "device_display_name": "display_name",
+        }, dev)
+
+    @defer.inlineCallbacks
+    def test_multiple_devices(self):
+        now = 1470174257070
+
+        yield self.store.set_e2e_device_keys(
+            "user1", "device1", now, 'json11')
+        yield self.store.set_e2e_device_keys(
+            "user1", "device2", now, 'json12')
+        yield self.store.set_e2e_device_keys(
+            "user2", "device1", now, 'json21')
+        yield self.store.set_e2e_device_keys(
+            "user2", "device2", now, 'json22')
+
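+        # only the (user, device) pairs we explicitly query for should be
+        # returned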
+        res = yield self.store.get_e2e_device_keys((("user1", "device1"),
+                                                    ("user2", "device2")))
+        self.assertIn("user1", res)
+        self.assertIn("device1", res["user1"])
+        self.assertNotIn("device2", res["user1"])
+        self.assertIn("user2", res)
+        self.assertNotIn("device1", res["user2"])
+        self.assertIn("device2", res["user2"])
diff --git a/tests/storage/test_event_push_actions.py b/tests/storage/test_event_push_actions.py
new file mode 100644
index 0000000000..e9044afa2e
--- /dev/null
+++ b/tests/storage/test_event_push_actions.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+import tests.unittest
+import tests.utils
+
+USER_ID = "@user:example.com"
+
+
+class EventPushActionsStoreTestCase(tests.unittest.TestCase):
+
+    @defer.inlineCallbacks
+    def setUp(self):
+        hs = yield tests.utils.setup_test_homeserver()
+        self.store = hs.get_datastore()
+
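+    # these are smoke tests: they just check that the queries run without
+    # error, and make no assertions about the results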
+    @defer.inlineCallbacks
+    def test_get_unread_push_actions_for_user_in_range_for_http(self):
+        yield self.store.get_unread_push_actions_for_user_in_range_for_http(
+            USER_ID, 0, 1000, 20
+        )
+
+    @defer.inlineCallbacks
+    def test_get_unread_push_actions_for_user_in_range_for_email(self):
+        yield self.store.get_unread_push_actions_for_user_in_range_for_email(
+            USER_ID, 0, 1000, 20
+        )
diff --git a/tests/storage/test_events.py b/tests/storage/test_events.py
index 18a6cff0c7..3762b38e37 100644
--- a/tests/storage/test_events.py
+++ b/tests/storage/test_events.py
@@ -37,7 +37,7 @@ class EventsStoreTestCase(unittest.TestCase):
 
     @defer.inlineCallbacks
     def test_count_daily_messages(self):
-        self.db_pool.runQuery("DELETE FROM stats_reporting")
+        yield self.db_pool.runQuery("DELETE FROM stats_reporting")
 
         self.hs.clock.now = 100
 
@@ -60,7 +60,7 @@ class EventsStoreTestCase(unittest.TestCase):
         # it isn't old enough.
         count = yield self.store.count_daily_messages()
         self.assertIsNone(count)
-        self._assert_stats_reporting(1, self.hs.clock.now)
+        yield self._assert_stats_reporting(1, self.hs.clock.now)
 
         # Already reported yesterday, two new events from today.
         yield self.event_injector.inject_message(room, user, "Yeah they are!")
@@ -68,21 +68,21 @@ class EventsStoreTestCase(unittest.TestCase):
         self.hs.clock.now += 60 * 60 * 24
         count = yield self.store.count_daily_messages()
         self.assertEqual(2, count)  # 2 since yesterday
-        self._assert_stats_reporting(3, self.hs.clock.now)  # 3 ever
+        yield self._assert_stats_reporting(3, self.hs.clock.now)  # 3 ever
 
         # Last reported too recently.
         yield self.event_injector.inject_message(room, user, "Who could disagree?")
         self.hs.clock.now += 60 * 60 * 22
         count = yield self.store.count_daily_messages()
         self.assertIsNone(count)
-        self._assert_stats_reporting(4, self.hs.clock.now)
+        yield self._assert_stats_reporting(4, self.hs.clock.now)
 
         # Last reported too long ago
         yield self.event_injector.inject_message(room, user, "No one.")
         self.hs.clock.now += 60 * 60 * 26
         count = yield self.store.count_daily_messages()
         self.assertIsNone(count)
-        self._assert_stats_reporting(5, self.hs.clock.now)
+        yield self._assert_stats_reporting(5, self.hs.clock.now)
 
         # And now let's actually report something
         yield self.event_injector.inject_message(room, user, "Indeed.")
@@ -92,7 +92,7 @@ class EventsStoreTestCase(unittest.TestCase):
         self.hs.clock.now += (60 * 60 * 24) + 50
         count = yield self.store.count_daily_messages()
         self.assertEqual(3, count)
-        self._assert_stats_reporting(8, self.hs.clock.now)
+        yield self._assert_stats_reporting(8, self.hs.clock.now)
 
     @defer.inlineCallbacks
     def _get_last_stream_token(self):
diff --git a/tests/storage/test_registration.py b/tests/storage/test_registration.py
index b8384c98d8..f7d74dea8e 100644
--- a/tests/storage/test_registration.py
+++ b/tests/storage/test_registration.py
@@ -38,6 +38,7 @@ class RegistrationStoreTestCase(unittest.TestCase):
             "BcDeFgHiJkLmNoPqRsTuVwXyZa"
         ]
         self.pwhash = "{xx1}123456789"
+        self.device_id = "akgjhdjklgshg"
 
     @defer.inlineCallbacks
     def test_register(self):
@@ -64,13 +65,15 @@ class RegistrationStoreTestCase(unittest.TestCase):
     @defer.inlineCallbacks
     def test_add_tokens(self):
         yield self.store.register(self.user_id, self.tokens[0], self.pwhash)
-        yield self.store.add_access_token_to_user(self.user_id, self.tokens[1])
+        yield self.store.add_access_token_to_user(self.user_id, self.tokens[1],
+                                                  self.device_id)
 
         result = yield self.store.get_user_by_access_token(self.tokens[1])
 
         self.assertDictContainsSubset(
             {
                 "name": self.user_id,
+                "device_id": self.device_id,
             },
             result
         )
@@ -80,20 +83,24 @@ class RegistrationStoreTestCase(unittest.TestCase):
     @defer.inlineCallbacks
     def test_exchange_refresh_token_valid(self):
         uid = stringutils.random_string(32)
+        device_id = stringutils.random_string(16)
         generator = TokenGenerator()
         last_token = generator.generate(uid)
 
         self.db_pool.runQuery(
-            "INSERT INTO refresh_tokens(user_id, token) VALUES(?,?)",
-            (uid, last_token,))
+            "INSERT INTO refresh_tokens(user_id, token, device_id) "
+            "VALUES(?,?,?)",
+            (uid, last_token, device_id))
 
-        (found_user_id, refresh_token) = yield self.store.exchange_refresh_token(
-            last_token, generator.generate)
+        (found_user_id, refresh_token, device_id) = \
+            yield self.store.exchange_refresh_token(last_token,
+                                                    generator.generate)
         self.assertEqual(uid, found_user_id)
 
         rows = yield self.db_pool.runQuery(
-            "SELECT token FROM refresh_tokens WHERE user_id = ?", (uid, ))
-        self.assertEqual([(refresh_token,)], rows)
+            "SELECT token, device_id FROM refresh_tokens WHERE user_id = ?",
+            (uid, ))
+        self.assertEqual([(refresh_token, device_id)], rows)
         # We issued token 1, then exchanged it for token 2
         expected_refresh_token = u"%s-%d" % (uid, 2,)
         self.assertEqual(expected_refresh_token, refresh_token)
@@ -121,6 +128,40 @@ class RegistrationStoreTestCase(unittest.TestCase):
         with self.assertRaises(StoreError):
             yield self.store.exchange_refresh_token(last_token, generator.generate)
 
+    @defer.inlineCallbacks
+    def test_user_delete_access_tokens(self):
+        # add some tokens
+        generator = TokenGenerator()
+        refresh_token = generator.generate(self.user_id)
+        yield self.store.register(self.user_id, self.tokens[0], self.pwhash)
+        yield self.store.add_access_token_to_user(self.user_id, self.tokens[1],
+                                                  self.device_id)
+        yield self.store.add_refresh_token_to_user(self.user_id, refresh_token,
+                                                   self.device_id)
+
+        # now delete some
+        yield self.store.user_delete_access_tokens(
+            self.user_id, device_id=self.device_id, delete_refresh_tokens=True)
+
+        # check they were deleted
+        user = yield self.store.get_user_by_access_token(self.tokens[1])
+        self.assertIsNone(user, "access token was not deleted by device_id")
+        with self.assertRaises(StoreError):
+            yield self.store.exchange_refresh_token(refresh_token,
+                                                    generator.generate)
+
+        # check the one not associated with the device was not deleted
+        user = yield self.store.get_user_by_access_token(self.tokens[0])
+        self.assertEqual(self.user_id, user["name"])
+
+        # now delete the rest
+        yield self.store.user_delete_access_tokens(
+            self.user_id, delete_refresh_tokens=True)
+
+        user = yield self.store.get_user_by_access_token(self.tokens[0])
+        self.assertIsNone(user,
+                          "access token was not deleted without device_id")
+
 
 class TokenGenerator:
     def __init__(self):
diff --git a/tests/test_preview.py b/tests/test_preview.py
new file mode 100644
index 0000000000..2a801173a0
--- /dev/null
+++ b/tests/test_preview.py
@@ -0,0 +1,139 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014-2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import unittest
+
+from synapse.rest.media.v1.preview_url_resource import summarize_paragraphs
+
+
+class PreviewTestCase(unittest.TestCase):
+
+    def test_long_summarize(self):
+        example_paras = [
+            """Tromsø (Norwegian pronunciation: [ˈtrʊmsœ] ( listen); Northern Sami:
+            Romsa; Finnish: Tromssa[2] Kven: Tromssa) is a city and municipality in
+            Troms county, Norway. The administrative centre of the municipality is
+            the city of Tromsø. Outside of Norway, Tromso and Tromsö are
+            alternative spellings of the city.Tromsø is considered the northernmost
+            city in the world with a population above 50,000. The most populous town
+            north of it is Alta, Norway, with a population of 14,272 (2013).""",
+
+            """Tromsø lies in Northern Norway. The municipality has a population of
+            (2015) 72,066, but with an annual influx of students it has over 75,000
+            most of the year. It is the largest urban area in Northern Norway and the
+            third largest north of the Arctic Circle (following Murmansk and Norilsk).
+            Most of Tromsø, including the city centre, is located on the island of
+            Tromsøya, 350 kilometres (217 mi) north of the Arctic Circle. In 2012,
+            Tromsøya had a population of 36,088. Substantial parts of the urban area
+            are also situated on the mainland to the east, and on parts of Kvaløya—a
+            large island to the west. Tromsøya is connected to the mainland by the Tromsø
+            Bridge and the Tromsøysund Tunnel, and to the island of Kvaløya by the
+            Sandnessund Bridge. Tromsø Airport connects the city to many destinations
+            in Europe. The city is warmer than most other places located on the same
+            latitude, due to the warming effect of the Gulf Stream.""",
+
+            """The city centre of Tromsø contains the highest number of old wooden
+            houses in Northern Norway, the oldest house dating from 1789. The Arctic
+            Cathedral, a modern church from 1965, is probably the most famous landmark
+            in Tromsø. The city is a cultural centre for its region, with several
+            festivals taking place in the summer. Some of Norway's best-known
+            musicians, Torbjørn Brundtland and Svein Berge of the electronica duo
+            Röyksopp and Lene Marlin grew up and started their careers in Tromsø.
+            Noted electronic musician Geir Jenssen also hails from Tromsø.""",
+        ]
+
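+        # the first paragraph alone presumably satisfies min_size, so it
+        # should form the entire summary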
+        desc = summarize_paragraphs(example_paras, min_size=200, max_size=500)
+
+        self.assertEquals(
+            desc,
+            "Tromsø (Norwegian pronunciation: [ˈtrʊmsœ] ( listen); Northern Sami:"
+            " Romsa; Finnish: Tromssa[2] Kven: Tromssa) is a city and municipality in"
+            " Troms county, Norway. The administrative centre of the municipality is"
+            " the city of Tromsø. Outside of Norway, Tromso and Tromsö are"
+            " alternative spellings of the city.Tromsø is considered the northernmost"
+            " city in the world with a population above 50,000. The most populous town"
+            " north of it is Alta, Norway, with a population of 14,272 (2013)."
+        )
+
+        desc = summarize_paragraphs(example_paras[1:], min_size=200, max_size=500)
+
+        self.assertEquals(
+            desc,
+            "Tromsø lies in Northern Norway. The municipality has a population of"
+            " (2015) 72,066, but with an annual influx of students it has over 75,000"
+            " most of the year. It is the largest urban area in Northern Norway and the"
+            " third largest north of the Arctic Circle (following Murmansk and Norilsk)."
+            " Most of Tromsø, including the city centre, is located on the island of"
+            " Tromsøya, 350 kilometres (217 mi) north of the Arctic Circle. In 2012,"
+            " Tromsøya had a population of 36,088. Substantial parts of the…"
+        )
+
+    def test_short_summarize(self):
+        example_paras = [
+            "Tromsø (Norwegian pronunciation: [ˈtrʊmsœ] ( listen); Northern Sami:"
+            " Romsa; Finnish: Tromssa[2] Kven: Tromssa) is a city and municipality in"
+            " Troms county, Norway.",
+
+            "Tromsø lies in Northern Norway. The municipality has a population of"
+            " (2015) 72,066, but with an annual influx of students it has over 75,000"
+            " most of the year.",
+
+            "The city centre of Tromsø contains the highest number of old wooden"
+            " houses in Northern Norway, the oldest house dating from 1789. The Arctic"
+            " Cathedral, a modern church from 1965, is probably the most famous landmark"
+            " in Tromsø.",
+        ]
+
+        desc = summarize_paragraphs(example_paras, min_size=200, max_size=500)
+
+        self.assertEquals(
+            desc,
+            "Tromsø (Norwegian pronunciation: [ˈtrʊmsœ] ( listen); Northern Sami:"
+            " Romsa; Finnish: Tromssa[2] Kven: Tromssa) is a city and municipality in"
+            " Troms county, Norway.\n"
+            "\n"
+            "Tromsø lies in Northern Norway. The municipality has a population of"
+            " (2015) 72,066, but with an annual influx of students it has over 75,000"
+            " most of the year."
+        )
+
+    def test_small_then_large_summarize(self):
+        example_paras = [
+            "Tromsø (Norwegian pronunciation: [ˈtrʊmsœ] ( listen); Northern Sami:"
+            " Romsa; Finnish: Tromssa[2] Kven: Tromssa) is a city and municipality in"
+            " Troms county, Norway.",
+
+            "Tromsø lies in Northern Norway. The municipality has a population of"
+            " (2015) 72,066, but with an annual influx of students it has over 75,000"
+            " most of the year."
+            " The city centre of Tromsø contains the highest number of old wooden"
+            " houses in Northern Norway, the oldest house dating from 1789. The Arctic"
+            " Cathedral, a modern church from 1965, is probably the most famous landmark"
+            " in Tromsø.",
+        ]
+
+        desc = summarize_paragraphs(example_paras, min_size=200, max_size=500)
+        self.assertEquals(
+            desc,
+            "Tromsø (Norwegian pronunciation: [ˈtrʊmsœ] ( listen); Northern Sami:"
+            " Romsa; Finnish: Tromssa[2] Kven: Tromssa) is a city and municipality in"
+            " Troms county, Norway.\n"
+            "\n"
+            "Tromsø lies in Northern Norway. The municipality has a population of"
+            " (2015) 72,066, but with an annual influx of students it has over 75,000"
+            " most of the year. The city centre of Tromsø contains the highest number"
+            " of old wooden houses in Northern Norway, the oldest house dating from"
+            " 1789. The Arctic Cathedral, a modern church…"
+        )
diff --git a/tests/unittest.py b/tests/unittest.py
index 5b22abfe74..38715972dd 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -17,13 +17,18 @@ from twisted.trial import unittest
 
 import logging
 
-
 # logging doesn't have a "don't log anything at all EVARRRR setting,
 # but since the highest value is 50, 1000000 should do ;)
 NEVER = 1000000
 
-logging.getLogger().addHandler(logging.StreamHandler())
+handler = logging.StreamHandler()
+handler.setFormatter(logging.Formatter(
+    "%(levelname)s:%(name)s:%(message)s  [%(pathname)s:%(lineno)d]"
+))
+logging.getLogger().addHandler(handler)
 logging.getLogger().setLevel(NEVER)
+logging.getLogger("synapse.storage.SQL").setLevel(NEVER)
+logging.getLogger("synapse.storage.txn").setLevel(NEVER)
 
 
 def around(target):
@@ -70,8 +75,6 @@ class TestCase(unittest.TestCase):
                     return ret
 
             logging.getLogger().setLevel(level)
-            # Don't set SQL logging
-            logging.getLogger("synapse.storage").setLevel(old_level)
             return orig()
 
     def assertObjectHasAttributes(self, attrs, obj):
diff --git a/tests/util/test_rwlock.py b/tests/util/test_rwlock.py
new file mode 100644
index 0000000000..1d745ae1a7
--- /dev/null
+++ b/tests/util/test_rwlock.py
@@ -0,0 +1,85 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from tests import unittest
+
+from synapse.util.async import ReadWriteLock
+
+
+class ReadWriteLockTestCase(unittest.TestCase):
+
+    def _assert_called_before_not_after(self, lst, first_false):
+        for i, d in enumerate(lst[:first_false]):
+            self.assertTrue(d.called, msg="%d was unexpectedly false" % i)
+
+        for i, d in enumerate(lst[first_false:]):
+            self.assertFalse(
+                d.called, msg="%d was unexpectedly true" % (i + first_false)
+            )
+
+    def test_rwlock(self):
+        rwlock = ReadWriteLock()
+
+        key = object()
+
+        ds = [
+            rwlock.read(key),   # 0
+            rwlock.read(key),   # 1
+            rwlock.write(key),  # 2
+            rwlock.write(key),  # 3
+            rwlock.read(key),   # 4
+            rwlock.read(key),   # 5
+            rwlock.write(key),  # 6
+        ]
+
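+        # at this point only the two reads (0 and 1) should hold the lock;
+        # the first write (2) and everything queued behind it must wait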
+        self._assert_called_before_not_after(ds, 2)
+
+        with ds[0].result:
+            self._assert_called_before_not_after(ds, 2)
+        self._assert_called_before_not_after(ds, 2)
+
+        with ds[1].result:
+            self._assert_called_before_not_after(ds, 2)
+        self._assert_called_before_not_after(ds, 3)
+
+        with ds[2].result:
+            self._assert_called_before_not_after(ds, 3)
+        self._assert_called_before_not_after(ds, 4)
+
+        with ds[3].result:
+            self._assert_called_before_not_after(ds, 4)
+        self._assert_called_before_not_after(ds, 6)
+
+        with ds[5].result:
+            self._assert_called_before_not_after(ds, 6)
+        self._assert_called_before_not_after(ds, 6)
+
+        with ds[4].result:
+            self._assert_called_before_not_after(ds, 6)
+        self._assert_called_before_not_after(ds, 7)
+
+        with ds[6].result:
+            pass
+
+        d = rwlock.write(key)
+        self.assertTrue(d.called)
+        with d.result:
+            pass
+
+        d = rwlock.read(key)
+        self.assertTrue(d.called)
+        with d.result:
+            pass
diff --git a/tests/utils.py b/tests/utils.py
index 59d985b5f2..915b934e94 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -20,7 +20,6 @@ from synapse.storage.prepare_database import prepare_database
 from synapse.storage.engines import create_engine
 from synapse.server import HomeServer
 from synapse.federation.transport import server
-from synapse.types import Requester
 from synapse.util.ratelimitutils import FederationRateLimiter
 
 from synapse.util.logcontext import LoggingContext
@@ -54,7 +53,9 @@ def setup_test_homeserver(name="test", datastore=None, config=None, **kargs):
         config.trusted_third_party_id_servers = []
         config.room_invite_state_types = []
 
+    config.use_frozen_dicts = True
     config.database_config = {"name": "sqlite3"}
+    config.ldap_enabled = False
 
     if "clock" not in kargs:
         kargs["clock"] = MockClock()
@@ -67,6 +68,7 @@ def setup_test_homeserver(name="test", datastore=None, config=None, **kargs):
             version_string="Synapse/tests",
             database_engine=create_engine(config.database_config),
             get_db_conn=db_pool.get_db_conn,
+            room_list_handler=object(),
             **kargs
         )
         hs.setup()
@@ -75,20 +77,16 @@ def setup_test_homeserver(name="test", datastore=None, config=None, **kargs):
             name, db_pool=None, datastore=datastore, config=config,
             version_string="Synapse/tests",
             database_engine=create_engine(config.database_config),
+            room_list_handler=object(),
             **kargs
         )
 
     # bcrypt is far too slow to be doing in unit tests
-    def swap_out_hash_for_testing(old_build_handlers):
-        def build_handlers():
-            handlers = old_build_handlers()
-            auth_handler = handlers.auth_handler
-            auth_handler.hash = lambda p: hashlib.md5(p).hexdigest()
-            auth_handler.validate_hash = lambda p, h: hashlib.md5(p).hexdigest() == h
-            return handlers
-        return build_handlers
-
-    hs.build_handlers = swap_out_hash_for_testing(hs.build_handlers)
+    # Need to let the HS build an auth handler and then mess with it
+    # because AuthHandler's constructor requires the HS, so we can't make one
+    # beforehand and pass it in to the HS's constructor (chicken / egg)
+    hs.get_auth_handler().hash = lambda p: hashlib.md5(p).hexdigest()
+    hs.get_auth_handler().validate_hash = lambda p, h: hashlib.md5(p).hexdigest() == h
 
     fed = kargs.get("resource_for_federation", None)
     if fed:
@@ -513,7 +511,3 @@ class DeferredMockCallable(object):
                     "call(%s)" % _format_call(c[0], c[1]) for c in calls
                 ])
             )
-
-
-def requester_for_user(user):
-    return Requester(user, None, False)
diff --git a/tox.ini b/tox.ini
index 757b7189c3..52d93c65e5 100644
--- a/tox.ini
+++ b/tox.ini
@@ -11,7 +11,7 @@ deps =
 setenv =
     PYTHONDONTWRITEBYTECODE = no_byte_code
 commands =
-    /bin/bash -c "find {toxinidir} -name '*.pyc' -delete ; coverage run {env:COVERAGE_OPTS:} --source={toxinidir}/synapse \
+    /bin/sh -c "find {toxinidir} -name '*.pyc' -delete ; coverage run {env:COVERAGE_OPTS:} --source={toxinidir}/synapse \
         {envbindir}/trial {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:}"
     {env:DUMP_COVERAGE_COMMAND:coverage report -m}
 
@@ -26,4 +26,4 @@ skip_install = True
 basepython = python2.7
 deps =
     flake8
-commands = /bin/bash -c "flake8 synapse tests {env:PEP8SUFFIX:}"
+commands = /bin/sh -c "flake8 synapse tests {env:PEP8SUFFIX:}"