author     Erik Johnston <erik@matrix.org>  2019-03-04 11:54:58 +0000
committer  Erik Johnston <erik@matrix.org>  2019-03-04 11:54:58 +0000
commit     fbc047f2a5f12ee934e5ccbe7274100aa72166b5 (patch)
tree       2eabc4f13032883ff61fc635d0be43292a5ad131
parent     Update newsfile to have a full stop (diff)
parent     Update test_typing to use HomeserverTestCase. (#4771) (diff)
download   synapse-fbc047f2a5f12ee934e5ccbe7274100aa72166b5.tar.xz

Merge branch 'develop' of github.com:matrix-org/synapse into erikj/stop_fed_not_in_room
-rw-r--r--  .buildkite/.env | 13
-rw-r--r--  .buildkite/docker-compose.py27.pg94.yaml | 21
-rw-r--r--  .buildkite/docker-compose.py27.pg95.yaml | 21
-rw-r--r--  .buildkite/docker-compose.py35.pg94.yaml | 21
-rw-r--r--  .buildkite/docker-compose.py35.pg95.yaml | 21
-rw-r--r--  .buildkite/docker-compose.py37.pg11.yaml | 21
-rw-r--r--  .buildkite/docker-compose.py37.pg95.yaml | 21
-rw-r--r--  .buildkite/pipeline.yml | 149
-rw-r--r--  .travis.yml | 101
-rw-r--r--  CHANGES.md | 59
-rw-r--r--  CONTRIBUTING.rst | 37
-rw-r--r--  MANIFEST.in | 1
-rw-r--r--  README.rst | 2
-rw-r--r--  changelog.d/4450.bugfix | 2
-rw-r--r--  changelog.d/4632.feature | 1
-rw-r--r--  changelog.d/4635.misc | 1
-rw-r--r--  changelog.d/4642.feature | 1
-rw-r--r--  changelog.d/4643.misc | 1
-rw-r--r--  changelog.d/4644.misc | 1
-rw-r--r--  changelog.d/4647.feature | 1
-rw-r--r--  changelog.d/4651.bugfix | 1
-rw-r--r--  changelog.d/4652.feature | 1
-rw-r--r--  changelog.d/4657.misc | 1
-rw-r--r--  changelog.d/4666.feature | 1
-rw-r--r--  changelog.d/4667.bugfix | 1
-rw-r--r--  changelog.d/4668.misc | 1
-rw-r--r--  changelog.d/4669.misc | 1
-rw-r--r--  changelog.d/4670.feature | 1
-rw-r--r--  changelog.d/4671.misc | 1
-rw-r--r--  changelog.d/4674.feature | 1
-rw-r--r--  changelog.d/4676.misc | 1
-rw-r--r--  changelog.d/4677.misc | 1
-rw-r--r--  changelog.d/4681.misc | 1
-rw-r--r--  changelog.d/4682.feature | 1
-rw-r--r--  changelog.d/4688.misc | 1
-rw-r--r--  changelog.d/4690.bugfix | 1
-rw-r--r--  changelog.d/4691.misc | 1
-rw-r--r--  changelog.d/4694.feature | 1
-rw-r--r--  changelog.d/4695.feature | 1
-rw-r--r--  changelog.d/4740.bugfix | 1
-rw-r--r--  changelog.d/4749.bugfix | 1
-rw-r--r--  changelog.d/4752.misc | 1
-rw-r--r--  changelog.d/4757.feature | 1
-rw-r--r--  changelog.d/4757.misc | 1
-rw-r--r--  changelog.d/4759.feature | 1
-rw-r--r--  changelog.d/4763.bugfix | 1
-rw-r--r--  changelog.d/4765.misc | 1
-rw-r--r--  changelog.d/4771.misc | 1
-rw-r--r--  changelog.d/4776.bugfix | 1
-rw-r--r--  debian/changelog | 7
-rw-r--r--  debian/install | 1
-rwxr-xr-x  debian/manage_debconf.pl | 130
-rwxr-xr-x  debian/matrix-synapse-py3.config (renamed from debian/config) | 3
-rw-r--r--  debian/matrix-synapse-py3.postinst | 33
-rw-r--r--  docs/ACME.md | 19
-rw-r--r--  docs/reverse_proxy.rst | 20
-rw-r--r--  docs/tcp_replication.rst | 4
-rw-r--r--  docs/workers.rst | 2
-rwxr-xr-x  scripts-dev/check-newsfragment | 41
-rwxr-xr-x  scripts/synapse_port_db | 1
-rw-r--r--  synapse/__init__.py | 2
-rw-r--r--  synapse/app/client_reader.py | 2
-rw-r--r--  synapse/app/federation_reader.py | 6
-rw-r--r--  synapse/app/frontend_proxy.py | 15
-rwxr-xr-x  synapse/app/homeserver.py | 3
-rw-r--r--  synapse/config/captcha.py | 2
-rw-r--r--  synapse/config/tls.py | 10
-rw-r--r--  synapse/crypto/keyring.py | 72
-rw-r--r--  synapse/federation/federation_client.py | 18
-rw-r--r--  synapse/federation/federation_server.py | 18
-rw-r--r--  synapse/federation/transport/server.py | 7
-rw-r--r--  synapse/groups/groups_server.py | 6
-rw-r--r--  synapse/handlers/federation.py | 34
-rw-r--r--  synapse/handlers/message.py | 7
-rw-r--r--  synapse/handlers/pagination.py | 8
-rw-r--r--  synapse/handlers/receipts.py | 68
-rw-r--r--  synapse/handlers/register.py | 4
-rw-r--r--  synapse/handlers/room_list.py | 68
-rw-r--r--  synapse/http/federation/matrix_federation_agent.py | 6
-rw-r--r--  synapse/http/server.py | 8
-rw-r--r--  synapse/push/httppusher.py | 38
-rw-r--r--  synapse/push/pusher.py | 2
-rw-r--r--  synapse/push/pusherpool.py | 18
-rw-r--r--  synapse/replication/slave/storage/_base.py | 7
-rw-r--r--  synapse/replication/slave/storage/presence.py | 7
-rw-r--r--  synapse/replication/tcp/client.py | 22
-rw-r--r--  synapse/replication/tcp/commands.py | 5
-rw-r--r--  synapse/replication/tcp/protocol.py | 34
-rw-r--r--  synapse/rest/client/v2_alpha/auth.py | 2
-rw-r--r--  synapse/rest/media/v1/_base.py | 96
-rw-r--r--  synapse/server.pyi | 2
-rw-r--r--  synapse/static/client/register/index.html | 2
-rw-r--r--  synapse/storage/_base.py | 63
-rw-r--r--  synapse/storage/engines/postgres.py | 25
-rw-r--r--  synapse/storage/engines/sqlite.py | 9
-rw-r--r--  synapse/storage/registration.py | 66
-rw-r--r--  tests/handlers/test_typing.py | 290
-rw-r--r--  tests/rest/media/v1/test_base.py | 45
-rw-r--r--  tests/server.py | 21
-rw-r--r--  tests/unittest.py | 8
-rw-r--r--  tests/utils.py | 85
101 files changed, 1462 insertions(+), 535 deletions(-)
diff --git a/.buildkite/.env b/.buildkite/.env
new file mode 100644
index 0000000000..85b102d07f
--- /dev/null
+++ b/.buildkite/.env
@@ -0,0 +1,13 @@
+CI
+BUILDKITE
+BUILDKITE_BUILD_NUMBER
+BUILDKITE_BRANCH
+BUILDKITE_BUILD_NUMBER
+BUILDKITE_JOB_ID
+BUILDKITE_BUILD_URL
+BUILDKITE_PROJECT_SLUG
+BUILDKITE_COMMIT
+BUILDKITE_PULL_REQUEST
+BUILDKITE_TAG
+CODECOV_TOKEN
+TRIAL_FLAGS
diff --git a/.buildkite/docker-compose.py27.pg94.yaml b/.buildkite/docker-compose.py27.pg94.yaml
new file mode 100644
index 0000000000..2d4b9eadd9
--- /dev/null
+++ b/.buildkite/docker-compose.py27.pg94.yaml
@@ -0,0 +1,21 @@
+version: '3.1'
+
+services:
+
+  postgres:
+    image: postgres:9.4
+    environment:
+      POSTGRES_PASSWORD: postgres
+
+  testenv:
+    image: python:2.7
+    depends_on:
+      - postgres
+    env_file: .env
+    environment:
+      SYNAPSE_POSTGRES_HOST: postgres
+      SYNAPSE_POSTGRES_USER: postgres
+      SYNAPSE_POSTGRES_PASSWORD: postgres
+    working_dir: /app
+    volumes:
+      - ..:/app
diff --git a/.buildkite/docker-compose.py27.pg95.yaml b/.buildkite/docker-compose.py27.pg95.yaml
new file mode 100644
index 0000000000..c6a41f1da0
--- /dev/null
+++ b/.buildkite/docker-compose.py27.pg95.yaml
@@ -0,0 +1,21 @@
+version: '3.1'
+
+services:
+
+  postgres:
+    image: postgres:9.5
+    environment:
+      POSTGRES_PASSWORD: postgres
+
+  testenv:
+    image: python:2.7
+    depends_on:
+      - postgres
+    env_file: .env
+    environment:
+      SYNAPSE_POSTGRES_HOST: postgres
+      SYNAPSE_POSTGRES_USER: postgres
+      SYNAPSE_POSTGRES_PASSWORD: postgres
+    working_dir: /app
+    volumes:
+      - ..:/app
diff --git a/.buildkite/docker-compose.py35.pg94.yaml b/.buildkite/docker-compose.py35.pg94.yaml
new file mode 100644
index 0000000000..978aedd115
--- /dev/null
+++ b/.buildkite/docker-compose.py35.pg94.yaml
@@ -0,0 +1,21 @@
+version: '3.1'
+
+services:
+
+  postgres:
+    image: postgres:9.4
+    environment:
+      POSTGRES_PASSWORD: postgres
+
+  testenv:
+    image: python:3.5
+    depends_on:
+      - postgres
+    env_file: .env
+    environment:
+      SYNAPSE_POSTGRES_HOST: postgres
+      SYNAPSE_POSTGRES_USER: postgres
+      SYNAPSE_POSTGRES_PASSWORD: postgres
+    working_dir: /app
+    volumes:
+      - ..:/app
diff --git a/.buildkite/docker-compose.py35.pg95.yaml b/.buildkite/docker-compose.py35.pg95.yaml
new file mode 100644
index 0000000000..2f14387fbc
--- /dev/null
+++ b/.buildkite/docker-compose.py35.pg95.yaml
@@ -0,0 +1,21 @@
+version: '3.1'
+
+services:
+
+  postgres:
+    image: postgres:9.5
+    environment:
+      POSTGRES_PASSWORD: postgres
+
+  testenv:
+    image: python:3.5
+    depends_on:
+      - postgres
+    env_file: .env
+    environment:
+      SYNAPSE_POSTGRES_HOST: postgres
+      SYNAPSE_POSTGRES_USER: postgres
+      SYNAPSE_POSTGRES_PASSWORD: postgres
+    working_dir: /app
+    volumes:
+      - ..:/app
diff --git a/.buildkite/docker-compose.py37.pg11.yaml b/.buildkite/docker-compose.py37.pg11.yaml
new file mode 100644
index 0000000000..f3eec05ceb
--- /dev/null
+++ b/.buildkite/docker-compose.py37.pg11.yaml
@@ -0,0 +1,21 @@
+version: '3.1'
+
+services:
+
+  postgres:
+    image: postgres:11
+    environment:
+      POSTGRES_PASSWORD: postgres
+
+  testenv:
+    image: python:3.7
+    depends_on:
+      - postgres
+    env_file: .env
+    environment:
+      SYNAPSE_POSTGRES_HOST: postgres
+      SYNAPSE_POSTGRES_USER: postgres
+      SYNAPSE_POSTGRES_PASSWORD: postgres
+    working_dir: /app
+    volumes:
+      - ..:/app
diff --git a/.buildkite/docker-compose.py37.pg95.yaml b/.buildkite/docker-compose.py37.pg95.yaml
new file mode 100644
index 0000000000..2a41db8eba
--- /dev/null
+++ b/.buildkite/docker-compose.py37.pg95.yaml
@@ -0,0 +1,21 @@
+version: '3.1'
+
+services:
+
+  postgres:
+    image: postgres:9.5
+    environment:
+      POSTGRES_PASSWORD: postgres
+
+  testenv:
+    image: python:3.7
+    depends_on:
+      - postgres
+    env_file: .env
+    environment:
+      SYNAPSE_POSTGRES_HOST: postgres
+      SYNAPSE_POSTGRES_USER: postgres
+      SYNAPSE_POSTGRES_PASSWORD: postgres
+    working_dir: /app
+    volumes:
+      - ..:/app
diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml
new file mode 100644
index 0000000000..24f22c85b4
--- /dev/null
+++ b/.buildkite/pipeline.yml
@@ -0,0 +1,149 @@
+env:
+  CODECOV_TOKEN: "2dd7eb9b-0eda-45fe-a47c-9b5ac040045f"
+
+steps:
+  - command:
+      - "python -m pip install tox"
+      - "tox -e pep8"
+    label: "\U0001F9F9 PEP-8"
+    plugins:
+      - docker#v3.0.1:
+          image: "python:3.6"
+
+  - command:
+      - "python -m pip install tox"
+      - "tox -e packaging"
+    label: "\U0001F9F9 packaging"
+    plugins:
+      - docker#v3.0.1:
+          image: "python:3.6"
+
+  - command:
+      - "python -m pip install tox"
+      - "tox -e check_isort"
+    label: "\U0001F9F9 isort"
+    plugins:
+      - docker#v3.0.1:
+          image: "python:3.6"
+
+  - command:
+      - "python -m pip install tox"
+      - "scripts-dev/check-newsfragment"
+    label: ":newspaper: Newsfile"
+    branches: "!master !develop !release-*"
+    plugins:
+      - docker#v3.0.1:
+          image: "python:3.6"
+          propagate-environment: true
+
+  - wait
+
+  - command:
+      - "python -m pip install tox"
+      - "tox -e py27,codecov"
+    label: ":python: 2.7 / SQLite"
+    env:
+      TRIAL_FLAGS: "-j 2"
+    plugins:
+      - docker#v3.0.1:
+          image: "python:2.7"
+          propagate-environment: true
+
+  - command:
+      - "python -m pip install tox"
+      - "tox -e py35,codecov"
+    label: ":python: 3.5 / SQLite"
+    env:
+      TRIAL_FLAGS: "-j 2"
+    plugins:
+      - docker#v3.0.1:
+          image: "python:3.5"
+          propagate-environment: true
+
+  - command:
+      - "python -m pip install tox"
+      - "tox -e py36,codecov"
+    label: ":python: 3.6 / SQLite"
+    env:
+      TRIAL_FLAGS: "-j 2"
+    plugins:
+      - docker#v3.0.1:
+          image: "python:3.6"
+          propagate-environment: true
+
+  - command:
+      - "python -m pip install tox"
+      - "tox -e py37,codecov"
+    label: ":python: 3.7 / SQLite"
+    env:
+      TRIAL_FLAGS: "-j 2"
+    plugins:
+      - docker#v3.0.1:
+          image: "python:3.7"
+          propagate-environment: true
+
+  - label: ":python: 2.7 / :postgres: 9.4"
+    env:
+      TRIAL_FLAGS: "-j 4"
+    command:
+      - "bash -c 'python -m pip install tox && python -m tox -e py27-postgres,codecov'"
+    plugins:
+      - docker-compose#v2.1.0:
+          run: testenv
+          config:
+            - .buildkite/docker-compose.py27.pg94.yaml
+
+  - label: ":python: 2.7 / :postgres: 9.5"
+    env:
+      TRIAL_FLAGS: "-j 4"
+    command:
+      - "bash -c 'python -m pip install tox && python -m tox -e py27-postgres,codecov'"
+    plugins:
+      - docker-compose#v2.1.0:
+          run: testenv
+          config:
+            - .buildkite/docker-compose.py27.pg95.yaml
+
+  - label: ":python: 3.5 / :postgres: 9.4"
+    env:
+      TRIAL_FLAGS: "-j 4"
+    command:
+      - "bash -c 'python -m pip install tox && python -m tox -e py35-postgres,codecov'"
+    plugins:
+      - docker-compose#v2.1.0:
+          run: testenv
+          config:
+            - .buildkite/docker-compose.py35.pg94.yaml
+
+  - label: ":python: 3.5 / :postgres: 9.5"
+    env:
+      TRIAL_FLAGS: "-j 4"
+    command:
+      - "bash -c 'python -m pip install tox && python -m tox -e py35-postgres,codecov'"
+    plugins:
+      - docker-compose#v2.1.0:
+          run: testenv
+          config:
+            - .buildkite/docker-compose.py35.pg95.yaml
+
+  - label: ":python: 3.7 / :postgres: 9.5"
+    env:
+      TRIAL_FLAGS: "-j 4"
+    command:
+      - "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,codecov'"
+    plugins:
+      - docker-compose#v2.1.0:
+          run: testenv
+          config:
+            - .buildkite/docker-compose.py37.pg95.yaml
+
+  - label: ":python: 3.7 / :postgres: 11"
+    env:
+      TRIAL_FLAGS: "-j 4"
+    command:
+      - "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,codecov'"
+    plugins:
+      - docker-compose#v2.1.0:
+          run: testenv
+          config:
+            - .buildkite/docker-compose.py37.pg11.yaml
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 5d763123a0..0000000000
--- a/.travis.yml
+++ /dev/null
@@ -1,101 +0,0 @@
-dist: xenial
-language: python
-
-cache:
-  directories:
-    # we only bother to cache the wheels; parts of the http cache get
-    # invalidated every build (because they get served with a max-age of 600
-    # seconds), which means that we end up re-uploading the whole cache for
-    # every build, which is time-consuming In any case, it's not obvious that
-    # downloading the cache from S3 would be much faster than downloading the
-    # originals from pypi.
-    #
-    - $HOME/.cache/pip/wheels
-
-# don't clone the whole repo history, one commit will do
-git:
-  depth: 1
-
-# only build branches we care about (PRs are built seperately)
-branches:
-  only:
-    - master
-    - develop
-    - /^release-v/
-    - rav/pg95
-
-# When running the tox environments that call Twisted Trial, we can pass the -j
-# flag to run the tests concurrently. We set this to 2 for CPU bound tests
-# (SQLite) and 4 for I/O bound tests (PostgreSQL).
-matrix:
-  fast_finish: true
-  include:
-  - name: "pep8"
-    python: 3.6
-    env: TOX_ENV="pep8,check_isort,packaging"
-
-  - name: "py2.7 / sqlite"
-    python: 2.7
-    env: TOX_ENV=py27,codecov TRIAL_FLAGS="-j 2"
-
-  - name: "py2.7 / sqlite / olddeps"
-    python: 2.7
-    env: TOX_ENV=py27-old TRIAL_FLAGS="-j 2"
-
-  - name: "py2.7 / postgres9.5"
-    python: 2.7
-    addons:
-      postgresql: "9.5"
-    env: TOX_ENV=py27-postgres,codecov TRIAL_FLAGS="-j 4"
-    services:
-      - postgresql
-
-  - name: "py3.5 / sqlite"
-    python: 3.5
-    env: TOX_ENV=py35,codecov TRIAL_FLAGS="-j 2"
-
-  - name: "py3.7 / sqlite"
-    python: 3.7
-    env: TOX_ENV=py37,codecov TRIAL_FLAGS="-j 2"
-
-  - name: "py3.7 / postgres9.4"
-    python: 3.7
-    addons:
-      postgresql: "9.4"
-    env: TOX_ENV=py37-postgres TRIAL_FLAGS="-j 4"
-    services:
-      - postgresql
-
-  - name: "py3.7 / postgres9.5"
-    python: 3.7
-    addons:
-      postgresql: "9.5"
-    env: TOX_ENV=py37-postgres,codecov TRIAL_FLAGS="-j 4"
-    services:
-      - postgresql
-
-  - # we only need to check for the newsfragment if it's a PR build
-    if: type = pull_request
-    name: "check-newsfragment"
-    python: 3.6
-    env: TOX_ENV=check-newsfragment
-    script:
-      - git remote set-branches --add origin develop
-      - git fetch origin develop
-      - tox -e $TOX_ENV
-
-install:
-  # this just logs the postgres version we will be testing against (if any)
-  - psql -At -U postgres -c 'select version();' || true
-
-  - pip install tox
-
-  # if we don't have python3.6 in this environment, travis unhelpfully gives us
-  # a `python3.6` on our path which does nothing but spit out a warning. Tox
-  # tries to run it (even if we're not running a py36 env), so the build logs
-  # then have warnings which look like errors. To reduce the noise, remove the
-  # non-functional python3.6.
-  - ( ! command -v python3.6 || python3.6 --version ) &>/dev/null || rm -f $(command -v python3.6)
-
-script:
-  - tox -e $TOX_ENV
diff --git a/CHANGES.md b/CHANGES.md
index f1a9d58e4d..b25775d18e 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,62 @@
+Synapse 0.99.2 (2019-03-01)
+===========================
+
+Features
+--------
+
+- Added an HAProxy example in the reverse proxy documentation. Contributed by Benoît S. (“Benpro”). ([\#4541](https://github.com/matrix-org/synapse/issues/4541))
+- Add basic optional sentry integration. ([\#4632](https://github.com/matrix-org/synapse/issues/4632), [\#4694](https://github.com/matrix-org/synapse/issues/4694))
+- Transfer bans on room upgrade. ([\#4642](https://github.com/matrix-org/synapse/issues/4642))
+- Add configurable room list publishing rules. ([\#4647](https://github.com/matrix-org/synapse/issues/4647))
+- Support .well-known delegation when issuing certificates through ACME. ([\#4652](https://github.com/matrix-org/synapse/issues/4652))
+- Allow registration and login to be handled by a worker instance. ([\#4666](https://github.com/matrix-org/synapse/issues/4666), [\#4670](https://github.com/matrix-org/synapse/issues/4670), [\#4682](https://github.com/matrix-org/synapse/issues/4682))
+- Reduce the overhead of creating outbound federation connections over TLS by caching the TLS client options. ([\#4674](https://github.com/matrix-org/synapse/issues/4674))
+- Add prometheus metrics for number of outgoing EDUs, by type. ([\#4695](https://github.com/matrix-org/synapse/issues/4695))
+- Return correct error code when inviting a remote user to a room whose homeserver does not support the room version. ([\#4721](https://github.com/matrix-org/synapse/issues/4721))
+- Prevent showing rooms to other servers that were set to not federate. ([\#4746](https://github.com/matrix-org/synapse/issues/4746))
+
+
+Bugfixes
+--------
+
+- Fix possible exception when paginating. ([\#4263](https://github.com/matrix-org/synapse/issues/4263))
+- The dependency checker now correctly reports a version mismatch for optional
+  dependencies, instead of reporting the dependency missing. ([\#4450](https://github.com/matrix-org/synapse/issues/4450))
+- Set CORS headers on .well-known requests. ([\#4651](https://github.com/matrix-org/synapse/issues/4651))
+- Fix kicking guest users on guest access revocation in worker mode. ([\#4667](https://github.com/matrix-org/synapse/issues/4667))
+- Fix an issue in the database migration script where the
+  `e2e_room_keys.is_verified` column wasn't considered as
+  a boolean. ([\#4680](https://github.com/matrix-org/synapse/issues/4680))
+- Fix TaskStopped exceptions in logs when outbound requests time out. ([\#4690](https://github.com/matrix-org/synapse/issues/4690))
+- Fix ACME config for python 2. ([\#4717](https://github.com/matrix-org/synapse/issues/4717))
+- Fix paginating over federation persisting incorrect state. ([\#4718](https://github.com/matrix-org/synapse/issues/4718))
+
+
+Internal Changes
+----------------
+
+- Run `black` to reformat user directory code. ([\#4635](https://github.com/matrix-org/synapse/issues/4635))
+- Reduce number of exceptions we log. ([\#4643](https://github.com/matrix-org/synapse/issues/4643), [\#4668](https://github.com/matrix-org/synapse/issues/4668))
+- Introduce upsert batching functionality in the database layer. ([\#4644](https://github.com/matrix-org/synapse/issues/4644))
+- Fix various spelling mistakes. ([\#4657](https://github.com/matrix-org/synapse/issues/4657))
+- Cleanup request exception logging. ([\#4669](https://github.com/matrix-org/synapse/issues/4669), [\#4737](https://github.com/matrix-org/synapse/issues/4737), [\#4738](https://github.com/matrix-org/synapse/issues/4738))
+- Improve replication performance by reducing cache invalidation traffic. ([\#4671](https://github.com/matrix-org/synapse/issues/4671), [\#4715](https://github.com/matrix-org/synapse/issues/4715), [\#4748](https://github.com/matrix-org/synapse/issues/4748))
+- Test against Postgres 9.5 as well as 9.4. ([\#4676](https://github.com/matrix-org/synapse/issues/4676))
+- Run unit tests against python 3.7. ([\#4677](https://github.com/matrix-org/synapse/issues/4677))
+- Attempt to clarify installation instructions/config. ([\#4681](https://github.com/matrix-org/synapse/issues/4681))
+- Clean up gitignores. ([\#4688](https://github.com/matrix-org/synapse/issues/4688))
+- Minor tweaks to acme docs. ([\#4689](https://github.com/matrix-org/synapse/issues/4689))
+- Improve the logging in the pusher process. ([\#4691](https://github.com/matrix-org/synapse/issues/4691))
+- Better checks on newsfragments. ([\#4698](https://github.com/matrix-org/synapse/issues/4698), [\#4750](https://github.com/matrix-org/synapse/issues/4750))
+- Avoid some redundant work when processing read receipts. ([\#4706](https://github.com/matrix-org/synapse/issues/4706))
+- Run `push_receipts_to_remotes` as background job. ([\#4707](https://github.com/matrix-org/synapse/issues/4707))
+- Add prometheus metrics for number of badge update pushes. ([\#4709](https://github.com/matrix-org/synapse/issues/4709))
+- Reduce pusher logging on startup ([\#4716](https://github.com/matrix-org/synapse/issues/4716))
+- Don't log exceptions when failing to fetch remote server keys. ([\#4722](https://github.com/matrix-org/synapse/issues/4722))
+- Correctly proxy exception in frontend_proxy worker. ([\#4723](https://github.com/matrix-org/synapse/issues/4723))
+- Add database version to phonehome stats. ([\#4753](https://github.com/matrix-org/synapse/issues/4753))
+
+
 Synapse 0.99.1.1 (2019-02-14)
 =============================
 
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index b99a022c67..9a283ced6e 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -30,7 +30,7 @@ use github's pull request workflow to review the contribution, and either ask
 you to make any refinements needed or merge it and make them ourselves. The
 changes will then land on master when we next do a release.
 
-We use `CircleCI <https://circleci.com/gh/matrix-org>`_ and `Travis CI 
+We use `CircleCI <https://circleci.com/gh/matrix-org>`_ and `Travis CI
 <https://travis-ci.org/matrix-org/synapse>`_ for continuous integration. All
 pull requests to synapse get automatically tested by Travis and CircleCI.
 If your change breaks the build, this will be shown in GitHub, so please
@@ -74,16 +74,39 @@ entry. These are managed by Towncrier
 To create a changelog entry, make a new file in the ``changelog.d``
 file named in the format of ``PRnumber.type``. The type can be
 one of ``feature``, ``bugfix``, ``removal`` (also used for
-deprecations), or ``misc`` (for internal-only changes). The content of
-the file is your changelog entry, which can contain Markdown
-formatting. Adding credits to the changelog is encouraged, we value
-your contributions and would like to have you shouted out in the
-release notes!
+deprecations), or ``misc`` (for internal-only changes).
+
+The content of the file is your changelog entry, which can contain Markdown
+formatting. The entry should end with a full stop ('.') for consistency.
+
+Adding credits to the changelog is encouraged, we value your
+contributions and would like to have you shouted out in the release notes!
 
 For example, a fix in PR #1234 would have its changelog entry in
 ``changelog.d/1234.bugfix``, and contain content like "The security levels of
 Florbs are now validated when recieved over federation. Contributed by Jane
-Matrix".
+Matrix.".
+
+Debian changelog
+----------------
+
+Changes which affect the debian packaging files (in ``debian``) are an
+exception.
+
+In this case, you will need to add an entry to the debian changelog for the
+next release. For this, run the following command::
+
+  dch
+
+This will make up a new version number (if there isn't already an unreleased
+version in flight), and open an editor where you can add a new changelog entry.
+(Our release process will ensure that the version number and maintainer name is
+corrected for the release.)
+
+If your change affects both the debian packaging *and* files outside the debian
+directory, you will need both a regular newsfragment *and* an entry in the
+debian changelog. (Though typically such changes should be submitted as two
+separate pull requests.)
 
 Attribution
 ~~~~~~~~~~~
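
As a worked illustration of the newsfragment convention in the CONTRIBUTING.rst hunk above (a file named ``PRnumber.type`` whose content is the entry, ending in a full stop), here is a minimal Python sketch. The helper and its name are hypothetical, not part of the repository:

```python
from pathlib import Path

CHANGE_TYPES = {"feature", "bugfix", "removal", "misc"}

def write_newsfragment(pr_number, change_type, entry):
    # Hypothetical helper: creates changelog.d/<PR number>.<type> with the
    # entry text, ensuring the trailing full stop that
    # scripts-dev/check-newsfragment (later in this diff) enforces.
    if change_type not in CHANGE_TYPES:
        raise ValueError("unknown change type: %s" % (change_type,))
    if not entry.endswith("."):
        entry += "."  # newsfragments must end with a full stop
    path = Path("changelog.d") / ("%d.%s" % (pr_number, change_type))
    path.write_text(entry + "\n")
    return path

# e.g. write_newsfragment(1234, "bugfix", "Fix frobnication of widgets")
# creates changelog.d/1234.bugfix
```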
diff --git a/MANIFEST.in b/MANIFEST.in
index eb2de60f72..0500dd6b87 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -39,6 +39,7 @@ prune .circleci
 prune .coveragerc
 prune debian
 prune .codecov.yml
+prune .buildkite
 
 exclude jenkins*
 recursive-exclude jenkins *.sh
diff --git a/README.rst b/README.rst
index 9a7c04b55e..8e22109973 100644
--- a/README.rst
+++ b/README.rst
@@ -199,6 +199,8 @@ by installing the ``libjemalloc1`` package and adding this line to
 
     LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1
 
+This can make a significant difference on Python 2.7 - it's unclear how
+much of an improvement it provides on Python 3.x.
 
 Upgrading an existing Synapse
 =============================
diff --git a/changelog.d/4450.bugfix b/changelog.d/4450.bugfix
deleted file mode 100644
index b194e94c15..0000000000
--- a/changelog.d/4450.bugfix
+++ /dev/null
@@ -1,2 +0,0 @@
-The dependency checker now correctly reports a version mismatch for optional
-dependencies, instead of reporting the dependency missing.
diff --git a/changelog.d/4632.feature b/changelog.d/4632.feature
deleted file mode 100644
index d053ab5a25..0000000000
--- a/changelog.d/4632.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add basic optional sentry integration
diff --git a/changelog.d/4635.misc b/changelog.d/4635.misc
deleted file mode 100644
index 0f45957b84..0000000000
--- a/changelog.d/4635.misc
+++ /dev/null
@@ -1 +0,0 @@
-Run `black` to reformat user directory code.
diff --git a/changelog.d/4642.feature b/changelog.d/4642.feature
deleted file mode 100644
index bfbf95bcbb..0000000000
--- a/changelog.d/4642.feature
+++ /dev/null
@@ -1 +0,0 @@
-Transfer bans on room upgrade.
\ No newline at end of file
diff --git a/changelog.d/4643.misc b/changelog.d/4643.misc
deleted file mode 100644
index 556cdd2240..0000000000
--- a/changelog.d/4643.misc
+++ /dev/null
@@ -1 +0,0 @@
-Reduce number of exceptions we log
diff --git a/changelog.d/4644.misc b/changelog.d/4644.misc
deleted file mode 100644
index 84137c3412..0000000000
--- a/changelog.d/4644.misc
+++ /dev/null
@@ -1 +0,0 @@
-Introduce upsert batching functionality in the database layer.
diff --git a/changelog.d/4647.feature b/changelog.d/4647.feature
deleted file mode 100644
index 5a5b1dcebb..0000000000
--- a/changelog.d/4647.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add configurable room list publishing rules
diff --git a/changelog.d/4651.bugfix b/changelog.d/4651.bugfix
deleted file mode 100644
index 15cb1e58c4..0000000000
--- a/changelog.d/4651.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Set CORS headers on .well-known requests
diff --git a/changelog.d/4652.feature b/changelog.d/4652.feature
deleted file mode 100644
index ebe6880b21..0000000000
--- a/changelog.d/4652.feature
+++ /dev/null
@@ -1 +0,0 @@
-Support .well-known delegation when issuing certificates through ACME.
diff --git a/changelog.d/4657.misc b/changelog.d/4657.misc
deleted file mode 100644
index 8872765819..0000000000
--- a/changelog.d/4657.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix various spelling mistakes.
diff --git a/changelog.d/4666.feature b/changelog.d/4666.feature
deleted file mode 100644
index b3a3915eb0..0000000000
--- a/changelog.d/4666.feature
+++ /dev/null
@@ -1 +0,0 @@
-Allow registration and login to be handled by a worker instance.
diff --git a/changelog.d/4667.bugfix b/changelog.d/4667.bugfix
deleted file mode 100644
index 33ad00c137..0000000000
--- a/changelog.d/4667.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix kicking guest users on guest access revocation in worker mode.
diff --git a/changelog.d/4668.misc b/changelog.d/4668.misc
deleted file mode 100644
index 556cdd2240..0000000000
--- a/changelog.d/4668.misc
+++ /dev/null
@@ -1 +0,0 @@
-Reduce number of exceptions we log
diff --git a/changelog.d/4669.misc b/changelog.d/4669.misc
deleted file mode 100644
index 00a1a940ae..0000000000
--- a/changelog.d/4669.misc
+++ /dev/null
@@ -1 +0,0 @@
-Cleanup request exception logging
diff --git a/changelog.d/4670.feature b/changelog.d/4670.feature
deleted file mode 100644
index b3a3915eb0..0000000000
--- a/changelog.d/4670.feature
+++ /dev/null
@@ -1 +0,0 @@
-Allow registration and login to be handled by a worker instance.
diff --git a/changelog.d/4671.misc b/changelog.d/4671.misc
deleted file mode 100644
index 4dc18378e7..0000000000
--- a/changelog.d/4671.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve replication performance by reducing cache invalidation traffic.
diff --git a/changelog.d/4674.feature b/changelog.d/4674.feature
deleted file mode 100644
index 84630bb201..0000000000
--- a/changelog.d/4674.feature
+++ /dev/null
@@ -1 +0,0 @@
-Reduce the overhead of creating outbound federation connections over TLS by caching the TLS client options.
diff --git a/changelog.d/4676.misc b/changelog.d/4676.misc
deleted file mode 100644
index a250558e69..0000000000
--- a/changelog.d/4676.misc
+++ /dev/null
@@ -1 +0,0 @@
-Test against Postgres 9.5 as well as 9.4
diff --git a/changelog.d/4677.misc b/changelog.d/4677.misc
deleted file mode 100644
index 6f4596be4a..0000000000
--- a/changelog.d/4677.misc
+++ /dev/null
@@ -1 +0,0 @@
-Run unit tests against python 3.7.
diff --git a/changelog.d/4681.misc b/changelog.d/4681.misc
deleted file mode 100644
index 37d3588804..0000000000
--- a/changelog.d/4681.misc
+++ /dev/null
@@ -1 +0,0 @@
-Attempt to clarify installation instructions/config
diff --git a/changelog.d/4682.feature b/changelog.d/4682.feature
deleted file mode 100644
index b3a3915eb0..0000000000
--- a/changelog.d/4682.feature
+++ /dev/null
@@ -1 +0,0 @@
-Allow registration and login to be handled by a worker instance.
diff --git a/changelog.d/4688.misc b/changelog.d/4688.misc
deleted file mode 100644
index 24cd2eb424..0000000000
--- a/changelog.d/4688.misc
+++ /dev/null
@@ -1 +0,0 @@
-Clean up gitignores
diff --git a/changelog.d/4690.bugfix b/changelog.d/4690.bugfix
deleted file mode 100644
index e4cfc5e413..0000000000
--- a/changelog.d/4690.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix TaskStopped exceptions in logs when outbound requests time out.
\ No newline at end of file
diff --git a/changelog.d/4691.misc b/changelog.d/4691.misc
deleted file mode 100644
index 8eb825edf0..0000000000
--- a/changelog.d/4691.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve the logging in the pusher process.
diff --git a/changelog.d/4694.feature b/changelog.d/4694.feature
deleted file mode 100644
index d053ab5a25..0000000000
--- a/changelog.d/4694.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add basic optional sentry integration
diff --git a/changelog.d/4695.feature b/changelog.d/4695.feature
deleted file mode 100644
index 3816c9dec8..0000000000
--- a/changelog.d/4695.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add prometheus metrics for number of outgoing EDUs, by type.
diff --git a/changelog.d/4740.bugfix b/changelog.d/4740.bugfix
new file mode 100644
index 0000000000..f82bb4227a
--- /dev/null
+++ b/changelog.d/4740.bugfix
@@ -0,0 +1 @@
+'event_id' is now a required parameter in federated state requests, as per the matrix spec.
diff --git a/changelog.d/4749.bugfix b/changelog.d/4749.bugfix
new file mode 100644
index 0000000000..174e6b4e5e
--- /dev/null
+++ b/changelog.d/4749.bugfix
@@ -0,0 +1 @@
+Fix tightloop over connecting to replication server.
diff --git a/changelog.d/4752.misc b/changelog.d/4752.misc
new file mode 100644
index 0000000000..fb1e76edce
--- /dev/null
+++ b/changelog.d/4752.misc
@@ -0,0 +1 @@
+Change from TravisCI to Buildkite for CI.
diff --git a/changelog.d/4757.feature b/changelog.d/4757.feature
new file mode 100644
index 0000000000..b89029f2b4
--- /dev/null
+++ b/changelog.d/4757.feature
@@ -0,0 +1 @@
+Move server key queries to federation reader.
diff --git a/changelog.d/4757.misc b/changelog.d/4757.misc
new file mode 100644
index 0000000000..42bb66f7aa
--- /dev/null
+++ b/changelog.d/4757.misc
@@ -0,0 +1 @@
+When presence is disabled don't send over replication.
diff --git a/changelog.d/4759.feature b/changelog.d/4759.feature
new file mode 100644
index 0000000000..643ee404dc
--- /dev/null
+++ b/changelog.d/4759.feature
@@ -0,0 +1 @@
+Add support for /account/3pid REST endpoint to client_reader worker.
diff --git a/changelog.d/4763.bugfix b/changelog.d/4763.bugfix
new file mode 100644
index 0000000000..213ea44b70
--- /dev/null
+++ b/changelog.d/4763.bugfix
@@ -0,0 +1 @@
+Fix parsing of Content-Disposition headers on remote media requests and URL previews.
diff --git a/changelog.d/4765.misc b/changelog.d/4765.misc
new file mode 100644
index 0000000000..c273fd0cc4
--- /dev/null
+++ b/changelog.d/4765.misc
@@ -0,0 +1 @@
+Minor docstring fixes for MatrixFederationAgent.
\ No newline at end of file
diff --git a/changelog.d/4771.misc b/changelog.d/4771.misc
new file mode 100644
index 0000000000..8fa3401ca4
--- /dev/null
+++ b/changelog.d/4771.misc
@@ -0,0 +1 @@
+Update test_typing to use HomeserverTestCase.
diff --git a/changelog.d/4776.bugfix b/changelog.d/4776.bugfix
new file mode 100644
index 0000000000..ce3e6ce33c
--- /dev/null
+++ b/changelog.d/4776.bugfix
@@ -0,0 +1 @@
+Fix incorrect log about not persisting duplicate state event.
diff --git a/debian/changelog b/debian/changelog
index 124128920b..fd77ce13a2 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,10 @@
+matrix-synapse-py3 (0.99.2) stable; urgency=medium
+
+  * Fix overwriting of config settings on upgrade.
+  * New synapse release 0.99.2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Fri, 01 Mar 2019 10:55:08 +0000
+
 matrix-synapse-py3 (0.99.1.1) stable; urgency=medium
 
   * New synapse release 0.99.1.1
diff --git a/debian/install b/debian/install
index 3d916a9718..43dc8c6904 100644
--- a/debian/install
+++ b/debian/install
@@ -1 +1,2 @@
 debian/log.yaml etc/matrix-synapse
+debian/manage_debconf.pl /opt/venvs/matrix-synapse/lib/
diff --git a/debian/manage_debconf.pl b/debian/manage_debconf.pl
new file mode 100755
index 0000000000..be8ed32050
--- /dev/null
+++ b/debian/manage_debconf.pl
@@ -0,0 +1,130 @@
+#!/usr/bin/perl
+#
+# Interface between our config files and the debconf database.
+#
+# Usage:
+#
+#   manage_debconf.pl <action>
+#
+# where <action> can be:
+#
+#   read:    read the configuration from the yaml into debconf
+#   update:  update the yaml config according to the debconf database
+use strict;
+use warnings;
+
+use Debconf::Client::ConfModule (qw/get set/);
+
+# map from the name of a setting in our .yaml file to the relevant debconf
+# setting.
+my %MAPPINGS=(
+    server_name => 'matrix-synapse/server-name',
+    report_stats => 'matrix-synapse/report-stats',
+);
+
+# enable debug if dpkg --debug
+my $DEBUG = $ENV{DPKG_MAINTSCRIPT_DEBUG};
+
+sub read_config {
+    my @files = @_;
+
+    foreach my $file (@files)  {
+        print STDERR "reading $file\n" if $DEBUG;
+
+        open my $FH, "<", $file or next;
+
+        # rudimentary parsing which (a) avoids having to depend on a yaml library,
+        # and (b) is tolerant of yaml errors
+        while($_ = <$FH>) {
+            while (my ($setting, $debconf) = each %MAPPINGS) {
+                $setting = quotemeta $setting;
+                if(/^${setting}\s*:(.*)$/) {
+                    my $val = $1;
+
+                    # remove leading/trailing whitespace
+                    $val =~ s/^\s*//;
+                    $val =~ s/\s*$//;
+
+                    # remove surrounding quotes
+                    if ($val =~ /^"(.*)"$/ || $val =~ /^'(.*)'$/) {
+                        $val = $1;
+                    }
+
+                    print STDERR ">> $debconf = $val\n" if $DEBUG;
+                    set($debconf, $val);
+                }
+            }
+        }
+        close $FH;
+    }
+}
+
+sub update_config {
+    my @files = @_;
+
+    my %substs = ();
+    while (my ($setting, $debconf) = each %MAPPINGS) {
+        my @res = get($debconf);
+        $substs{$setting} = $res[1] if $res[0] == 0;
+    }
+
+    foreach my $file (@files) {
+        print STDERR "checking $file\n" if $DEBUG;
+
+        open my $FH, "<", $file or next;
+
+        my $updated = 0;
+
+        # read the whole file into memory
+        my @lines = <$FH>;
+
+        while (my ($setting, $val) = each %substs) {
+            $setting = quotemeta $setting;
+
+            map {
+                if (/^${setting}\s*:\s*(.*)\s*$/) {
+                    my $current = $1;
+                    if ($val ne $current) {
+                        $_ = "${setting}: $val\n";
+                        $updated = 1;
+                    }
+                }
+            } @lines;
+        }
+        close $FH;
+
+        next unless $updated;
+
+        print STDERR "updating $file\n" if $DEBUG;
+        open $FH, ">", $file or die "unable to update $file";
+        print $FH @lines;
+        close $FH;
+    }
+}
+
+
+my $cmd = $ARGV[0];
+
+my $read = 0;
+my $update = 0;
+
+if (not $cmd) {
+    die "must specify a command to perform\n";
+} elsif ($cmd eq 'read') {
+    $read = 1;
+} elsif ($cmd eq 'update') {
+    $update = 1;
+} else {
+    die "unknown command '$cmd'\n";
+}
+
+my @files = (
+    "/etc/matrix-synapse/homeserver.yaml",
+    glob("/etc/matrix-synapse/conf.d/*.yaml"),
+);
+
+if ($read) {
+    read_config(@files);
+} elsif ($update) {
+    update_config(@files);
+}
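
For readers more at home in Python than Perl, here is a rough sketch of the same "rudimentary parsing" approach used by `read_config` above: scan each YAML file line by line for the mapped settings, without depending on a yaml library. This is an illustration only; the shipped tool is the Perl script above.

```python
import re

# map from the name of a setting in the .yaml file to the relevant debconf
# setting, as in %MAPPINGS above
MAPPINGS = {
    "server_name": "matrix-synapse/server-name",
    "report_stats": "matrix-synapse/report-stats",
}

def read_config(files):
    found = {}
    for path in files:
        try:
            fh = open(path)
        except OSError:
            continue  # tolerate missing files, like the Perl `or next`
        with fh:
            for line in fh:
                for setting, debconf_key in MAPPINGS.items():
                    m = re.match(r"^%s\s*:(.*)$" % re.escape(setting), line)
                    if m:
                        # strip whitespace and any surrounding quotes
                        found[debconf_key] = m.group(1).strip().strip("'\"")
    return found
```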
diff --git a/debian/config b/debian/matrix-synapse-py3.config
index 9fb6913298..3bda3292f1 100755
--- a/debian/config
+++ b/debian/matrix-synapse-py3.config
@@ -4,6 +4,9 @@ set -e
 
 . /usr/share/debconf/confmodule
 
+# try to update the debconf db according to whatever is in the config files
+/opt/venvs/matrix-synapse/lib/manage_debconf.pl read || true
+
 db_input high matrix-synapse/server-name || true
 db_input high matrix-synapse/report-stats || true
 db_go
diff --git a/debian/matrix-synapse-py3.postinst b/debian/matrix-synapse-py3.postinst
index 0509acd0a4..c0dd7e5534 100644
--- a/debian/matrix-synapse-py3.postinst
+++ b/debian/matrix-synapse-py3.postinst
@@ -8,19 +8,36 @@ USER="matrix-synapse"
 
 case "$1" in
   configure|reconfigure)
-    # Set server name in config file
-    mkdir -p "/etc/matrix-synapse/conf.d/"
-    db_get matrix-synapse/server-name
 
-    if [ "$RET" ]; then
-        echo "server_name: $RET" > $CONFIGFILE_SERVERNAME
+    # generate template config files if they don't exist
+    mkdir -p "/etc/matrix-synapse/conf.d/"
+    if [ ! -e "$CONFIGFILE_SERVERNAME" ]; then
+        cat > "$CONFIGFILE_SERVERNAME" <<EOF
+# This file is autogenerated, and will be recreated on upgrade if it is deleted.
+# Any changes you make will be preserved.
+
+# The domain name of the server, with optional explicit port.
+# This is used by remote servers to connect to this server,
+# e.g. matrix.org, localhost:8080, etc.
+# This is also the last part of your UserID.
+#
+server_name: ''
+EOF
     fi
 
-    db_get matrix-synapse/report-stats
-    if [ "$RET" ]; then
-        echo "report_stats: $RET" > $CONFIGFILE_REPORTSTATS
+    if [ ! -e "$CONFIGFILE_REPORTSTATS" ]; then
+        cat > "$CONFIGFILE_REPORTSTATS" <<EOF
+# This file is autogenerated, and will be recreated on upgrade if it is deleted.
+# Any changes you make will be preserved.
+
+# Whether to report anonymized homeserver usage statistics.
+report_stats: false
+EOF
     fi
 
+    # update the config files according to whatever is in the debconf database
+    /opt/venvs/matrix-synapse/lib/manage_debconf.pl update
+
     if ! getent passwd $USER >/dev/null; then
       adduser --quiet --system --no-create-home --home /var/lib/matrix-synapse $USER
     fi
diff --git a/docs/ACME.md b/docs/ACME.md
index e555c7c939..46136a9f2c 100644
--- a/docs/ACME.md
+++ b/docs/ACME.md
@@ -10,13 +10,14 @@ through [Let's Encrypt](https://letsencrypt.org/) if you tell it to.
 
 In the case that your `server_name` config variable is the same as
 the hostname that the client connects to, then the same certificate can be
-used between client and federation ports without issue. 
+used between client and federation ports without issue.
 
-For a sample configuration, please inspect the new ACME section in the example
-generated config by running the `generate-config` executable. For example:
+If your configuration file does not already have an `acme` section, you can
+generate an example config by running the `generate_config` executable. For
+example:
 
 ```
-~/synapse/env3/bin/generate-config
+~/synapse/env3/bin/generate_config
 ```
 
 You will need to provide Let's Encrypt (or another ACME provider) access to
@@ -27,10 +28,9 @@ like `authbind` to allow Synapse to listen on port 80 without root access.
 (Do not run Synapse with root permissions!) Detailed instructions are
 available under "ACME setup" below.
 
-If you are already using self-signed certificates, you will need to back up
-or delete them (files `example.com.tls.crt` and `example.com.tls.key` in
-Synapse's root directory), Synapse's ACME implementation will not overwrite
-them.
+If you already have certificates, you will need to back up or delete them
+(files `example.com.tls.crt` and `example.com.tls.key` in Synapse's root
+directory), Synapse's ACME implementation will not overwrite them.
 
 You may wish to use alternate methods such as Certbot to obtain a certificate
 from Let's Encrypt, depending on your server configuration. Of course, if you
@@ -87,7 +87,6 @@ acme:
     port: 8009
 ```
 
-
 #### Authbind
 
 `authbind` allows a program which does not run as root to bind to
@@ -127,4 +126,4 @@ acme:
 
 Ensure that the certificate paths specified in `homeserver.yaml` (`tls_certificate_path` and `tls_private_key_path`) do not currently point to any files. Synapse will not provision certificates if files exist, as it does not want to overwrite existing certificates.
 
-Finally, start/restart Synapse.
\ No newline at end of file
+Finally, start/restart Synapse.
diff --git a/docs/reverse_proxy.rst b/docs/reverse_proxy.rst
index d8aaac8a08..4706061eba 100644
--- a/docs/reverse_proxy.rst
+++ b/docs/reverse_proxy.rst
@@ -79,12 +79,30 @@ Let's assume that we expect clients to connect to our server at
           SSLEngine on
           ServerName example.com;
 
-          <Location />
+          <Location /_matrix>
               ProxyPass http://127.0.0.1:8008/_matrix nocanon
               ProxyPassReverse http://127.0.0.1:8008/_matrix
           </Location>
       </VirtualHost>
 
+* HAProxy::
+
+      frontend https
+        bind 0.0.0.0:443 v4v6 ssl crt /etc/ssl/haproxy/ strict-sni alpn h2,http/1.1
+        bind :::443 ssl crt /etc/ssl/haproxy/ strict-sni alpn h2,http/1.1
+        
+        # Matrix client traffic
+        acl matrix hdr(host) -i matrix.example.com
+        use_backend matrix if matrix
+        
+      frontend matrix-federation
+        bind 0.0.0.0:8448 v4v6 ssl crt /etc/ssl/haproxy/synapse.pem alpn h2,http/1.1
+        bind :::8448 ssl crt /etc/ssl/haproxy/synapse.pem alpn h2,http/1.1
+        default_backend matrix
+        
+      backend matrix
+        server matrix 127.0.0.1:8008
+
 You will also want to set ``bind_addresses: ['127.0.0.1']`` and ``x_forwarded: true``
 for port 8008 in ``homeserver.yaml`` to ensure that client IP addresses are
 recorded correctly.
diff --git a/docs/tcp_replication.rst b/docs/tcp_replication.rst
index 73436cea62..75e723484c 100644
--- a/docs/tcp_replication.rst
+++ b/docs/tcp_replication.rst
@@ -188,7 +188,9 @@ RDATA (S)
     A single update in a stream
 
 POSITION (S)
-    The position of the stream has been updated
+    The position of the stream has been updated. Sent to the client after all
+    missing updates for a stream have been sent to the client and they're now
+    up to date.
 
 ERROR (S, C)
     There was an error
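
To make the RDATA/POSITION semantics above concrete, here is an illustrative client-side handler. The `<COMMAND> <stream> <token> ...` line layout is an assumption made for this sketch, not the documented wire format, and the function is not part of Synapse; see docs/tcp_replication.rst for the real protocol.

```python
def handle_command(line, positions):
    # positions: dict mapping stream name -> last-seen stream token
    cmd, _, rest = line.partition(" ")
    if cmd == "RDATA":
        stream, token, row = rest.split(" ", 2)
        positions[stream] = int(token)  # a single update in the stream
    elif cmd == "POSITION":
        stream, token = rest.split(" ", 1)
        # all missing updates have been sent; client is now up to date
        positions[stream] = int(token)
    elif cmd == "ERROR":
        raise RuntimeError("replication error: %s" % (rest,))
```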
diff --git a/docs/workers.rst b/docs/workers.rst
index 3ba5879f76..3c18db1b19 100644
--- a/docs/workers.rst
+++ b/docs/workers.rst
@@ -182,6 +182,7 @@ endpoints matching the following regular expressions::
     ^/_matrix/federation/v1/event_auth/
     ^/_matrix/federation/v1/exchange_third_party_invite/
     ^/_matrix/federation/v1/send/
+    ^/_matrix/key/v2/query
 
 The above endpoints should all be routed to the federation_reader worker by the
 reverse-proxy configuration.
@@ -223,6 +224,7 @@ following regular expressions::
     ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/members$
     ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state$
     ^/_matrix/client/(api/v1|r0|unstable)/login$
+    ^/_matrix/client/(api/v1|r0|unstable)/account/3pid$
 
 Additionally, the following REST endpoints can be handled, but all requests must
 be routed to the same instance::
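
The endpoint lists above are plain regular expressions, so request paths can be checked against them directly. For example, the newly added client_reader route (pattern taken verbatim from the list above; the paths are examples):

```python
import re

pattern = re.compile(r"^/_matrix/client/(api/v1|r0|unstable)/account/3pid$")

assert pattern.match("/_matrix/client/r0/account/3pid")
assert not pattern.match("/_matrix/client/r0/account/3pid/email/requestToken")
```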
diff --git a/scripts-dev/check-newsfragment b/scripts-dev/check-newsfragment
new file mode 100755
index 0000000000..e4a22bae61
--- /dev/null
+++ b/scripts-dev/check-newsfragment
@@ -0,0 +1,41 @@
+#!/bin/bash
+#
+# A script which checks that an appropriate news file has been added on this
+# branch.
+
+set -e
+
+# make sure that origin/develop is up to date
+git remote set-branches --add origin develop
+git fetch --depth=1 origin develop
+
+UPSTREAM=origin/develop
+
+# if there are changes in the debian directory, check that the debian changelog
+# has been updated
+if ! git diff --quiet $UPSTREAM... -- debian; then
+    if git diff --quiet $UPSTREAM... -- debian/changelog; then
+        echo "Updates to debian directory, but no update to the changelog." >&2
+        exit 1
+    fi
+fi
+
+# if there are changes *outside* the debian directory, check that the
+# newsfragments have been updated.
+if git diff --name-only $UPSTREAM... | grep -qv '^debian/'; then
+    tox -e check-newsfragment
+fi
+
+echo
+echo "--------------------------"
+echo
+
+# check that any new newsfiles on this branch end with a full stop.
+for f in `git diff --name-only $UPSTREAM... -- changelog.d`; do
+    lastchar=`tr -d '\n' < $f | tail -c 1`
+    if [ $lastchar != '.' ]; then
+        echo -e "\e[31mERROR: newsfragment $f does not end with a '.'\e[39m" >&2
+        exit 1
+    fi
+done
+
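
The trailing-full-stop check in the loop above can be expressed in a couple of lines of Python (illustration only; the repository uses the bash script as shown):

```python
def newsfragment_ok(path):
    # mirrors `tr -d '\n' < $f | tail -c 1` followed by the '.' comparison
    with open(path) as f:
        return f.read().replace("\n", "").endswith(".")
```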
diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db
index 3c7b606323..2fa01d1a18 100755
--- a/scripts/synapse_port_db
+++ b/scripts/synapse_port_db
@@ -53,6 +53,7 @@ BOOLEAN_COLUMNS = {
     "group_summary_users": ["is_public"],
     "group_roles": ["is_public"],
     "local_group_membership": ["is_publicised", "is_admin"],
+    "e2e_room_keys": ["is_verified"],
 }
 
 
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 2004375f98..25c10244d3 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -27,4 +27,4 @@ try:
 except ImportError:
     pass
 
-__version__ = "0.99.1.1"
+__version__ = "0.99.2"
diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py
index 043b48f8f3..5070094cad 100644
--- a/synapse/app/client_reader.py
+++ b/synapse/app/client_reader.py
@@ -48,6 +48,7 @@ from synapse.rest.client.v1.room import (
     RoomMemberListRestServlet,
     RoomStateRestServlet,
 )
+from synapse.rest.client.v2_alpha.account import ThreepidRestServlet
 from synapse.rest.client.v2_alpha.register import RegisterRestServlet
 from synapse.server import HomeServer
 from synapse.storage.engines import create_engine
@@ -96,6 +97,7 @@ class ClientReaderServer(HomeServer):
                     RoomEventContextServlet(self).register(resource)
                     RegisterRestServlet(self).register(resource)
                     LoginRestServlet(self).register(resource)
+                    ThreepidRestServlet(self).register(resource)
 
                     resources.update({
                         "/_matrix/client/r0": resource,
diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py
index b116c17669..7da79dc827 100644
--- a/synapse/app/federation_reader.py
+++ b/synapse/app/federation_reader.py
@@ -21,7 +21,7 @@ from twisted.web.resource import NoResource
 
 import synapse
 from synapse import events
-from synapse.api.urls import FEDERATION_PREFIX
+from synapse.api.urls import FEDERATION_PREFIX, SERVER_KEY_V2_PREFIX
 from synapse.app import _base
 from synapse.config._base import ConfigError
 from synapse.config.homeserver import HomeServerConfig
@@ -44,6 +44,7 @@ from synapse.replication.slave.storage.registration import SlavedRegistrationSto
 from synapse.replication.slave.storage.room import RoomStore
 from synapse.replication.slave.storage.transactions import SlavedTransactionStore
 from synapse.replication.tcp.client import ReplicationClientHandler
+from synapse.rest.key.v2 import KeyApiV2Resource
 from synapse.server import HomeServer
 from synapse.storage.engines import create_engine
 from synapse.util.httpresourcetree import create_resource_tree
@@ -99,6 +100,9 @@ class FederationReaderServer(HomeServer):
                         ),
                     })
 
+                if name in ["keys", "federation"]:
+                    resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self)
+
         root_resource = create_resource_tree(resources, NoResource())
 
         _base.listen_tcp(
diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py
index d5b954361d..8479fee738 100644
--- a/synapse/app/frontend_proxy.py
+++ b/synapse/app/frontend_proxy.py
@@ -21,7 +21,7 @@ from twisted.web.resource import NoResource
 
 import synapse
 from synapse import events
-from synapse.api.errors import SynapseError
+from synapse.api.errors import HttpResponseException, SynapseError
 from synapse.app import _base
 from synapse.config._base import ConfigError
 from synapse.config.homeserver import HomeServerConfig
@@ -66,10 +66,15 @@ class PresenceStatusStubServlet(ClientV1RestServlet):
         headers = {
             "Authorization": auth_headers,
         }
-        result = yield self.http_client.get_json(
-            self.main_uri + request.uri.decode('ascii'),
-            headers=headers,
-        )
+
+        try:
+            result = yield self.http_client.get_json(
+                self.main_uri + request.uri.decode('ascii'),
+                headers=headers,
+            )
+        except HttpResponseException as e:
+            raise e.to_synapse_error()
+
         defer.returnValue((200, result))
 
     @defer.inlineCallbacks
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 05a97979ec..e8b6cc3114 100755
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -555,6 +555,9 @@ def run(hs):
                 stats["memory_rss"] += process.memory_info().rss
                 stats["cpu_average"] += int(process.cpu_percent(interval=None))
 
+        stats["database_engine"] = hs.get_datastore().database_engine_name
+        stats["database_server_version"] = hs.get_datastore().get_server_version()
+
         logger.info("Reporting stats to matrix.org: %s" % (stats,))
         try:
             yield hs.get_simple_http_client().put_json(
diff --git a/synapse/config/captcha.py b/synapse/config/captcha.py
index 4064891ffb..d25196be08 100644
--- a/synapse/config/captcha.py
+++ b/synapse/config/captcha.py
@@ -47,5 +47,5 @@ class CaptchaConfig(Config):
         #captcha_bypass_secret: "YOUR_SECRET_HERE"
 
         # The API endpoint to use for verifying m.login.recaptcha responses.
-        recaptcha_siteverify_api: "https://www.google.com/recaptcha/api/siteverify"
+        recaptcha_siteverify_api: "https://www.recaptcha.net/recaptcha/api/siteverify"
         """
diff --git a/synapse/config/tls.py b/synapse/config/tls.py
index 8d5d287357..40045de7ac 100644
--- a/synapse/config/tls.py
+++ b/synapse/config/tls.py
@@ -19,6 +19,8 @@ import warnings
 from datetime import datetime
 from hashlib import sha256
 
+import six
+
 from unpaddedbase64 import encode_base64
 
 from OpenSSL import crypto
@@ -36,9 +38,11 @@ class TlsConfig(Config):
             acme_config = {}
 
         self.acme_enabled = acme_config.get("enabled", False)
-        self.acme_url = acme_config.get(
+
+        # hyperlink complains on py2 if this is not a Unicode
+        self.acme_url = six.text_type(acme_config.get(
             "url", u"https://acme-v01.api.letsencrypt.org/directory"
-        )
+        ))
         self.acme_port = acme_config.get("port", 80)
         self.acme_bind_addresses = acme_config.get("bind_addresses", ['::', '0.0.0.0'])
         self.acme_reprovision_threshold = acme_config.get("reprovision_threshold", 30)
@@ -55,7 +59,7 @@ class TlsConfig(Config):
                 )
             if not self.tls_private_key_file:
                 raise ConfigError(
-                    "tls_certificate_path must be specified if TLS-enabled listeners are "
+                    "tls_private_key_path must be specified if TLS-enabled listeners are "
                     "configured."
                 )
 
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py
index cce40fdd2d..7474fd515f 100644
--- a/synapse/crypto/keyring.py
+++ b/synapse/crypto/keyring.py
@@ -17,6 +17,7 @@
 import logging
 from collections import namedtuple
 
+from six import raise_from
 from six.moves import urllib
 
 from signedjson.key import (
@@ -35,7 +36,12 @@ from unpaddedbase64 import decode_base64
 
 from twisted.internet import defer
 
-from synapse.api.errors import Codes, RequestSendFailed, SynapseError
+from synapse.api.errors import (
+    Codes,
+    HttpResponseException,
+    RequestSendFailed,
+    SynapseError,
+)
 from synapse.util import logcontext, unwrapFirstError
 from synapse.util.logcontext import (
     LoggingContext,
@@ -44,6 +50,7 @@ from synapse.util.logcontext import (
     run_in_background,
 )
 from synapse.util.metrics import Measure
+from synapse.util.retryutils import NotRetryingDestination
 
 logger = logging.getLogger(__name__)
 
@@ -367,13 +374,18 @@ class Keyring(object):
                     server_name_and_key_ids, perspective_name, perspective_keys
                 )
                 defer.returnValue(result)
+            except KeyLookupError as e:
+                logger.warning(
+                    "Key lookup failed from %r: %s", perspective_name, e,
+                )
             except Exception as e:
                 logger.exception(
                     "Unable to get key from %r: %s %s",
                     perspective_name,
                     type(e).__name__, str(e),
                 )
-                defer.returnValue({})
+
+            defer.returnValue({})
 
         results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
             [
@@ -421,21 +433,30 @@ class Keyring(object):
         # TODO(mark): Set the minimum_valid_until_ts to that needed by
         # the events being validated or the current time if validating
         # an incoming request.
-        query_response = yield self.client.post_json(
-            destination=perspective_name,
-            path="/_matrix/key/v2/query",
-            data={
-                u"server_keys": {
-                    server_name: {
-                        key_id: {
-                            u"minimum_valid_until_ts": 0
-                        } for key_id in key_ids
+        try:
+            query_response = yield self.client.post_json(
+                destination=perspective_name,
+                path="/_matrix/key/v2/query",
+                data={
+                    u"server_keys": {
+                        server_name: {
+                            key_id: {
+                                u"minimum_valid_until_ts": 0
+                            } for key_id in key_ids
+                        }
+                        for server_name, key_ids in server_names_and_key_ids
                     }
-                    for server_name, key_ids in server_names_and_key_ids
-                }
-            },
-            long_retries=True,
-        )
+                },
+                long_retries=True,
+            )
+        except (NotRetryingDestination, RequestSendFailed) as e:
+            raise_from(
+                KeyLookupError("Failed to connect to remote server"), e,
+            )
+        except HttpResponseException as e:
+            raise_from(
+                KeyLookupError("Remote server returned an error"), e,
+            )
 
         keys = {}
 
@@ -502,11 +523,20 @@ class Keyring(object):
             if requested_key_id in keys:
                 continue
 
-            response = yield self.client.get_json(
-                destination=server_name,
-                path="/_matrix/key/v2/server/" + urllib.parse.quote(requested_key_id),
-                ignore_backoff=True,
-            )
+            try:
+                response = yield self.client.get_json(
+                    destination=server_name,
+                    path="/_matrix/key/v2/server/" + urllib.parse.quote(requested_key_id),
+                    ignore_backoff=True,
+                )
+            except (NotRetryingDestination, RequestSendFailed) as e:
+                raise_from(
+                    KeyLookupError("Failed to connect to remote server"), e,
+                )
+            except HttpResponseException as e:
+                raise_from(
+                    KeyLookupError("Remote server returned an error"), e,
+                )
 
             if (u"signatures" not in response
                     or server_name not in response[u"signatures"]):
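
The try/except additions above normalise transport-level failures (backed-off destinations, send failures, non-200 responses) into KeyLookupError while keeping the original exception as the cause. A minimal sketch of the idiom, assuming a stand-in fetch callable; only six.raise_from is the real API here:

from six import raise_from

class KeyLookupError(Exception):
    pass

def fetch_server_keys(post_json):
    try:
        return post_json()
    except ConnectionError as e:  # stand-in for NotRetryingDestination etc.
        raise_from(KeyLookupError("Failed to connect to remote server"), e)

The per-perspective caller can then catch KeyLookupError specifically and log a warning, while genuinely unexpected errors still reach the generic Exception handler.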
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 4e4f58b418..58e04d81ab 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -33,6 +33,7 @@ from synapse.api.constants import (
 )
 from synapse.api.errors import (
     CodeMessageException,
+    Codes,
     FederationDeniedError,
     HttpResponseException,
     SynapseError,
@@ -792,10 +793,25 @@ class FederationClient(FederationBase):
             defer.returnValue(content)
         except HttpResponseException as e:
             if e.code in [400, 404]:
+                err = e.to_synapse_error()
+
+                # If we receive an error response that isn't a generic error, we
+                # assume that the remote understands the v2 invite API and this
+                # is a legitimate error.
+                if err.errcode != Codes.UNKNOWN:
+                    raise err
+
+                # Otherwise, we assume that the remote server doesn't understand
+                # the v2 invite API.
+
                 if room_version in (RoomVersions.V1, RoomVersions.V2):
                     pass  # We'll fall through
                 else:
-                    raise Exception("Remote server is too old")
+                    raise SynapseError(
+                        400,
+                        "User's homeserver does not support this room version",
+                        Codes.UNSUPPORTED_ROOM_VERSION,
+                    )
             elif e.code == 403:
                 raise e.to_synapse_error()
             else:
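
The errcode check above distinguishes a remote that understood the v2 invite API and rejected the request (a specific errcode) from one that has no such endpoint (which surfaces as a generic M_UNKNOWN). A hedged sketch of the fallback, with illustrative stand-ins for the send functions and error class:

V1_API_ROOM_VERSIONS = ("1", "2")  # versions servable over the v1 invite API

class HttpResponseError(Exception):
    def __init__(self, code, errcode):
        super(HttpResponseError, self).__init__(code, errcode)
        self.code = code
        self.errcode = errcode

def send_invite(room_version, send_v2, send_v1):
    try:
        return send_v2()
    except HttpResponseError as e:
        if e.code not in (400, 404):
            raise
        if e.errcode != "M_UNKNOWN":
            # The remote understood v2 and really rejected the invite.
            raise
        if room_version not in V1_API_ROOM_VERSIONS:
            raise RuntimeError("remote does not support this room version")
        return send_v1()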
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 3da86d4ba6..81f3b4b1ff 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -25,9 +25,10 @@ from twisted.internet import defer
 from twisted.internet.abstract import isIPAddress
 from twisted.python import failure
 
-from synapse.api.constants import EventTypes, Membership
+from synapse.api.constants import KNOWN_ROOM_VERSIONS, EventTypes, Membership
 from synapse.api.errors import (
     AuthError,
+    Codes,
     FederationError,
     IncompatibleRoomVersionError,
     NotFoundError,
@@ -239,8 +240,9 @@ class FederationServer(FederationBase):
                         f = failure.Failure()
                         pdu_results[event_id] = {"error": str(e)}
                         logger.error(
-                            "Failed to handle PDU %s: %s",
-                            event_id, f.getTraceback().rstrip(),
+                            "Failed to handle PDU %s",
+                            event_id,
+                            exc_info=(f.type, f.value, f.getTracebackObject()),
                         )
 
         yield concurrently_execute(
@@ -386,6 +388,13 @@ class FederationServer(FederationBase):
 
     @defer.inlineCallbacks
     def on_invite_request(self, origin, content, room_version):
+        if room_version not in KNOWN_ROOM_VERSIONS:
+            raise SynapseError(
+                400,
+                "Homeserver does not support this room version",
+                Codes.UNSUPPORTED_ROOM_VERSION,
+            )
+
         format_ver = room_version_to_event_format(room_version)
 
         pdu = event_from_pdu_json(content, format_ver)
@@ -877,6 +886,9 @@ class ReplicationFederationHandlerRegistry(FederationHandlerRegistry):
     def on_edu(self, edu_type, origin, content):
         """Overrides FederationHandlerRegistry
         """
+        if not self.config.use_presence and edu_type == "m.presence":
+            return
+
         handler = self.edu_handlers.get(edu_type)
         if handler:
             return super(ReplicationFederationHandlerRegistry, self).on_edu(
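
This file gains two guards: invites for unknown room versions are rejected up front with M_UNSUPPORTED_ROOM_VERSION, and m.presence EDUs are dropped before handler dispatch when presence is disabled. A toy sketch of the dispatch-side filter (names illustrative):

def dispatch_edu(edu_handlers, use_presence, edu_type, origin, content):
    # Presence traffic is silently discarded when the feature is off,
    # saving the handler lookup and any replication fan-out.
    if not use_presence and edu_type == "m.presence":
        return
    handler = edu_handlers.get(edu_type)
    if handler is not None:
        return handler(origin, content)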
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index a2396ab466..ebb81be377 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -393,7 +393,7 @@ class FederationStateServlet(BaseFederationServlet):
         return self.handler.on_context_state_request(
             origin,
             context,
-            parse_string_from_args(query, "event_id", None),
+            parse_string_from_args(query, "event_id", None, required=True),
         )
 
 
@@ -404,7 +404,7 @@ class FederationStateIdsServlet(BaseFederationServlet):
         return self.handler.on_state_ids_request(
             origin,
             room_id,
-            parse_string_from_args(query, "event_id", None),
+            parse_string_from_args(query, "event_id", None, required=True),
         )
 
 
@@ -736,7 +736,8 @@ class PublicRoomList(BaseFederationServlet):
 
         data = yield self.handler.get_local_public_room_list(
             limit, since_token,
-            network_tuple=network_tuple
+            network_tuple=network_tuple,
+            from_federation=True,
         )
         defer.returnValue((200, data))
 
diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py
index 633c865ed8..a7eaead56b 100644
--- a/synapse/groups/groups_server.py
+++ b/synapse/groups/groups_server.py
@@ -113,8 +113,7 @@ class GroupsServerHandler(object):
             room_id = room_entry["room_id"]
             joined_users = yield self.store.get_users_in_room(room_id)
             entry = yield self.room_list_handler.generate_room_entry(
-                room_id, len(joined_users),
-                with_alias=False, allow_private=True,
+                room_id, len(joined_users), with_alias=False, allow_private=True,
             )
             entry = dict(entry)  # so we don't change whats cached
             entry.pop("room_id", None)
@@ -544,8 +543,7 @@ class GroupsServerHandler(object):
 
             joined_users = yield self.store.get_users_in_room(room_id)
             entry = yield self.room_list_handler.generate_room_entry(
-                room_id, len(joined_users),
-                with_alias=False, allow_private=True,
+                room_id, len(joined_users), with_alias=False, allow_private=True,
             )
 
             if not entry:
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index de839ca527..0425380e55 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -770,10 +770,26 @@ class FederationHandler(BaseHandler):
             set(auth_events.keys()) | set(state_events.keys())
         )
 
+        # We now have a chunk of events plus associated state and auth chain to
+        # persist. We do the persistence in two steps:
+        #   1. Auth events and state get persisted as outliers, plus the
+        #      backward extremities get persisted (as non-outliers).
+        #   2. The rest of the events in the chunk get persisted one by one, as
+        #      each one depends on the previous event for its state.
+        #
+        # The important thing is that events in the chunk get persisted as
+        # non-outliers, including when those events are also in the state or
+        # auth chain. Caution must therefore be taken to ensure that they are
+        # not accidentally marked as outliers.
+
+        # Step 1a: persist auth events that *don't* appear in the chunk
         ev_infos = []
         for a in auth_events.values():
-            if a.event_id in seen_events:
+            # We only want to persist auth events as outliers that we haven't
+            # seen and aren't about to persist as part of the backfilled chunk.
+            if a.event_id in seen_events or a.event_id in event_map:
                 continue
+
             a.internal_metadata.outlier = True
             ev_infos.append({
                 "event": a,
@@ -785,14 +801,21 @@ class FederationHandler(BaseHandler):
                 }
             })
 
+        # Step 1b: persist the events in the chunk we fetched state for (i.e.
+        # the backwards extremities) as non-outliers.
         for e_id in events_to_state:
+            # For paranoia we ensure that these events are marked as
+            # non-outliers
+            ev = event_map[e_id]
+            assert(not ev.internal_metadata.is_outlier())
+
             ev_infos.append({
-                "event": event_map[e_id],
+                "event": ev,
                 "state": events_to_state[e_id],
                 "auth_events": {
                     (auth_events[a_id].type, auth_events[a_id].state_key):
                     auth_events[a_id]
-                    for a_id in event_map[e_id].auth_event_ids()
+                    for a_id in ev.auth_event_ids()
                     if a_id in auth_events
                 }
             })
@@ -802,12 +825,17 @@ class FederationHandler(BaseHandler):
             backfilled=True,
         )
 
+        # Step 2: Persist the rest of the events in the chunk one by one
         events.sort(key=lambda e: e.depth)
 
         for event in events:
             if event in events_to_state:
                 continue
 
+            # For paranoia we ensure that these events are marked as
+            # non-outliers
+            assert(not event.internal_metadata.is_outlier())
+
             # We store these one at a time since each event depends on the
             # previous to work out the state.
             # TODO: We can probably do something more clever here.
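
A condensed sketch of the two-step persistence strategy the comments above describe; the event objects and persist callback are hypothetical simplifications of Synapse's real types:

def persist_backfilled_chunk(chunk, auth_events, state_for_event, seen, persist):
    chunk_ids = set(e.event_id for e in chunk)

    # Step 1a: auth/state events not already seen and not in the chunk
    # itself get persisted as outliers.
    outliers = [
        a for a in auth_events
        if a.event_id not in seen and a.event_id not in chunk_ids
    ]
    for a in outliers:
        a.outlier = True

    # Step 1b: the backwards extremities (chunk events we fetched state
    # for) go in alongside them, explicitly as non-outliers.
    extremities = [e for e in chunk if e.event_id in state_for_event]
    persist(outliers + extremities, backfilled=True)

    # Step 2: the rest of the chunk, shallowest first, one at a time,
    # since each event's state derives from its predecessor.
    rest = sorted(
        (e for e in chunk if e.event_id not in state_for_event),
        key=lambda e: e.depth,
    )
    for event in rest:
        assert not event.outlier
        persist([event], backfilled=True)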
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 3981fe69ce..c762b58902 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -436,10 +436,11 @@ class EventCreationHandler(object):
 
         if event.is_state():
             prev_state = yield self.deduplicate_state_event(event, context)
-            logger.info(
-                "Not bothering to persist duplicate state event %s", event.event_id,
-            )
             if prev_state is not None:
+                logger.info(
+                    "Not bothering to persist state event %s duplicated by %s",
+                    event.event_id, prev_state.event_id,
+                )
                 defer.returnValue(prev_state)
 
         yield self.handle_new_client_event(
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index 9d257ecf31..e4fdae9266 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -136,7 +136,11 @@ class PaginationHandler(object):
             logger.info("[purge] complete")
             self._purges_by_id[purge_id].status = PurgeStatus.STATUS_COMPLETE
         except Exception:
-            logger.error("[purge] failed: %s", Failure().getTraceback().rstrip())
+            f = Failure()
+            logger.error(
+                "[purge] failed",
+                exc_info=(f.type, f.value, f.getTracebackObject()),
+            )
             self._purges_by_id[purge_id].status = PurgeStatus.STATUS_FAILED
         finally:
             self._purges_in_progress_by_room.discard(room_id)
@@ -254,7 +258,7 @@ class PaginationHandler(object):
             })
 
         state = None
-        if event_filter and event_filter.lazy_load_members():
+        if event_filter and event_filter.lazy_load_members() and len(events) > 0:
             # TODO: remove redundant members
 
             # FIXME: we also care about invite targets etc.
diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py
index 4c2690ba26..696469732c 100644
--- a/synapse/handlers/receipts.py
+++ b/synapse/handlers/receipts.py
@@ -16,8 +16,8 @@ import logging
 
 from twisted.internet import defer
 
+from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.types import get_domain_from_id
-from synapse.util import logcontext
 
 from ._base import BaseHandler
 
@@ -59,7 +59,9 @@ class ReceiptsHandler(BaseHandler):
         if is_new:
             # fire off a process in the background to send the receipt to
             # remote servers
-            self._push_remotes([receipt])
+            run_as_background_process(
+                'push_receipts_to_remotes', self._push_remotes, receipt
+            )
 
     @defer.inlineCallbacks
     def _received_remote_receipt(self, origin, content):
@@ -125,44 +127,42 @@ class ReceiptsHandler(BaseHandler):
 
         defer.returnValue(True)
 
-    @logcontext.preserve_fn   # caller should not yield on this
     @defer.inlineCallbacks
-    def _push_remotes(self, receipts):
-        """Given a list of receipts, works out which remote servers should be
+    def _push_remotes(self, receipt):
+        """Given a receipt, works out which remote servers should be
         poked and pokes them.
         """
         try:
-            # TODO: Some of this stuff should be coallesced.
-            for receipt in receipts:
-                room_id = receipt["room_id"]
-                receipt_type = receipt["receipt_type"]
-                user_id = receipt["user_id"]
-                event_ids = receipt["event_ids"]
-                data = receipt["data"]
-
-                users = yield self.state.get_current_user_in_room(room_id)
-                remotedomains = set(get_domain_from_id(u) for u in users)
-                remotedomains = remotedomains.copy()
-                remotedomains.discard(self.server_name)
-
-                logger.debug("Sending receipt to: %r", remotedomains)
-
-                for domain in remotedomains:
-                    self.federation.send_edu(
-                        destination=domain,
-                        edu_type="m.receipt",
-                        content={
-                            room_id: {
-                                receipt_type: {
-                                    user_id: {
-                                        "event_ids": event_ids,
-                                        "data": data,
-                                    }
+            # TODO: optimise this to move some of the work to the workers.
+            room_id = receipt["room_id"]
+            receipt_type = receipt["receipt_type"]
+            user_id = receipt["user_id"]
+            event_ids = receipt["event_ids"]
+            data = receipt["data"]
+
+            users = yield self.state.get_current_user_in_room(room_id)
+            remotedomains = set(get_domain_from_id(u) for u in users)
+            remotedomains = remotedomains.copy()
+            remotedomains.discard(self.server_name)
+
+            logger.debug("Sending receipt to: %r", remotedomains)
+
+            for domain in remotedomains:
+                self.federation.send_edu(
+                    destination=domain,
+                    edu_type="m.receipt",
+                    content={
+                        room_id: {
+                            receipt_type: {
+                                user_id: {
+                                    "event_ids": event_ids,
+                                    "data": data,
                                 }
-                            },
+                            }
                         },
-                        key=(room_id, receipt_type, user_id),
-                    )
+                    },
+                    key=(room_id, receipt_type, user_id),
+                )
         except Exception:
             logger.exception("Error pushing receipts to remote servers")
 
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index 24a4cb5a83..c0e06929bd 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -460,7 +460,7 @@ class RegistrationHandler(BaseHandler):
         lines = response.split('\n')
         json = {
             "valid": lines[0] == 'true',
-            "error_url": "http://www.google.com/recaptcha/api/challenge?" +
+            "error_url": "http://www.recaptcha.net/recaptcha/api/challenge?" +
                          "error=%s" % lines[1]
         }
         defer.returnValue(json)
@@ -471,7 +471,7 @@ class RegistrationHandler(BaseHandler):
         Used only by c/s api v1
         """
         data = yield self.captcha_client.post_urlencoded_get_raw(
-            "http://www.google.com:80/recaptcha/api/verify",
+            "http://www.recaptcha.net:80/recaptcha/api/verify",
             args={
                 'privatekey': private_key,
                 'remoteip': ip_addr,
diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py
index 13e212d669..afa508d729 100644
--- a/synapse/handlers/room_list.py
+++ b/synapse/handlers/room_list.py
@@ -50,16 +50,17 @@ class RoomListHandler(BaseHandler):
 
     def get_local_public_room_list(self, limit=None, since_token=None,
                                    search_filter=None,
-                                   network_tuple=EMPTY_THIRD_PARTY_ID,):
+                                   network_tuple=EMPTY_THIRD_PARTY_ID,
+                                   from_federation=False):
         """Generate a local public room list.
 
         There are multiple different lists: the main one plus one per third
         party network. A client can ask for a specific list or to return all.
 
         Args:
-            limit (int)
-            since_token (str)
-            search_filter (dict)
+            limit (int|None)
+            since_token (str|None)
+            search_filter (dict|None)
             network_tuple (ThirdPartyInstanceID): Which public list to use.
                 This can be (None, None) to indicate the main list, or a particular
                 appservice and network id to use an appservice specific one.
@@ -87,14 +88,30 @@ class RoomListHandler(BaseHandler):
         return self.response_cache.wrap(
             key,
             self._get_public_room_list,
-            limit, since_token, network_tuple=network_tuple,
+            limit, since_token,
+            network_tuple=network_tuple, from_federation=from_federation,
         )
 
     @defer.inlineCallbacks
     def _get_public_room_list(self, limit=None, since_token=None,
                               search_filter=None,
                               network_tuple=EMPTY_THIRD_PARTY_ID,
+                              from_federation=False,
                               timeout=None,):
+        """Generate a public room list.
+        Args:
+            limit (int|None): Maximum number of rooms to return.
+            since_token (str|None)
+            search_filter (dict|None): Dictionary to filter rooms by.
+            network_tuple (ThirdPartyInstanceID): Which public list to use.
+                This can be (None, None) to indicate the main list, or a particular
+                appservice and network id to use an appservice specific one.
+                Setting to None returns all public rooms across all lists.
+            from_federation (bool): Whether this request originated from a
+                federating server or a client. Used for room filtering.
+            timeout (int|None): Number of seconds to wait for a response
+                before timing out.
+        """
         if since_token and since_token != "END":
             since_token = RoomListNextBatch.from_token(since_token)
         else:
@@ -217,7 +234,8 @@ class RoomListHandler(BaseHandler):
             yield concurrently_execute(
                 lambda r: self._append_room_entry_to_chunk(
                     r, rooms_to_num_joined[r],
-                    chunk, limit, search_filter
+                    chunk, limit, search_filter,
+                    from_federation=from_federation,
                 ),
                 batch, 5,
             )
@@ -288,23 +306,51 @@ class RoomListHandler(BaseHandler):
 
     @defer.inlineCallbacks
     def _append_room_entry_to_chunk(self, room_id, num_joined_users, chunk, limit,
-                                    search_filter):
+                                    search_filter, from_federation=False):
         """Generate the entry for a room in the public room list and append it
         to the `chunk` if it matches the search filter
+
+        Args:
+            room_id (str): The ID of the room.
+            num_joined_users (int): The number of joined users in the room.
+            chunk (list)
+            limit (int|None): Maximum number of rooms to display. The
+                function returns early once the chunk holds more than
+                limit + 1 entries.
+            search_filter (dict|None)
+            from_federation (bool): Whether this request originated from a
+                federating server or a client. Used for room filtering.
         """
         if limit and len(chunk) > limit + 1:
             # We've already got enough, so lets just drop it.
             return
 
         result = yield self.generate_room_entry(room_id, num_joined_users)
+        if not result:
+            return
+
+        if from_federation and not result.get("m.federate", True):
+            # This is a room that other servers cannot join. Do not show them
+            # this room.
+            return
 
-        if result and _matches_room_entry(result, search_filter):
+        if _matches_room_entry(result, search_filter):
             chunk.append(result)
 
     @cachedInlineCallbacks(num_args=1, cache_context=True)
     def generate_room_entry(self, room_id, num_joined_users, cache_context,
                             with_alias=True, allow_private=False):
         """Returns the entry for a room
+
+        Args:
+            room_id (str): The room's ID.
+            num_joined_users (int): Number of users in the room.
+            cache_context: Information for cached responses.
+            with_alias (bool): Whether to return the room's aliases in the result.
+            allow_private (bool): Whether invite-only rooms should be shown.
+
+        Returns:
+            Deferred[dict|None]: A room entry as a dictionary, or None if
+            this room should not be shown publicly.
         """
         result = {
             "room_id": room_id,
@@ -318,6 +364,7 @@ class RoomListHandler(BaseHandler):
         event_map = yield self.store.get_events([
             event_id for key, event_id in iteritems(current_state_ids)
             if key[0] in (
+                EventTypes.Create,
                 EventTypes.JoinRules,
                 EventTypes.Name,
                 EventTypes.Topic,
@@ -334,12 +381,17 @@ class RoomListHandler(BaseHandler):
         }
 
         # Double check that this is actually a public room.
+
         join_rules_event = current_state.get((EventTypes.JoinRules, ""))
         if join_rules_event:
             join_rule = join_rules_event.content.get("join_rule", None)
             if not allow_private and join_rule and join_rule != JoinRules.PUBLIC:
                 defer.returnValue(None)
 
+        # Record whether this room is open to federation users or not
+        create_event = current_state.get((EventTypes.Create, ""))
+        result["m.federate"] = create_event.content.get("m.federate", True)
+
         if with_alias:
             aliases = yield self.store.get_aliases_for_room(
                 room_id, on_invalidate=cache_context.invalidate
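
The new from_federation flag hides non-federatable rooms from remote directory requests: a room whose create event set "m.federate": false can never be joined from another server, so advertising it over federation is pointless. A small sketch of the filter:

def should_list_room(entry, from_federation):
    if entry is None:
        return False  # room already excluded (e.g. private join rules)
    if from_federation and not entry.get("m.federate", True):
        return False  # remote users can never join; don't advertise it
    return True

assert should_list_room({"room_id": "!a:x", "m.federate": False}, True) is False
assert should_list_room({"room_id": "!a:x", "m.federate": False}, False) is True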
diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py
index 384d8a37a2..1334c630cc 100644
--- a/synapse/http/federation/matrix_federation_agent.py
+++ b/synapse/http/federation/matrix_federation_agent.py
@@ -68,9 +68,13 @@ class MatrixFederationAgent(object):
             TLS policy to use for fetching .well-known files. None to use a default
             (browser-like) implementation.
 
-        srv_resolver (SrvResolver|None):
+        _srv_resolver (SrvResolver|None):
             SRVResolver impl to use for looking up SRV records. None to use a default
             implementation.
+
+        _well_known_cache (TTLCache|None):
+            TTLCache impl for storing cached well-known lookups. None to use a default
+            implementation.
     """
 
     def __init__(
diff --git a/synapse/http/server.py b/synapse/http/server.py
index 6c67a25a11..16fb7935da 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -169,18 +169,18 @@ def _return_html_error(f, request):
             )
         else:
             logger.error(
-                "Failed handle request %r: %s",
+                "Failed handle request %r",
                 request,
-                f.getTraceback().rstrip(),
+                exc_info=(f.type, f.value, f.getTracebackObject()),
             )
     else:
         code = http_client.INTERNAL_SERVER_ERROR
         msg = "Internal server error"
 
         logger.error(
-            "Failed handle request %r: %s",
+            "Failed handle request %r",
             request,
-            f.getTraceback().rstrip(),
+            exc_info=(f.type, f.value, f.getTracebackObject()),
         )
 
     body = HTML_ERROR_TEMPLATE.format(
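
The same logging change is applied here, in the federation server and in the pagination handler: passing exc_info hands the logging framework a real traceback object instead of a pre-formatted string, so formatters and aggregators can render it themselves. A self-contained sketch:

import logging
from twisted.python import failure

logger = logging.getLogger(__name__)

try:
    raise ValueError("boom")
except ValueError:
    f = failure.Failure()  # captures the exception currently being handled
    logger.error(
        "Failed to handle request %r",
        "GET /example",
        exc_info=(f.type, f.value, f.getTracebackObject()),
    )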
diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py
index 98d8d9560b..e65f8c63d3 100644
--- a/synapse/push/httppusher.py
+++ b/synapse/push/httppusher.py
@@ -32,9 +32,25 @@ if six.PY3:
 
 logger = logging.getLogger(__name__)
 
-http_push_processed_counter = Counter("synapse_http_httppusher_http_pushes_processed", "")
+http_push_processed_counter = Counter(
+    "synapse_http_httppusher_http_pushes_processed",
+    "Number of push notifications successfully sent",
+)
 
-http_push_failed_counter = Counter("synapse_http_httppusher_http_pushes_failed", "")
+http_push_failed_counter = Counter(
+    "synapse_http_httppusher_http_pushes_failed",
+    "Number of push notifications which failed",
+)
+
+http_badges_processed_counter = Counter(
+    "synapse_http_httppusher_badge_updates_processed",
+    "Number of badge updates successfully sent",
+)
+
+http_badges_failed_counter = Counter(
+    "synapse_http_httppusher_badge_updates_failed",
+    "Number of badge updates which failed",
+)
 
 
 class HttpPusher(object):
@@ -81,6 +97,11 @@ class HttpPusher(object):
             pusherdict['pushkey'],
         )
 
+        if self.data is None:
+            raise PusherConfigException(
+                "data can not be null for HTTP pusher"
+            )
+
         if 'url' not in self.data:
             raise PusherConfigException(
                 "'url' required in data for HTTP pusher"
@@ -346,6 +367,10 @@ class HttpPusher(object):
 
     @defer.inlineCallbacks
     def _send_badge(self, badge):
+        """
+        Args:
+            badge (int): number of unread messages
+        """
         logger.info("Sending updated badge count %d to %s", badge, self.name)
         d = {
             'notification': {
@@ -366,14 +391,11 @@ class HttpPusher(object):
             }
         }
         try:
-            resp = yield self.http_client.post_json_get_json(self.url, d)
+            yield self.http_client.post_json_get_json(self.url, d)
+            http_badges_processed_counter.inc()
         except Exception as e:
             logger.warning(
                 "Failed to send badge count to %s: %s %s",
                 self.name, type(e), e,
             )
-            defer.returnValue(False)
-        rejected = []
-        if 'rejected' in resp:
-            rejected = resp['rejected']
-        defer.returnValue(rejected)
+            http_badges_failed_counter.inc()
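
The counters gain descriptions, and badge sends get success/failure metrics in place of the dead resp/rejected code path. The same pattern with prometheus_client's real Counter API (names here are illustrative):

from prometheus_client import Counter

badges_processed = Counter(
    "example_badge_updates_processed",
    "Number of badge updates successfully sent",
)
badges_failed = Counter(
    "example_badge_updates_failed",
    "Number of badge updates which failed",
)

def send_badge(post, url, payload):
    try:
        post(url, payload)
        badges_processed.inc()
    except Exception:
        badges_failed.inc()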
diff --git a/synapse/push/pusher.py b/synapse/push/pusher.py
index 368d5094be..b33f2a357b 100644
--- a/synapse/push/pusher.py
+++ b/synapse/push/pusher.py
@@ -56,7 +56,7 @@ class PusherFactory(object):
         f = self.pusher_types.get(kind, None)
         if not f:
             return None
-        logger.info("creating %s pusher for %r", kind, pusherdict)
+        logger.debug("creating %s pusher for %r", kind, pusherdict)
         return f(self.hs, pusherdict)
 
     def _create_email_pusher(self, _hs, pusherdict):
diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py
index 5a4e73ccd6..abf1a1a9c1 100644
--- a/synapse/push/pusherpool.py
+++ b/synapse/push/pusherpool.py
@@ -19,6 +19,7 @@ import logging
 from twisted.internet import defer
 
 from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.push import PusherConfigException
 from synapse.push.pusher import PusherFactory
 
 logger = logging.getLogger(__name__)
@@ -140,6 +141,10 @@ class PusherPool:
 
     @defer.inlineCallbacks
     def on_new_notifications(self, min_stream_id, max_stream_id):
+        if not self.pushers:
+            # nothing to do here.
+            return
+
         try:
             users_affected = yield self.store.get_push_action_users_in_range(
                 min_stream_id, max_stream_id
@@ -155,6 +160,10 @@ class PusherPool:
 
     @defer.inlineCallbacks
     def on_new_receipts(self, min_stream_id, max_stream_id, affected_room_ids):
+        if not self.pushers:
+            # nothing to do here.
+            return
+
         try:
             # Need to subtract 1 from the minimum because the lower bound here
             # is not inclusive
@@ -214,6 +223,15 @@ class PusherPool:
         """
         try:
             p = self.pusher_factory.create_pusher(pusherdict)
+        except PusherConfigException as e:
+            logger.warning(
+                "Pusher incorrectly configured user=%s, appid=%s, pushkey=%s: %s",
+                pusherdict.get('user_name'),
+                pusherdict.get('app_id'),
+                pusherdict.get('pushkey'),
+                e,
+            )
+            return
         except Exception:
             logger.exception("Couldn't start a pusher: caught Exception")
             return
diff --git a/synapse/replication/slave/storage/_base.py b/synapse/replication/slave/storage/_base.py
index 1353a32d00..817d1f67f9 100644
--- a/synapse/replication/slave/storage/_base.py
+++ b/synapse/replication/slave/storage/_base.py
@@ -59,12 +59,7 @@ class BaseSlavedStore(SQLBaseStore):
                     members_changed = set(row.keys[1:])
                     self._invalidate_state_caches(room_id, members_changed)
                 else:
-                    try:
-                        getattr(self, row.cache_func).invalidate(tuple(row.keys))
-                    except AttributeError:
-                        # We probably haven't pulled in the cache in this worker,
-                        # which is fine.
-                        pass
+                    self._attempt_to_invalidate_cache(row.cache_func, tuple(row.keys))
 
     def _invalidate_cache_and_stream(self, txn, cache_func, keys):
         txn.call_after(cache_func.invalidate, keys)
diff --git a/synapse/replication/slave/storage/presence.py b/synapse/replication/slave/storage/presence.py
index 92447b00d4..9e530defe0 100644
--- a/synapse/replication/slave/storage/presence.py
+++ b/synapse/replication/slave/storage/presence.py
@@ -54,8 +54,11 @@ class SlavedPresenceStore(BaseSlavedStore):
 
     def stream_positions(self):
         result = super(SlavedPresenceStore, self).stream_positions()
-        position = self._presence_id_gen.get_current_token()
-        result["presence"] = position
+
+        if self.hs.config.use_presence:
+            position = self._presence_id_gen.get_current_token()
+            result["presence"] = position
+
         return result
 
     def process_replication_rows(self, stream_name, token, rows):
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index 586dddb40b..e558f90e1a 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -39,7 +39,7 @@ class ReplicationClientFactory(ReconnectingClientFactory):
     Accepts a handler that will be called when new data is available or data
     is required.
     """
-    maxDelay = 5  # Try at least once every N seconds
+    maxDelay = 30  # Try at least once every N seconds
 
     def __init__(self, hs, client_name, handler):
         self.client_name = client_name
@@ -54,7 +54,6 @@ class ReplicationClientFactory(ReconnectingClientFactory):
 
     def buildProtocol(self, addr):
         logger.info("Connected to replication: %r", addr)
-        self.resetDelay()
         return ClientReplicationStreamProtocol(
             self.client_name, self.server_name, self._clock, self.handler
         )
@@ -90,15 +89,18 @@ class ReplicationClientHandler(object):
         # Used for tests.
         self.awaiting_syncs = {}
 
+        # The factory used to create connections.
+        self.factory = None
+
     def start_replication(self, hs):
         """Helper method to start a replication connection to the remote server
         using TCP.
         """
         client_name = hs.config.worker_name
-        factory = ReplicationClientFactory(hs, client_name, self)
+        self.factory = ReplicationClientFactory(hs, client_name, self)
         host = hs.config.worker_replication_host
         port = hs.config.worker_replication_port
-        hs.get_reactor().connectTCP(host, port, factory)
+        hs.get_reactor().connectTCP(host, port, self.factory)
 
     def on_rdata(self, stream_name, token, rows):
         """Called when we get new replication data. By default this just pokes
@@ -140,6 +142,7 @@ class ReplicationClientHandler(object):
             args["account_data"] = user_account_data
         elif room_account_data:
             args["account_data"] = room_account_data
+
         return args
 
     def get_currently_syncing_users(self):
@@ -204,3 +207,14 @@ class ReplicationClientHandler(object):
             for cmd in self.pending_commands:
                 connection.send_command(cmd)
             self.pending_commands = []
+
+    def finished_connecting(self):
+        """Called when we have successfully subscribed and caught up to all
+        streams we're interested in.
+        """
+        logger.info("Finished connecting to server")
+
+        # We don't reset the delay any earlier: if there were a problem
+        # during start-up, we would otherwise end up tight-looping on
+        # reconnection attempts to the server.
+        self.factory.resetDelay()
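
Deferring resetDelay() until finished_connecting keeps Twisted's reconnection backoff honest: a server that accepts TCP connections but falls over during catch-up no longer gets hammered in a tight reconnect loop. A minimal sketch against the real ReconnectingClientFactory API:

from twisted.internet.protocol import Protocol, ReconnectingClientFactory

class _NoopProtocol(Protocol):
    pass

class ExampleReplicationFactory(ReconnectingClientFactory):
    maxDelay = 30  # seconds between attempts, at most

    def buildProtocol(self, addr):
        # Deliberately no resetDelay() here; it is called later, once the
        # handler reports that every subscribed stream has caught up.
        return _NoopProtocol()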
diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py
index 327556f6a1..2098c32a77 100644
--- a/synapse/replication/tcp/commands.py
+++ b/synapse/replication/tcp/commands.py
@@ -127,8 +127,11 @@ class RdataCommand(Command):
 
 
 class PositionCommand(Command):
-    """Sent by the client to tell the client the stream postition without
+    """Sent by the server to tell the client the stream postition without
     needing to send an RDATA.
+
+    Sent to the client once all missing updates for a stream have been
+    sent, indicating that it is now up to date.
     """
     NAME = "POSITION"
 
diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py
index 0b3fe6cbf5..49ae5b3355 100644
--- a/synapse/replication/tcp/protocol.py
+++ b/synapse/replication/tcp/protocol.py
@@ -268,7 +268,17 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
         if "\n" in string:
             raise Exception("Unexpected newline in command: %r", string)
 
-        self.sendLine(string.encode("utf-8"))
+        encoded_string = string.encode("utf-8")
+
+        if len(encoded_string) > self.MAX_LENGTH:
+            raise Exception(
+                "Failed to send command %s as too long (%d > %d)" % (
+                    cmd.NAME,
+                    len(encoded_string), self.MAX_LENGTH,
+                )
+            )
+
+        self.sendLine(encoded_string)
 
         self.last_sent_command = self.clock.time_msec()
 
@@ -361,6 +371,11 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
     def id(self):
         return "%s-%s" % (self.name, self.conn_id)
 
+    def lineLengthExceeded(self, line):
+        """Called when we receive a line that is above the maximum line length
+        """
+        self.send_error("Line length exceeded")
+
 
 class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol):
     VALID_INBOUND_COMMANDS = VALID_CLIENT_COMMANDS
@@ -511,6 +526,11 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
         self.server_name = server_name
         self.handler = handler
 
+        # Set of stream names that have been subscribed to, but haven't yet
+        # caught up with. This is used to track when the client has been fully
+        # connected to the remote.
+        self.streams_connecting = set()
+
         # Map of stream to batched updates. See RdataCommand for info on how
         # batching works.
         self.pending_batches = {}
@@ -533,6 +553,10 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
         # We've now finished connecting, so inform the client handler
         self.handler.update_connection(self)
 
+        # This will happen if we don't actually subscribe to any streams
+        if not self.streams_connecting:
+            self.handler.finished_connecting()
+
     def on_SERVER(self, cmd):
         if cmd.data != self.server_name:
             logger.error("[%s] Connected to wrong remote: %r", self.id(), cmd.data)
@@ -562,6 +586,12 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
             return self.handler.on_rdata(stream_name, cmd.token, rows)
 
     def on_POSITION(self, cmd):
+        # When we get a `POSITION` command it means we've finished getting
+        # missing updates for the given stream, and are now up to date.
+        self.streams_connecting.discard(cmd.stream_name)
+        if not self.streams_connecting:
+            self.handler.finished_connecting()
+
         return self.handler.on_position(cmd.stream_name, cmd.token)
 
     def on_SYNC(self, cmd):
@@ -578,6 +608,8 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
             self.id(), stream_name, token
         )
 
+        self.streams_connecting.add(stream_name)
+
         self.send_command(ReplicateCommand(stream_name, token))
 
     def on_connection_closed(self):
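
LineOnlyReceiver's default MAX_LENGTH is 16384 bytes, and the changes above enforce it in both directions: the sender raises on oversized commands and the receiver replies with an error rather than silently dropping the line. A standalone sketch of the sender-side guard:

MAX_LENGTH = 16384  # LineOnlyReceiver's default maximum line length

def encode_command(name, data):
    string = "%s %s" % (name, data)
    if "\n" in string:
        raise Exception("Unexpected newline in command: %r" % (string,))
    encoded = string.encode("utf-8")
    if len(encoded) > MAX_LENGTH:
        # Fail loudly on the sender instead of letting the receiver
        # truncate or drop the line.
        raise Exception(
            "Failed to send command %s as too long (%d > %d)"
            % (name, len(encoded), MAX_LENGTH)
        )
    return encoded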
diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py
index f7bb710642..ac035c7735 100644
--- a/synapse/rest/client/v2_alpha/auth.py
+++ b/synapse/rest/client/v2_alpha/auth.py
@@ -33,7 +33,7 @@ RECAPTCHA_TEMPLATE = """
 <title>Authentication</title>
 <meta name='viewport' content='width=device-width, initial-scale=1,
     user-scalable=no, minimum-scale=1.0, maximum-scale=1.0'>
-<script src="https://www.google.com/recaptcha/api.js"
+<script src="https://www.recaptcha.net/recaptcha/api.js"
     async defer></script>
 <script src="//code.jquery.com/jquery-1.11.2.min.js"></script>
 <link rel="stylesheet" href="/_matrix/static/client/register/style.css">
diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py
index efe42a429d..fece1ef0b8 100644
--- a/synapse/rest/media/v1/_base.py
+++ b/synapse/rest/media/v1/_base.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2019 New Vector Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -133,8 +134,15 @@ def respond_with_responder(request, responder, media_type, file_size, upload_nam
 
     logger.debug("Responding to media request with responder %s")
     add_file_headers(request, media_type, file_size, upload_name)
-    with responder:
-        yield responder.write_to_consumer(request)
+    try:
+        with responder:
+            yield responder.write_to_consumer(request)
+    except Exception as e:
+        # The majority of the time this will be due to the client having gone
+        # away. Unfortunately, Twisted simply throws a generic exception at us
+        # in that case.
+        logger.warning("Failed to write to consumer: %s %s", type(e), e)
+
     finish_request(request)
 
 
@@ -206,8 +214,7 @@ def get_filename_from_headers(headers):
     Content-Disposition HTTP header.
 
     Args:
-        headers (twisted.web.http_headers.Headers): The HTTP
-            request headers.
+        headers (dict[bytes, list[bytes]]): The HTTP request headers.
 
     Returns:
         A Unicode string of the filename, or None.
@@ -218,23 +225,12 @@ def get_filename_from_headers(headers):
     if not content_disposition[0]:
         return
 
-    # dict of unicode: bytes, corresponding to the key value sections of the
-    # Content-Disposition header.
-    params = {}
-    parts = content_disposition[0].split(b";")
-    for i in parts:
-        # Split into key-value pairs, if able
-        # We don't care about things like `inline`, so throw it out
-        if b"=" not in i:
-            continue
-
-        key, value = i.strip().split(b"=")
-        params[key.decode('ascii')] = value
+    _, params = _parse_header(content_disposition[0])
 
     upload_name = None
 
     # First check if there is a valid UTF-8 filename
-    upload_name_utf8 = params.get("filename*", None)
+    upload_name_utf8 = params.get(b"filename*", None)
     if upload_name_utf8:
         if upload_name_utf8.lower().startswith(b"utf-8''"):
             upload_name_utf8 = upload_name_utf8[7:]
@@ -260,12 +256,68 @@ def get_filename_from_headers(headers):
 
     # If there isn't check for an ascii name.
     if not upload_name:
-        upload_name_ascii = params.get("filename", None)
+        upload_name_ascii = params.get(b"filename", None)
         if upload_name_ascii and is_ascii(upload_name_ascii):
-            # Make sure there's no %-quoted bytes. If there is, reject it as
-            # non-valid ASCII.
-            if b"%" not in upload_name_ascii:
-                upload_name = upload_name_ascii.decode('ascii')
+            upload_name = upload_name_ascii.decode('ascii')
 
     # This may be None here, indicating we did not find a matching name.
     return upload_name
+
+
+def _parse_header(line):
+    """Parse a Content-type like header.
+
+    Cargo-culted from `cgi`, but works on bytes rather than strings.
+
+    Args:
+        line (bytes): header to be parsed
+
+    Returns:
+        Tuple[bytes, dict[bytes, bytes]]:
+            the main content-type, followed by the parameter dictionary
+    """
+    parts = _parseparam(b';' + line)
+    key = next(parts)
+    pdict = {}
+    for p in parts:
+        i = p.find(b'=')
+        if i >= 0:
+            name = p[:i].strip().lower()
+            value = p[i + 1:].strip()
+
+            # strip double-quotes
+            if len(value) >= 2 and value[0:1] == value[-1:] == b'"':
+                value = value[1:-1]
+                value = value.replace(b'\\\\', b'\\').replace(b'\\"', b'"')
+            pdict[name] = value
+
+    return key, pdict
+
+
+def _parseparam(s):
+    """Generator which splits the input on ;, respecting double-quoted sequences
+
+    Cargo-culted from `cgi`, but works on bytes rather than strings.
+
+    Args:
+        s (bytes): header to be parsed
+
+    Returns:
+        Iterable[bytes]: the split input
+    """
+    while s[:1] == b';':
+        s = s[1:]
+
+        # look for the next ;
+        end = s.find(b';')
+
+        # if there is an odd number of " marks between here and the next ;, skip to the
+        # next ; instead
+        while end > 0 and (s.count(b'"', 0, end) - s.count(b'\\"', 0, end)) % 2:
+            end = s.find(b';', end + 1)
+
+        if end < 0:
+            end = len(s)
+        f = s[:end]
+        yield f.strip()
+        s = s[end:]
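
Unlike the naive split-on-';' it replaces, this parser respects quoting, so a filename containing a semicolon survives intact. Expected behaviour, assuming the two functions above are in scope:

assert _parse_header(b'attachment; filename="strange; name.png"') == (
    b'attachment', {b'filename': b'strange; name.png'},
)
assert _parse_header(b"inline; filename*=utf-8''n%C3%A4me.png") == (
    b'inline', {b'filename*': b"utf-8''n%C3%A4me.png"},
)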
diff --git a/synapse/server.pyi b/synapse/server.pyi
index 06cd083a74..fb8df56cd5 100644
--- a/synapse/server.pyi
+++ b/synapse/server.pyi
@@ -7,9 +7,9 @@ import synapse.handlers.auth
 import synapse.handlers.deactivate_account
 import synapse.handlers.device
 import synapse.handlers.e2e_keys
+import synapse.handlers.message
 import synapse.handlers.room
 import synapse.handlers.room_member
-import synapse.handlers.message
 import synapse.handlers.set_password
 import synapse.rest.media.v1.media_repository
 import synapse.server_notices.server_notices_manager
diff --git a/synapse/static/client/register/index.html b/synapse/static/client/register/index.html
index 886f2edd1f..6edc4deb03 100644
--- a/synapse/static/client/register/index.html
+++ b/synapse/static/client/register/index.html
@@ -4,7 +4,7 @@
 <meta name='viewport' content='width=device-width, initial-scale=1, user-scalable=no, minimum-scale=1.0, maximum-scale=1.0'> 
 <link rel="stylesheet" href="style.css">
 <script src="js/jquery-2.1.3.min.js"></script>
-<script src="https://www.google.com/recaptcha/api/js/recaptcha_ajax.js"></script>
+<script src="https://www.recaptcha.net/recaptcha/api/js/recaptcha_ajax.js"></script>
 <script src="register_config.js"></script>
 <script src="js/register.js"></script>
 </head>
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index 3d895da43c..a0333d5309 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -30,6 +30,7 @@ from synapse.api.errors import StoreError
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage.engines import PostgresEngine, Sqlite3Engine
 from synapse.types import get_domain_from_id
+from synapse.util import batch_iter
 from synapse.util.caches.descriptors import Cache
 from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
 from synapse.util.stringutils import exception_to_unicode
@@ -1327,10 +1328,16 @@ class SQLBaseStore(object):
         """
         txn.call_after(self._invalidate_state_caches, room_id, members_changed)
 
-        keys = itertools.chain([room_id], members_changed)
-        self._send_invalidation_to_replication(
-            txn, _CURRENT_STATE_CACHE_NAME, keys,
-        )
+        # We need to be careful that the size of the `members_changed` list
+        # isn't so large that it causes problems sending over replication, so we
+        # send them in chunks.
+        # Max line length is 16K, and max user ID length is 255, so 50 should
+        # be safe.
+        for chunk in batch_iter(members_changed, 50):
+            keys = itertools.chain([room_id], chunk)
+            self._send_invalidation_to_replication(
+                txn, _CURRENT_STATE_CACHE_NAME, keys,
+            )
 
     def _invalidate_state_caches(self, room_id, members_changed):
         """Invalidates caches that are based on the current state, but does
@@ -1342,15 +1349,43 @@ class SQLBaseStore(object):
                 changed
         """
         for member in members_changed:
-            self.get_rooms_for_user_with_stream_ordering.invalidate((member,))
+            self._attempt_to_invalidate_cache(
+                "get_rooms_for_user_with_stream_ordering", (member,),
+            )
 
         for host in set(get_domain_from_id(u) for u in members_changed):
-            self.is_host_joined.invalidate((room_id, host))
-            self.was_host_joined.invalidate((room_id, host))
+            self._attempt_to_invalidate_cache(
+                "is_host_joined", (room_id, host,),
+            )
+            self._attempt_to_invalidate_cache(
+                "was_host_joined", (room_id, host,),
+            )
+
+        self._attempt_to_invalidate_cache(
+            "get_users_in_room", (room_id,),
+        )
+        self._attempt_to_invalidate_cache(
+            "get_room_summary", (room_id,),
+        )
+        self._attempt_to_invalidate_cache(
+            "get_current_state_ids", (room_id,),
+        )
+
+    def _attempt_to_invalidate_cache(self, cache_name, key):
+        """Attempts to invalidate the cache of the given name, ignoring if the
+        cache doesn't exist. Mainly used for invalidating caches on workers,
+        where they may not have the cache.
 
-        self.get_users_in_room.invalidate((room_id,))
-        self.get_room_summary.invalidate((room_id,))
-        self.get_current_state_ids.invalidate((room_id,))
+        Args:
+            cache_name (str)
+            key (tuple)
+        """
+        try:
+            getattr(self, cache_name).invalidate(key)
+        except AttributeError:
+            # We probably haven't pulled in the cache in this worker,
+            # which is fine.
+            pass
 
     def _send_invalidation_to_replication(self, txn, cache_name, keys):
         """Notifies replication that given cache has been invalidated.
@@ -1568,6 +1603,14 @@ class SQLBaseStore(object):
 
         return cls.cursor_to_dict(txn)
 
+    @property
+    def database_engine_name(self):
+        return self.database_engine.module.__name__
+
+    def get_server_version(self):
+        """Returns a string describing the server version number"""
+        return self.database_engine.server_version
+
 
 class _RollbackButIsFineException(Exception):
     """ This exception is used to rollback a transaction without implying
diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py
index 4004427c7b..dc3238501c 100644
--- a/synapse/storage/engines/postgres.py
+++ b/synapse/storage/engines/postgres.py
@@ -23,6 +23,7 @@ class PostgresEngine(object):
         self.module = database_module
         self.module.extensions.register_type(self.module.extensions.UNICODE)
         self.synchronous_commit = database_config.get("synchronous_commit", True)
+        self._version = None   # unknown as yet
 
     def check_database(self, txn):
         txn.execute("SHOW SERVER_ENCODING")
@@ -87,3 +88,27 @@ class PostgresEngine(object):
         """
         txn.execute("SELECT nextval('state_group_id_seq')")
         return txn.fetchone()[0]
+
+    @property
+    def server_version(self):
+        """Returns a string giving the server version. For example: '8.1.5'
+
+        Returns:
+            string
+        """
+        # note that this is a bit of a hack because it relies on on_new_connection
+        # having been called at least once. Still, that should be a safe bet here.
+        numver = self._version
+        assert numver is not None
+
+        # https://www.postgresql.org/docs/current/libpq-status.html#LIBPQ-PQSERVERVERSION
+        if numver >= 100000:
+            return "%i.%i" % (
+                numver / 10000, numver % 10000,
+            )
+        else:
+            return "%i.%i.%i" % (
+                numver / 10000,
+                (numver % 10000) / 100,
+                numver % 100,
+            )
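
The arithmetic follows libpq's PQserverVersion convention; a worked version using explicit integer division (which is what the %i formatting effectively does), with two checks:

def decode_pg_version(numver):
    if numver >= 100000:
        # PostgreSQL 10+: major * 10000 + minor
        return "%i.%i" % (numver // 10000, numver % 10000)
    # pre-10: major * 10000 + minor * 100 + patch
    return "%i.%i.%i" % (
        numver // 10000, (numver % 10000) // 100, numver % 100,
    )

assert decode_pg_version(100001) == "10.1"
assert decode_pg_version(90605) == "9.6.5"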
diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py
index 059ab81055..1bcd5b99a4 100644
--- a/synapse/storage/engines/sqlite.py
+++ b/synapse/storage/engines/sqlite.py
@@ -70,6 +70,15 @@ class Sqlite3Engine(object):
             self._current_state_group_id += 1
             return self._current_state_group_id
 
+    @property
+    def server_version(self):
+        """Gets a string giving the server version. For example: '3.22.0'
+
+        Returns:
+            string
+        """
+        return "%i.%i.%i" % self.module.sqlite_version_info
+
 
 # Following functions taken from: https://github.com/coleifer/peewee
 
diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py
index 9b9572890b..9b6c28892c 100644
--- a/synapse/storage/registration.py
+++ b/synapse/storage/registration.py
@@ -295,6 +295,39 @@ class RegistrationWorkerStore(SQLBaseStore):
             return ret['user_id']
         return None
 
+    @defer.inlineCallbacks
+    def user_add_threepid(self, user_id, medium, address, validated_at, added_at):
+        yield self._simple_upsert("user_threepids", {
+            "medium": medium,
+            "address": address,
+        }, {
+            "user_id": user_id,
+            "validated_at": validated_at,
+            "added_at": added_at,
+        })
+
+    @defer.inlineCallbacks
+    def user_get_threepids(self, user_id):
+        ret = yield self._simple_select_list(
+            "user_threepids", {
+                "user_id": user_id
+            },
+            ['medium', 'address', 'validated_at', 'added_at'],
+            'user_get_threepids'
+        )
+        defer.returnValue(ret)
+
+    def user_delete_threepid(self, user_id, medium, address):
+        return self._simple_delete(
+            "user_threepids",
+            keyvalues={
+                "user_id": user_id,
+                "medium": medium,
+                "address": address,
+            },
+            desc="user_delete_threepids",
+        )
+
 
 class RegistrationStore(RegistrationWorkerStore,
                         background_updates.BackgroundUpdateStore):
@@ -633,39 +666,6 @@ class RegistrationStore(RegistrationWorkerStore,
         defer.returnValue(res if res else False)
 
     @defer.inlineCallbacks
-    def user_add_threepid(self, user_id, medium, address, validated_at, added_at):
-        yield self._simple_upsert("user_threepids", {
-            "medium": medium,
-            "address": address,
-        }, {
-            "user_id": user_id,
-            "validated_at": validated_at,
-            "added_at": added_at,
-        })
-
-    @defer.inlineCallbacks
-    def user_get_threepids(self, user_id):
-        ret = yield self._simple_select_list(
-            "user_threepids", {
-                "user_id": user_id
-            },
-            ['medium', 'address', 'validated_at', 'added_at'],
-            'user_get_threepids'
-        )
-        defer.returnValue(ret)
-
-    def user_delete_threepid(self, user_id, medium, address):
-        return self._simple_delete(
-            "user_threepids",
-            keyvalues={
-                "user_id": user_id,
-                "medium": medium,
-                "address": address,
-            },
-            desc="user_delete_threepids",
-        )
-
-    @defer.inlineCallbacks
     def save_or_get_3pid_guest_access_token(
             self, medium, address, access_token, inviter_user_id
     ):
diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py
index 36e136cded..13486930fb 100644
--- a/tests/handlers/test_typing.py
+++ b/tests/handlers/test_typing.py
@@ -24,13 +24,17 @@ from synapse.api.errors import AuthError
 from synapse.types import UserID
 
 from tests import unittest
+from tests.utils import register_federation_servlets
 
-from ..utils import (
-    DeferredMockCallable,
-    MockClock,
-    MockHttpResource,
-    setup_test_homeserver,
-)
+# Some local users to test with
+U_APPLE = UserID.from_string("@apple:test")
+U_BANANA = UserID.from_string("@banana:test")
+
+# Remote user
+U_ONION = UserID.from_string("@onion:farm")
+
+# Test room id
+ROOM_ID = "a-room"
 
 
 def _expect_edu_transaction(edu_type, content, origin="test"):
@@ -46,30 +50,21 @@ def _make_edu_transaction_json(edu_type, content):
     return json.dumps(_expect_edu_transaction(edu_type, content)).encode('utf8')
 
 
-class TypingNotificationsTestCase(unittest.TestCase):
-    """Tests typing notifications to rooms."""
-
-    @defer.inlineCallbacks
-    def setUp(self):
-        self.clock = MockClock()
+class TypingNotificationsTestCase(unittest.HomeserverTestCase):
+    servlets = [register_federation_servlets]
 
-        self.mock_http_client = Mock(spec=[])
-        self.mock_http_client.put_json = DeferredMockCallable()
+    def make_homeserver(self, reactor, clock):
+        # we mock out the keyring so as to skip the authentication check on the
+        # federation API call.
+        mock_keyring = Mock(spec=["verify_json_for_server"])
+        mock_keyring.verify_json_for_server.return_value = defer.succeed(True)
 
-        self.mock_federation_resource = MockHttpResource()
-
-        mock_notifier = Mock()
-        self.on_new_event = mock_notifier.on_new_event
+        # we mock out the federation client too
+        mock_federation_client = Mock(spec=["put_json"])
+        mock_federation_client.put_json.return_value = defer.succeed((200, "OK"))
 
-        self.auth = Mock(spec=[])
-        self.state_handler = Mock()
-
-        hs = yield setup_test_homeserver(
-            self.addCleanup,
-            "test",
-            auth=self.auth,
-            clock=self.clock,
-            datastore=Mock(
+        hs = self.setup_test_homeserver(
+            datastore=(Mock(
                 spec=[
                     # Bits that Federation needs
                     "prep_send_transaction",
@@ -82,16 +77,21 @@ class TypingNotificationsTestCase(unittest.TestCase):
                     "get_user_directory_stream_pos",
                     "get_current_state_deltas",
                 ]
-            ),
-            state_handler=self.state_handler,
-            handlers=Mock(),
-            notifier=mock_notifier,
-            resource_for_client=Mock(),
-            resource_for_federation=self.mock_federation_resource,
-            http_client=self.mock_http_client,
-            keyring=Mock(),
+            )),
+            notifier=Mock(),
+            http_client=mock_federation_client,
+            keyring=mock_keyring,
         )
 
+        return hs
+
+    def prepare(self, reactor, clock, hs):
+        # the tests assume that we are starting at unix time 1000
+        reactor.pump((1000, ))
+
+        mock_notifier = hs.get_notifier()
+        self.on_new_event = mock_notifier.on_new_event
+
         self.handler = hs.get_typing_handler()
 
         self.event_source = hs.get_event_sources().sources["typing"]
@@ -109,13 +109,12 @@ class TypingNotificationsTestCase(unittest.TestCase):
 
         self.datastore.get_received_txn_response = get_received_txn_response
 
-        self.room_id = "a-room"
-
         self.room_members = []
 
         def check_joined_room(room_id, user_id):
             if user_id not in [u.to_string() for u in self.room_members]:
                 raise AuthError(401, "User is not in the room")
+        hs.get_auth().check_joined_room = check_joined_room
 
         def get_joined_hosts_for_room(room_id):
             return set(member.domain for member in self.room_members)
@@ -124,8 +123,7 @@ class TypingNotificationsTestCase(unittest.TestCase):
 
         def get_current_user_in_room(room_id):
             return set(str(u) for u in self.room_members)
-
-        self.state_handler.get_current_user_in_room = get_current_user_in_room
+        hs.get_state_handler().get_current_user_in_room = get_current_user_in_room
 
         self.datastore.get_user_directory_stream_pos.return_value = (
             # we deliberately return a non-None stream pos to avoid doing an initial_spam
@@ -134,230 +132,208 @@ class TypingNotificationsTestCase(unittest.TestCase):
 
         self.datastore.get_current_state_deltas.return_value = None
 
-        self.auth.check_joined_room = check_joined_room
-
         self.datastore.get_to_device_stream_token = lambda: 0
         self.datastore.get_new_device_msgs_for_remote = lambda *args, **kargs: ([], 0)
         self.datastore.delete_device_msgs_for_remote = lambda *args, **kargs: None
 
-        # Some local users to test with
-        self.u_apple = UserID.from_string("@apple:test")
-        self.u_banana = UserID.from_string("@banana:test")
-
-        # Remote user
-        self.u_onion = UserID.from_string("@onion:farm")
-
-    @defer.inlineCallbacks
     def test_started_typing_local(self):
-        self.room_members = [self.u_apple, self.u_banana]
+        self.room_members = [U_APPLE, U_BANANA]
 
         self.assertEquals(self.event_source.get_current_key(), 0)
 
-        yield self.handler.started_typing(
-            target_user=self.u_apple,
-            auth_user=self.u_apple,
-            room_id=self.room_id,
+        self.successResultOf(self.handler.started_typing(
+            target_user=U_APPLE,
+            auth_user=U_APPLE,
+            room_id=ROOM_ID,
             timeout=20000,
-        )
+        ))
 
         self.on_new_event.assert_has_calls(
-            [call('typing_key', 1, rooms=[self.room_id])]
+            [call('typing_key', 1, rooms=[ROOM_ID])]
         )
 
         self.assertEquals(self.event_source.get_current_key(), 1)
-        events = yield self.event_source.get_new_events(
-            room_ids=[self.room_id], from_key=0
+        events = self.event_source.get_new_events(
+            room_ids=[ROOM_ID], from_key=0
         )
         self.assertEquals(
             events[0],
             [
                 {
                     "type": "m.typing",
-                    "room_id": self.room_id,
-                    "content": {"user_ids": [self.u_apple.to_string()]},
+                    "room_id": ROOM_ID,
+                    "content": {"user_ids": [U_APPLE.to_string()]},
                 }
             ],
         )
 
-    @defer.inlineCallbacks
     def test_started_typing_remote_send(self):
-        self.room_members = [self.u_apple, self.u_onion]
-
-        put_json = self.mock_http_client.put_json
-        put_json.expect_call_and_return(
-            call(
-                "farm",
-                path="/_matrix/federation/v1/send/1000000/",
-                data=_expect_edu_transaction(
-                    "m.typing",
-                    content={
-                        "room_id": self.room_id,
-                        "user_id": self.u_apple.to_string(),
-                        "typing": True,
-                    },
-                ),
-                json_data_callback=ANY,
-                long_retries=True,
-                backoff_on_404=True,
-            ),
-            defer.succeed((200, "OK")),
-        )
+        self.room_members = [U_APPLE, U_ONION]
 
-        yield self.handler.started_typing(
-            target_user=self.u_apple,
-            auth_user=self.u_apple,
-            room_id=self.room_id,
+        self.successResultOf(self.handler.started_typing(
+            target_user=U_APPLE,
+            auth_user=U_APPLE,
+            room_id=ROOM_ID,
             timeout=20000,
-        )
+        ))
 
-        yield put_json.await_calls()
+        put_json = self.hs.get_http_client().put_json
+        put_json.assert_called_once_with(
+            "farm",
+            path="/_matrix/federation/v1/send/1000000/",
+            data=_expect_edu_transaction(
+                "m.typing",
+                content={
+                    "room_id": ROOM_ID,
+                    "user_id": U_APPLE.to_string(),
+                    "typing": True,
+                },
+            ),
+            json_data_callback=ANY,
+            long_retries=True,
+            backoff_on_404=True,
+        )
 
-    @defer.inlineCallbacks
     def test_started_typing_remote_recv(self):
-        self.room_members = [self.u_apple, self.u_onion]
+        self.room_members = [U_APPLE, U_ONION]
 
         self.assertEquals(self.event_source.get_current_key(), 0)
 
-        (code, response) = yield self.mock_federation_resource.trigger(
+        (request, channel) = self.make_request(
             "PUT",
             "/_matrix/federation/v1/send/1000000/",
             _make_edu_transaction_json(
                 "m.typing",
                 content={
-                    "room_id": self.room_id,
-                    "user_id": self.u_onion.to_string(),
+                    "room_id": ROOM_ID,
+                    "user_id": U_ONION.to_string(),
                     "typing": True,
                 },
             ),
             federation_auth_origin=b'farm',
         )
+        self.render(request)
+        self.assertEqual(channel.code, 200)
 
         self.on_new_event.assert_has_calls(
-            [call('typing_key', 1, rooms=[self.room_id])]
+            [call('typing_key', 1, rooms=[ROOM_ID])]
         )
 
         self.assertEquals(self.event_source.get_current_key(), 1)
-        events = yield self.event_source.get_new_events(
-            room_ids=[self.room_id], from_key=0
+        events = self.event_source.get_new_events(
+            room_ids=[ROOM_ID], from_key=0
         )
         self.assertEquals(
             events[0],
             [
                 {
                     "type": "m.typing",
-                    "room_id": self.room_id,
-                    "content": {"user_ids": [self.u_onion.to_string()]},
+                    "room_id": ROOM_ID,
+                    "content": {"user_ids": [U_ONION.to_string()]},
                 }
             ],
         )
 
-    @defer.inlineCallbacks
     def test_stopped_typing(self):
-        self.room_members = [self.u_apple, self.u_banana, self.u_onion]
-
-        put_json = self.mock_http_client.put_json
-        put_json.expect_call_and_return(
-            call(
-                "farm",
-                path="/_matrix/federation/v1/send/1000000/",
-                data=_expect_edu_transaction(
-                    "m.typing",
-                    content={
-                        "room_id": self.room_id,
-                        "user_id": self.u_apple.to_string(),
-                        "typing": False,
-                    },
-                ),
-                json_data_callback=ANY,
-                long_retries=True,
-                backoff_on_404=True,
-            ),
-            defer.succeed((200, "OK")),
-        )
+        self.room_members = [U_APPLE, U_BANANA, U_ONION]
 
         # Gut-wrenching
         from synapse.handlers.typing import RoomMember
 
-        member = RoomMember(self.room_id, self.u_apple.to_string())
+        member = RoomMember(ROOM_ID, U_APPLE.to_string())
         self.handler._member_typing_until[member] = 1002000
-        self.handler._room_typing[self.room_id] = set([self.u_apple.to_string()])
+        self.handler._room_typing[ROOM_ID] = set([U_APPLE.to_string()])
 
         self.assertEquals(self.event_source.get_current_key(), 0)
 
-        yield self.handler.stopped_typing(
-            target_user=self.u_apple, auth_user=self.u_apple, room_id=self.room_id
-        )
+        self.successResultOf(self.handler.stopped_typing(
+            target_user=U_APPLE, auth_user=U_APPLE, room_id=ROOM_ID
+        ))
 
         self.on_new_event.assert_has_calls(
-            [call('typing_key', 1, rooms=[self.room_id])]
+            [call('typing_key', 1, rooms=[ROOM_ID])]
         )
 
-        yield put_json.await_calls()
+        put_json = self.hs.get_http_client().put_json
+        put_json.assert_called_once_with(
+            "farm",
+            path="/_matrix/federation/v1/send/1000000/",
+            data=_expect_edu_transaction(
+                "m.typing",
+                content={
+                    "room_id": ROOM_ID,
+                    "user_id": U_APPLE.to_string(),
+                    "typing": False,
+                },
+            ),
+            json_data_callback=ANY,
+            long_retries=True,
+            backoff_on_404=True,
+        )
 
         self.assertEquals(self.event_source.get_current_key(), 1)
-        events = yield self.event_source.get_new_events(
-            room_ids=[self.room_id], from_key=0
+        events = self.event_source.get_new_events(
+            room_ids=[ROOM_ID], from_key=0
         )
         self.assertEquals(
             events[0],
             [
                 {
                     "type": "m.typing",
-                    "room_id": self.room_id,
+                    "room_id": ROOM_ID,
                     "content": {"user_ids": []},
                 }
             ],
         )
 
-    @defer.inlineCallbacks
     def test_typing_timeout(self):
-        self.room_members = [self.u_apple, self.u_banana]
+        self.room_members = [U_APPLE, U_BANANA]
 
         self.assertEquals(self.event_source.get_current_key(), 0)
 
-        yield self.handler.started_typing(
-            target_user=self.u_apple,
-            auth_user=self.u_apple,
-            room_id=self.room_id,
+        self.successResultOf(self.handler.started_typing(
+            target_user=U_APPLE,
+            auth_user=U_APPLE,
+            room_id=ROOM_ID,
             timeout=10000,
-        )
+        ))
 
         self.on_new_event.assert_has_calls(
-            [call('typing_key', 1, rooms=[self.room_id])]
+            [call('typing_key', 1, rooms=[ROOM_ID])]
         )
         self.on_new_event.reset_mock()
 
         self.assertEquals(self.event_source.get_current_key(), 1)
-        events = yield self.event_source.get_new_events(
-            room_ids=[self.room_id], from_key=0
+        events = self.event_source.get_new_events(
+            room_ids=[ROOM_ID], from_key=0
         )
         self.assertEquals(
             events[0],
             [
                 {
                     "type": "m.typing",
-                    "room_id": self.room_id,
-                    "content": {"user_ids": [self.u_apple.to_string()]},
+                    "room_id": ROOM_ID,
+                    "content": {"user_ids": [U_APPLE.to_string()]},
                 }
             ],
         )
 
-        self.clock.advance_time(16)
+        self.reactor.pump([16, ])
 
         self.on_new_event.assert_has_calls(
-            [call('typing_key', 2, rooms=[self.room_id])]
+            [call('typing_key', 2, rooms=[ROOM_ID])]
         )
 
         self.assertEquals(self.event_source.get_current_key(), 2)
-        events = yield self.event_source.get_new_events(
-            room_ids=[self.room_id], from_key=1
+        events = self.event_source.get_new_events(
+            room_ids=[ROOM_ID], from_key=1
         )
         self.assertEquals(
             events[0],
             [
                 {
                     "type": "m.typing",
-                    "room_id": self.room_id,
+                    "room_id": ROOM_ID,
                     "content": {"user_ids": []},
                 }
             ],
@@ -365,29 +341,29 @@ class TypingNotificationsTestCase(unittest.TestCase):
 
         # SYN-230 - see if we can still set after timeout
 
-        yield self.handler.started_typing(
-            target_user=self.u_apple,
-            auth_user=self.u_apple,
-            room_id=self.room_id,
+        self.successResultOf(self.handler.started_typing(
+            target_user=U_APPLE,
+            auth_user=U_APPLE,
+            room_id=ROOM_ID,
             timeout=10000,
-        )
+        ))
 
         self.on_new_event.assert_has_calls(
-            [call('typing_key', 3, rooms=[self.room_id])]
+            [call('typing_key', 3, rooms=[ROOM_ID])]
         )
         self.on_new_event.reset_mock()
 
         self.assertEquals(self.event_source.get_current_key(), 3)
-        events = yield self.event_source.get_new_events(
-            room_ids=[self.room_id], from_key=0
+        events = self.event_source.get_new_events(
+            room_ids=[ROOM_ID], from_key=0
         )
         self.assertEquals(
             events[0],
             [
                 {
                     "type": "m.typing",
-                    "room_id": self.room_id,
-                    "content": {"user_ids": [self.u_apple.to_string()]},
+                    "room_id": ROOM_ID,
+                    "content": {"user_ids": [U_APPLE.to_string()]},
                 }
             ],
         )
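
The conversion above is the general HomeserverTestCase migration pattern: make_homeserver builds the server (injecting mocks through setup_test_homeserver's keyword arguments), prepare runs once it is up, and the test methods themselves are synchronous, unwrapping completed deferreds with successResultOf instead of yielding under @defer.inlineCallbacks. A minimal sketch of that shape, using only hooks that appear in this patch (the class and mock names are illustrative):

    from mock import Mock

    from twisted.internet import defer

    from tests import unittest


    class ExampleTestCase(unittest.HomeserverTestCase):
        def make_homeserver(self, reactor, clock):
            # build the homeserver, swapping mocks in via keyword args
            return self.setup_test_homeserver(notifier=Mock())

        def prepare(self, reactor, clock, hs):
            # the fake reactor starts at time 0; pump it forward so
            # tests run at a known unix time
            reactor.pump((1000,))

        def test_example(self):
            # completed deferreds are unwrapped synchronously
            self.successResultOf(defer.succeed(None))
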
diff --git a/tests/rest/media/v1/test_base.py b/tests/rest/media/v1/test_base.py
new file mode 100644
index 0000000000..af8f74eb42
--- /dev/null
+++ b/tests/rest/media/v1/test_base.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.rest.media.v1._base import get_filename_from_headers
+
+from tests import unittest
+
+
+class GetFileNameFromHeadersTests(unittest.TestCase):
+    # input -> expected result
+    TEST_CASES = {
+        b"inline; filename=abc.txt": u"abc.txt",
+        b'inline; filename="azerty"': u"azerty",
+        b'inline; filename="aze%20rty"': u"aze%20rty",
+        b'inline; filename="aze\"rty"': u'aze"rty',
+        b'inline; filename="azer;ty"': u"azer;ty",
+
+        b"inline; filename*=utf-8''foo%C2%A3bar": u"foo£bar",
+    }
+
+    def tests(self):
+        for hdr, expected in self.TEST_CASES.items():
+            res = get_filename_from_headers(
+                {
+                    b'Content-Disposition': [hdr],
+                },
+            )
+            self.assertEqual(
+                res, expected,
+                "expected output for %s to be %s but was %s" % (
+                    hdr, expected, res,
+                )
+            )
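
The last TEST_CASES entry above exercises the RFC 5987 extended-parameter form (filename*=), in which the value is a charset, an optional language tag, and percent-encoded bytes, separated by single quotes. A hedged sketch of just that decoding step, in Python 3 for brevity (synapse's actual parser is get_filename_from_headers, which also handles the plain filename= forms):

    from urllib.parse import unquote

    def decode_rfc5987(value):
        # value looks like "utf-8''foo%C2%A3bar": charset, optional
        # language tag, then percent-encoded bytes in that charset
        charset, _language, encoded = value.split("'", 2)
        return unquote(encoded, encoding=charset)

    assert decode_rfc5987("utf-8''foo%C2%A3bar") == u"foo£bar"
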
diff --git a/tests/server.py b/tests/server.py
index fc1e76d146..37069afdda 100644
--- a/tests/server.py
+++ b/tests/server.py
@@ -137,6 +137,7 @@ def make_request(
     access_token=None,
     request=SynapseRequest,
     shorthand=True,
+    federation_auth_origin=None,
 ):
     """
     Make a web request using the given method and path, feed it the
@@ -150,9 +151,11 @@ def make_request(
         a dict.
         shorthand: Whether to try and be helpful and prefix the given URL
         with the usual REST API path, if it doesn't contain it.
+        federation_auth_origin (bytes|None): if not None, we will add a fake
+            Authorization header pretending to be the given server name.
 
     Returns:
-        A synapse.http.site.SynapseRequest.
+        Tuple[synapse.http.site.SynapseRequest, channel]
     """
     if not isinstance(method, bytes):
         method = method.encode('ascii')
@@ -184,6 +187,11 @@ def make_request(
             b"Authorization", b"Bearer " + access_token.encode('ascii')
         )
 
+    if federation_auth_origin is not None:
+        req.requestHeaders.addRawHeader(
+            b"Authorization", b"X-Matrix origin=%s,key=,sig=" % (federation_auth_origin,)
+        )
+
     if content:
         req.requestHeaders.addRawHeader(b"Content-Type", b"application/json")
 
@@ -288,9 +296,6 @@ def setup_test_homeserver(cleanup_func, *args, **kwargs):
             **kwargs
         )
 
-    pool.runWithConnection = runWithConnection
-    pool.runInteraction = runInteraction
-
     class ThreadPool:
         """
         Threadless thread pool.
@@ -316,8 +321,12 @@ def setup_test_homeserver(cleanup_func, *args, **kwargs):
             return d
 
     clock.threadpool = ThreadPool()
-    pool.threadpool = ThreadPool()
-    pool.running = True
+
+    if pool:
+        pool.runWithConnection = runWithConnection
+        pool.runInteraction = runInteraction
+        pool.threadpool = ThreadPool()
+        pool.running = True
     return d
 
 
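
The fake header added above uses the federation X-Matrix authorization scheme with empty key= and sig= fields; it only passes the server's auth check because tests that use it (such as test_typing above) mock out Keyring.verify_json_for_server. A hedged sketch of what gets emitted (the origin value is illustrative):

    def fake_federation_auth_header(origin):
        # mirrors the header make_request adds when federation_auth_origin
        # is set; key= and sig= stay empty because the keyring is mocked
        return b"X-Matrix origin=%s,key=,sig=" % (origin,)

    assert (fake_federation_auth_header(b"farm")
            == b"X-Matrix origin=farm,key=,sig=")
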
diff --git a/tests/unittest.py b/tests/unittest.py
index fac254ff10..ef31321bc8 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -262,6 +262,7 @@ class HomeserverTestCase(TestCase):
         access_token=None,
         request=SynapseRequest,
         shorthand=True,
+        federation_auth_origin=None,
     ):
         """
         Create a SynapseRequest at the path using the method and containing the
@@ -275,15 +276,18 @@ class HomeserverTestCase(TestCase):
             a dict.
             shorthand: Whether to try and be helpful and prefix the given URL
             with the usual REST API path, if it doesn't contain it.
+            federation_auth_origin (bytes|None): if not None, we will add a fake
+                Authorization header pretending to be the given server name.
 
         Returns:
-            A synapse.http.site.SynapseRequest.
+            Tuple[synapse.http.site.SynapseRequest, channel]
         """
         if isinstance(content, dict):
             content = json.dumps(content).encode('utf8')
 
         return make_request(
-            self.reactor, method, path, content, access_token, request, shorthand
+            self.reactor, method, path, content, access_token, request, shorthand,
+            federation_auth_origin,
         )
 
     def render(self, request):
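
Together with render(), the returned tuple gives tests the standard request/response pattern; a hedged sketch of its use inside a HomeserverTestCase method (the path, body and expected code are illustrative):

    request, channel = self.make_request(
        "PUT",
        "/_matrix/federation/v1/send/1000000/",
        b"{}",
        federation_auth_origin=b"farm",
    )
    self.render(request)  # drive the request through the fake site
    self.assertEqual(channel.code, 200)
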
diff --git a/tests/utils.py b/tests/utils.py
index 2dfcb70a93..ee272157aa 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -29,7 +29,7 @@ from twisted.internet import defer, reactor
 from synapse.api.constants import EventTypes, RoomVersions
 from synapse.api.errors import CodeMessageException, cs_error
 from synapse.config.server import ServerConfig
-from synapse.federation.transport import server
+from synapse.federation.transport import server as federation_server
 from synapse.http.server import HttpServer
 from synapse.server import HomeServer
 from synapse.storage import DataStore
@@ -45,7 +45,9 @@ from synapse.util.ratelimitutils import FederationRateLimiter
 # set this to True to run the tests against postgres instead of sqlite.
 USE_POSTGRES_FOR_TESTS = os.environ.get("SYNAPSE_POSTGRES", False)
 LEAVE_DB = os.environ.get("SYNAPSE_LEAVE_DB", False)
-POSTGRES_USER = os.environ.get("SYNAPSE_POSTGRES_USER", "postgres")
+POSTGRES_USER = os.environ.get("SYNAPSE_POSTGRES_USER", None)
+POSTGRES_HOST = os.environ.get("SYNAPSE_POSTGRES_HOST", None)
+POSTGRES_PASSWORD = os.environ.get("SYNAPSE_POSTGRES_PASSWORD", None)
 POSTGRES_BASE_DB = "_synapse_unit_tests_base_%s" % (os.getpid(),)
 
 
@@ -58,6 +60,8 @@ def setupdb():
             "args": {
                 "database": POSTGRES_BASE_DB,
                 "user": POSTGRES_USER,
+                "host": POSTGRES_HOST,
+                "password": POSTGRES_PASSWORD,
                 "cp_min": 1,
                 "cp_max": 5,
             },
@@ -66,7 +70,9 @@ def setupdb():
         config.password_providers = []
         config.database_config = pgconfig
         db_engine = create_engine(pgconfig)
-        db_conn = db_engine.module.connect(user=POSTGRES_USER)
+        db_conn = db_engine.module.connect(
+            user=POSTGRES_USER, host=POSTGRES_HOST, password=POSTGRES_PASSWORD
+        )
         db_conn.autocommit = True
         cur = db_conn.cursor()
         cur.execute("DROP DATABASE IF EXISTS %s;" % (POSTGRES_BASE_DB,))
@@ -76,7 +82,10 @@ def setupdb():
 
         # Set up in the db
         db_conn = db_engine.module.connect(
-            database=POSTGRES_BASE_DB, user=POSTGRES_USER
+            database=POSTGRES_BASE_DB,
+            user=POSTGRES_USER,
+            host=POSTGRES_HOST,
+            password=POSTGRES_PASSWORD,
         )
         cur = db_conn.cursor()
         _get_or_create_schema_state(cur, db_engine)
@@ -86,7 +95,9 @@ def setupdb():
         db_conn.close()
 
         def _cleanup():
-            db_conn = db_engine.module.connect(user=POSTGRES_USER)
+            db_conn = db_engine.module.connect(
+                user=POSTGRES_USER, host=POSTGRES_HOST, password=POSTGRES_PASSWORD
+            )
             db_conn.autocommit = True
             cur = db_conn.cursor()
             cur.execute("DROP DATABASE IF EXISTS %s;" % (POSTGRES_BASE_DB,))
@@ -142,6 +153,9 @@ def default_config(name):
     config.saml2_enabled = False
     config.public_baseurl = None
     config.default_identity_server = None
+    config.key_refresh_interval = 24 * 60 * 60 * 1000
+    config.old_signing_keys = {}
+    config.tls_fingerprints = []
 
     config.use_frozen_dicts = False
 
@@ -186,6 +200,9 @@ def setup_test_homeserver(
     Args:
         cleanup_func : The function used to register a cleanup routine for
                        after the test.
+
+    Calling this method directly is deprecated: you should instead derive from
+    HomeserverTestCase.
     """
     if reactor is None:
         from twisted.internet import reactor
@@ -203,7 +220,14 @@ def setup_test_homeserver(
 
         config.database_config = {
             "name": "psycopg2",
-            "args": {"database": test_db, "cp_min": 1, "cp_max": 5},
+            "args": {
+                "database": test_db,
+                "host": POSTGRES_HOST,
+                "password": POSTGRES_PASSWORD,
+                "user": POSTGRES_USER,
+                "cp_min": 1,
+                "cp_max": 5,
+            },
         }
     else:
         config.database_config = {
@@ -217,7 +241,10 @@ def setup_test_homeserver(
     # the template database we generate in setupdb()
     if datastore is None and isinstance(db_engine, PostgresEngine):
         db_conn = db_engine.module.connect(
-            database=POSTGRES_BASE_DB, user=POSTGRES_USER
+            database=POSTGRES_BASE_DB,
+            user=POSTGRES_USER,
+            host=POSTGRES_HOST,
+            password=POSTGRES_PASSWORD,
         )
         db_conn.autocommit = True
         cur = db_conn.cursor()
@@ -267,7 +294,10 @@ def setup_test_homeserver(
 
                 # Drop the test database
                 db_conn = db_engine.module.connect(
-                    database=POSTGRES_BASE_DB, user=POSTGRES_USER
+                    database=POSTGRES_BASE_DB,
+                    user=POSTGRES_USER,
+                    host=POSTGRES_HOST,
+                    password=POSTGRES_PASSWORD,
                 )
                 db_conn.autocommit = True
                 cur = db_conn.cursor()
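
With host and password threaded through everywhere, the postgres-backed tests no longer assume a local, passwordless server; unset environment variables come back as None, which psycopg2 drops from the connection parameters so the libpq defaults still apply. A hedged sketch of the equivalent raw connection (psycopg2 is the module PostgresEngine wraps; the database name matches POSTGRES_BASE_DB above):

    import os

    import psycopg2

    db_conn = psycopg2.connect(
        database="_synapse_unit_tests_base_%s" % (os.getpid(),),
        user=os.environ.get("SYNAPSE_POSTGRES_USER"),
        host=os.environ.get("SYNAPSE_POSTGRES_HOST"),
        password=os.environ.get("SYNAPSE_POSTGRES_PASSWORD"),
    )
    db_conn.autocommit = True
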
@@ -324,23 +354,27 @@ def setup_test_homeserver(
 
     fed = kargs.get("resource_for_federation", None)
     if fed:
-        server.register_servlets(
-            hs,
-            resource=fed,
-            authenticator=server.Authenticator(hs),
-            ratelimiter=FederationRateLimiter(
-                hs.get_clock(),
-                window_size=hs.config.federation_rc_window_size,
-                sleep_limit=hs.config.federation_rc_sleep_limit,
-                sleep_msec=hs.config.federation_rc_sleep_delay,
-                reject_limit=hs.config.federation_rc_reject_limit,
-                concurrent_requests=hs.config.federation_rc_concurrent,
-            ),
-        )
+        register_federation_servlets(hs, fed)
 
     defer.returnValue(hs)
 
 
+def register_federation_servlets(hs, resource):
+    federation_server.register_servlets(
+        hs,
+        resource=resource,
+        authenticator=federation_server.Authenticator(hs),
+        ratelimiter=FederationRateLimiter(
+            hs.get_clock(),
+            window_size=hs.config.federation_rc_window_size,
+            sleep_limit=hs.config.federation_rc_sleep_limit,
+            sleep_msec=hs.config.federation_rc_sleep_delay,
+            reject_limit=hs.config.federation_rc_reject_limit,
+            concurrent_requests=hs.config.federation_rc_concurrent,
+        ),
+    )
+
+
 def get_mock_call_args(pattern_func, mock_func):
     """ Return the arguments the mock function was called with interpreted
     by the pattern functions argument list.
@@ -457,6 +491,9 @@ class MockKey(object):
     def verify(self, message, sig):
         assert sig == b"\x9a\x87$"
 
+    def encode(self):
+        return b"<fake_encoded_key>"
+
 
 class MockClock(object):
     now = 1000
@@ -486,7 +523,7 @@ class MockClock(object):
         return t
 
     def looping_call(self, function, interval):
-        self.loopers.append([function, interval / 1000., self.now])
+        self.loopers.append([function, interval / 1000.0, self.now])
 
     def cancel_call_later(self, timer, ignore_errs=False):
         if timer[2]:
@@ -522,7 +559,7 @@ class MockClock(object):
                 looped[2] = self.now
 
     def advance_time_msec(self, ms):
-        self.advance_time(ms / 1000.)
+        self.advance_time(ms / 1000.0)
 
     def time_bound_deferred(self, d, *args, **kwargs):
         # We don't bother timing things out for now.
@@ -631,7 +668,7 @@ def create_room(hs, room_id, creator_id):
             "sender": creator_id,
             "room_id": room_id,
             "content": {},
-        }
+        },
     )
 
     event, context = yield event_creation_handler.create_new_client_event(builder)