diff --git a/.buildkite/docker-compose.py35.pg95.yaml b/.buildkite/docker-compose.py35.pg95.yaml
index 2f14387fbc..43237b7775 100644
--- a/.buildkite/docker-compose.py35.pg95.yaml
+++ b/.buildkite/docker-compose.py35.pg95.yaml
@@ -6,6 +6,7 @@ services:
image: postgres:9.5
environment:
POSTGRES_PASSWORD: postgres
+ command: -c fsync=off
testenv:
image: python:3.5
@@ -16,6 +17,6 @@ services:
SYNAPSE_POSTGRES_HOST: postgres
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
- working_dir: /app
+ working_dir: /src
volumes:
- - ..:/app
+ - ..:/src
diff --git a/.buildkite/docker-compose.py37.pg11.yaml b/.buildkite/docker-compose.py37.pg11.yaml
index f3eec05ceb..b767228147 100644
--- a/.buildkite/docker-compose.py37.pg11.yaml
+++ b/.buildkite/docker-compose.py37.pg11.yaml
@@ -6,6 +6,7 @@ services:
image: postgres:11
environment:
POSTGRES_PASSWORD: postgres
+ command: -c fsync=off
testenv:
image: python:3.7
@@ -16,6 +17,6 @@ services:
SYNAPSE_POSTGRES_HOST: postgres
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
- working_dir: /app
+ working_dir: /src
volumes:
- - ..:/app
+ - ..:/src
diff --git a/.buildkite/docker-compose.py37.pg95.yaml b/.buildkite/docker-compose.py37.pg95.yaml
index 2a41db8eba..02fcd28304 100644
--- a/.buildkite/docker-compose.py37.pg95.yaml
+++ b/.buildkite/docker-compose.py37.pg95.yaml
@@ -6,6 +6,7 @@ services:
image: postgres:9.5
environment:
POSTGRES_PASSWORD: postgres
+ command: -c fsync=off
testenv:
image: python:3.7
@@ -16,6 +17,6 @@ services:
SYNAPSE_POSTGRES_HOST: postgres
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
- working_dir: /app
+ working_dir: /src
volumes:
- - ..:/app
+ - ..:/src
diff --git a/.buildkite/format_tap.py b/.buildkite/format_tap.py
index 94582f5571..b557a9c38e 100644
--- a/.buildkite/format_tap.py
+++ b/.buildkite/format_tap.py
@@ -1,3 +1,18 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
import sys
from tap.parser import Parser
from tap.line import Result, Unknown, Diagnostic
diff --git a/.buildkite/merge_base_branch.sh b/.buildkite/merge_base_branch.sh
index 26176d6465..eb7219a56d 100755
--- a/.buildkite/merge_base_branch.sh
+++ b/.buildkite/merge_base_branch.sh
@@ -27,7 +27,7 @@ git config --global user.name "A robot"
# Fetch and merge. If it doesn't work, it will raise due to set -e.
git fetch -u origin $GITBASE
-git merge --no-edit origin/$GITBASE
+git merge --no-edit --no-commit origin/$GITBASE
# Show what we are after.
git --no-pager show -s
diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml
index b75269a155..ff05a7428b 100644
--- a/.buildkite/pipeline.yml
+++ b/.buildkite/pipeline.yml
@@ -1,8 +1,7 @@
env:
- CODECOV_TOKEN: "2dd7eb9b-0eda-45fe-a47c-9b5ac040045f"
+ COVERALLS_REPO_TOKEN: wsJWOby6j0uCYFiCes3r0XauxO27mx8lD
steps:
-
- command:
- "python -m pip install tox"
- "tox -e check_codestyle"
@@ -10,6 +9,7 @@ steps:
plugins:
- docker#v3.0.1:
image: "python:3.6"
+ mount-buildkite-agent: false
- command:
- "python -m pip install tox"
@@ -18,6 +18,7 @@ steps:
plugins:
- docker#v3.0.1:
image: "python:3.6"
+ mount-buildkite-agent: false
- command:
- "python -m pip install tox"
@@ -26,6 +27,7 @@ steps:
plugins:
- docker#v3.0.1:
image: "python:3.6"
+ mount-buildkite-agent: false
- command:
- "python -m pip install tox"
@@ -36,6 +38,7 @@ steps:
- docker#v3.0.1:
image: "python:3.6"
propagate-environment: true
+ mount-buildkite-agent: false
- command:
- "python -m pip install tox"
@@ -44,21 +47,35 @@ steps:
plugins:
- docker#v3.0.1:
image: "python:3.6"
+ mount-buildkite-agent: false
- - wait
+ - command:
+ - "python -m pip install tox"
+ - "tox -e mypy"
+ label: ":mypy: mypy"
+ plugins:
+ - docker#v3.0.1:
+ image: "python:3.5"
+ mount-buildkite-agent: false
+ - wait
- command:
- "apt-get update && apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev zlib1g-dev"
- "python3.5 -m pip install tox"
- - "tox -e py35-old,codecov"
+ - "tox -e py35-old,combine"
label: ":python: 3.5 / SQLite / Old Deps"
env:
TRIAL_FLAGS: "-j 2"
+ LANG: "C.UTF-8"
plugins:
- docker#v3.0.1:
- image: "ubuntu:xenial" # We use xenail to get an old sqlite and python
+ image: "ubuntu:xenial" # We use xenial to get an old sqlite and python
+ workdir: "/src"
+ mount-buildkite-agent: false
propagate-environment: true
+ - matrix-org/coveralls#v1.0:
+ parallel: "true"
retry:
automatic:
- exit_status: -1
@@ -68,14 +85,18 @@ steps:
- command:
- "python -m pip install tox"
- - "tox -e py35,codecov"
+ - "tox -e py35,combine"
label: ":python: 3.5 / SQLite"
env:
TRIAL_FLAGS: "-j 2"
plugins:
- docker#v3.0.1:
image: "python:3.5"
+ workdir: "/src"
+ mount-buildkite-agent: false
propagate-environment: true
+ - matrix-org/coveralls#v1.0:
+ parallel: "true"
retry:
automatic:
- exit_status: -1
@@ -85,14 +106,18 @@ steps:
- command:
- "python -m pip install tox"
- - "tox -e py36,codecov"
+ - "tox -e py36,combine"
label: ":python: 3.6 / SQLite"
env:
TRIAL_FLAGS: "-j 2"
plugins:
- docker#v3.0.1:
image: "python:3.6"
+ workdir: "/src"
+ mount-buildkite-agent: false
propagate-environment: true
+ - matrix-org/coveralls#v1.0:
+ parallel: "true"
retry:
automatic:
- exit_status: -1
@@ -102,14 +127,18 @@ steps:
- command:
- "python -m pip install tox"
- - "tox -e py37,codecov"
+ - "tox -e py37,combine"
label: ":python: 3.7 / SQLite"
env:
TRIAL_FLAGS: "-j 2"
plugins:
- docker#v3.0.1:
image: "python:3.7"
+ workdir: "/src"
+ mount-buildkite-agent: false
propagate-environment: true
+ - matrix-org/coveralls#v1.0:
+ parallel: "true"
retry:
automatic:
- exit_status: -1
@@ -123,12 +152,14 @@ steps:
env:
TRIAL_FLAGS: "-j 8"
command:
- - "bash -c 'python -m pip install tox && python -m tox -e py35-postgres,codecov'"
+ - "bash -c 'python -m pip install tox && python -m tox -e py35-postgres,combine'"
plugins:
- docker-compose#v2.1.0:
run: testenv
config:
- .buildkite/docker-compose.py35.pg95.yaml
+ - matrix-org/coveralls#v1.0:
+ parallel: "true"
retry:
automatic:
- exit_status: -1
@@ -142,12 +173,14 @@ steps:
env:
TRIAL_FLAGS: "-j 8"
command:
- - "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,codecov'"
+ - "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,combine'"
plugins:
- docker-compose#v2.1.0:
run: testenv
config:
- .buildkite/docker-compose.py37.pg95.yaml
+ - matrix-org/coveralls#v1.0:
+ parallel: "true"
retry:
automatic:
- exit_status: -1
@@ -161,12 +194,14 @@ steps:
env:
TRIAL_FLAGS: "-j 8"
command:
- - "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,codecov'"
+ - "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,combine'"
plugins:
- docker-compose#v2.1.0:
run: testenv
config:
- .buildkite/docker-compose.py37.pg11.yaml
+ - matrix-org/coveralls#v1.0:
+ parallel: "true"
retry:
automatic:
- exit_status: -1
@@ -174,7 +209,6 @@ steps:
- exit_status: 2
limit: 2
-
- label: "SyTest - :python: 3.5 / SQLite / Monolith"
agents:
queue: "medium"
@@ -187,6 +221,16 @@ steps:
propagate-environment: true
always-pull: true
workdir: "/src"
+ entrypoint: ["/bin/sh", "-e", "-c"]
+ mount-buildkite-agent: false
+ volumes: ["./logs:/logs"]
+ - artifacts#v1.2.0:
+ upload: [ "logs/**/*.log", "logs/**/*.log.*", "logs/coverage.xml" ]
+ - matrix-org/annotate:
+ path: "logs/annotate.md"
+ class: "error"
+ - matrix-org/coveralls#v1.0:
+ parallel: "true"
retry:
automatic:
- exit_status: -1
@@ -196,7 +240,7 @@ steps:
- label: "SyTest - :python: 3.5 / :postgres: 9.6 / Monolith"
agents:
- queue: "medium"
+ queue: "xlarge"
env:
POSTGRES: "1"
command:
@@ -204,10 +248,20 @@ steps:
- "bash /synapse_sytest.sh"
plugins:
- docker#v3.0.1:
- image: "matrixdotorg/sytest-synapse:py35"
+ image: "matrixdotorg/sytest-synapse:dinsic-py3"
propagate-environment: true
always-pull: true
workdir: "/src"
+ entrypoint: ["/bin/sh", "-e", "-c"]
+ mount-buildkite-agent: false
+ volumes: ["./logs:/logs"]
+ - artifacts#v1.2.0:
+ upload: [ "logs/**/*.log", "logs/**/*.log.*", "logs/coverage.xml" ]
+ - matrix-org/annotate:
+ path: "logs/annotate.md"
+ class: "error"
+ - matrix-org/coveralls#v1.0:
+ parallel: "true"
retry:
automatic:
- exit_status: -1
@@ -215,7 +269,7 @@ steps:
- exit_status: 2
limit: 2
- - label: "SyTest - :python: 3.5 / :postgres: 9.6 / Workers"
+ - label: "SyTest - :python: 3 / :postgres: 9.6 / Workers"
agents:
queue: "medium"
env:
@@ -228,13 +282,29 @@ steps:
- "bash /synapse_sytest.sh"
plugins:
- docker#v3.0.1:
- image: "matrixdotorg/sytest-synapse:py35"
+ image: "matrixdotorg/sytest-synapse:dinsic-py3"
propagate-environment: true
always-pull: true
workdir: "/src"
+ entrypoint: ["/bin/sh", "-e", "-c"]
+ mount-buildkite-agent: false
+ volumes: ["./logs:/logs"]
+ - artifacts#v1.2.0:
+ upload: [ "logs/**/*.log", "logs/**/*.log.*", "logs/coverage.xml" ]
+ - matrix-org/annotate:
+ path: "logs/annotate.md"
+ class: "error"
+ - matrix-org/coveralls#v1.0:
+ parallel: "true"
retry:
automatic:
- exit_status: -1
limit: 2
- exit_status: 2
limit: 2
+
+ - wait: ~
+ continue_on_failure: true
+
+ - label: Trigger webhook
+ command: "curl -k https://coveralls.io/webhook?repo_token=$COVERALLS_REPO_TOKEN -d \"payload[build_num]=$BUILDKITE_BUILD_NUMBER&payload[status]=done\""
diff --git a/.coveragerc b/.coveragerc
index e9460a340a..11f2ec8387 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -1,7 +1,8 @@
[run]
branch = True
parallel = True
-include = synapse/*
+include = $TOP/synapse/*
+data_file = $TOP/.coverage
[report]
precision = 2
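
coverage.py expands environment variables such as `$TOP` in its configuration file, so each CI job can point the include path and data file at the same checkout root. A minimal sketch of the combine step via coverage.py's Python API, assuming `/src` is the container workdir as configured in the pipeline:

```python
# Minimal sketch: combine the parallel .coverage.* files with coverage.py's
# API. Assumes TOP points at the checkout root, matching .coveragerc above.
import os

from coverage import Coverage

os.environ["TOP"] = "/src"  # assumption: the same root the CI containers used

cov = Coverage(config_file=os.path.join(os.environ["TOP"], ".coveragerc"))
cov.combine()  # merge the per-process .coverage.* data files
cov.save()     # write the merged data to $TOP/.coverage
```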
diff --git a/.gitignore b/.gitignore
index f6168a8819..e53d4908d5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -20,6 +20,7 @@ _trial_temp*/
/*.signing.key
/env/
/homeserver*.yaml
+/logs
/media_store/
/uploads
@@ -29,8 +30,9 @@ _trial_temp*/
/.vscode/
# build products
-/.coverage*
!/.coveragerc
+/.coverage*
+/.mypy_cache/
/.tox
/build/
/coverage.*
@@ -38,4 +40,3 @@ _trial_temp*/
/docs/build/
/htmlcov
/pip-wheel-metadata/
-
diff --git a/INSTALL.md b/INSTALL.md
index 5728882460..6bce370ea8 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -36,7 +36,7 @@ that your email address is probably `user@example.com` rather than
System requirements:
- POSIX-compliant system (tested on Linux & OS X)
-- Python 3.5, 3.6, 3.7, or 2.7
+- Python 3.5, 3.6, or 3.7
- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
Synapse is written in Python but some of the libraries it uses are written in
@@ -421,7 +421,7 @@ If Synapse is not configured with an SMTP server, password reset via email will
The easiest way to create a new user is to do so from a client like [Riot](https://riot.im).
-Alternatively you can do so from the command line if you have installed via pip.
+Alternatively you can do so from the command line if you have installed via pip.
This can be done as follows:
diff --git a/MANIFEST.in b/MANIFEST.in
index 919cd8a1cd..8fc3b6162b 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,5 @@
include synctl
+include sytest-blacklist
include LICENSE
include VERSION
include *.rst
@@ -38,14 +39,21 @@ exclude sytest-blacklist
include pyproject.toml
recursive-include changelog.d *
-prune .github
-prune demo/etc
-prune docker
+prune .buildkite
prune .circleci
+prune .codecov.yml
prune .coveragerc
+prune .github
prune debian
-prune .codecov.yml
-prune .buildkite
+prune demo/etc
+prune docker
+prune mypy.ini
+prune stubs
exclude jenkins*
recursive-exclude jenkins *.sh
+
+# FIXME: we shouldn't have these templates here
+recursive-include res/templates-dinsic *.css
+recursive-include res/templates-dinsic *.html
+recursive-include res/templates-dinsic *.txt
diff --git a/UPGRADE.rst b/UPGRADE.rst
index cf228c7c52..dddcd75fda 100644
--- a/UPGRADE.rst
+++ b/UPGRADE.rst
@@ -49,6 +49,56 @@ returned by the Client-Server API:
# configured on port 443.
curl -kv https://<host.name>/_matrix/client/versions 2>&1 | grep "Server:"
+Upgrading to v1.4.0
+===================
+
+Config options
+--------------
+
+**Note: Registration by email address or phone number will not work in this release unless
+some config options are changed from their defaults.**
+
+This is due to Synapse v1.4.0 now defaulting to sending registration and password reset tokens
+itself. This is for security reasons as well as putting less reliance on identity servers.
+However, currently Synapse only supports sending emails, and does not have support for
+phone-based password reset or account registration. If Synapse is configured to handle these on
+its own, phone-based password resets and registration will be disabled. For Synapse to send
+emails, the ``email`` block of the config must be filled out. If not, then password resets and
+registration via email will be disabled entirely.
+
+This release also deprecates the ``email.trust_identity_server_for_password_resets`` option and
+replaces it with the ``account_threepid_delegates`` dictionary. This option defines whether the
+homeserver should delegate an external server (typically an `identity server
+<https://matrix.org/docs/spec/identity_service/r0.2.1>`_) to handle sending password reset or
+registration messages via email and SMS.
+
+If ``email.trust_identity_server_for_password_resets`` is set to ``true``, and
+``account_threepid_delegates.email`` is not set, then the first entry in
+``trusted_third_party_id_servers`` will be used as the account threepid delegate for email.
+This is to ensure compatibility with existing Synapse installs that set up external server
+handling for these tasks before v1.4.0. If ``email.trust_identity_server_for_password_resets``
+is ``true`` and no trusted identity server domains are configured, Synapse will throw an error.
+
+If ``email.trust_identity_server_for_password_resets`` is ``false`` or absent and a threepid
+type in ``account_threepid_delegates`` is not set to a domain, then Synapse will attempt to
+send password reset and registration messages for that type.
+
+Email templates
+---------------
+
+If you have configured a custom template directory with the ``email.template_dir`` option, be
+aware that there are new templates regarding registration. ``registration.html`` and
+``registration.txt`` have been added and contain the content that is sent to a client upon
+registering via an email address.
+
+``registration_success.html`` and ``registration_failure.html`` are also new HTML templates
+that will be shown to the user when they click the link in their registration email, either
+showing them a success or failure page (assuming a redirect URL is not configured).
+
+Synapse will expect these files to exist inside the configured template directory. To view the
+default templates, see `synapse/res/templates
+<https://github.com/matrix-org/synapse/tree/master/synapse/res/templates>`_.
+
Upgrading to v1.2.0
===================
@@ -132,6 +182,19 @@ server for password resets, set ``trust_identity_server_for_password_resets`` to
See the `sample configuration file <docs/sample_config.yaml>`_
for more details on these settings.
+New email templates
+-------------------
+Some new templates have been added to the default template directory for the purpose of the
+homeserver sending its own password reset emails. If you have configured a custom
+``template_dir`` in your Synapse config, these files will need to be added.
+
+``password_reset.html`` and ``password_reset.txt`` are HTML and plain text templates
+respectively that contain the content of the message emailed to the user when they
+attempt to reset their password via email. ``password_reset_success.html`` and
+``password_reset_failure.html`` are HTML files whose content (assuming no redirect
+URL is set) will be shown to the user after they click the link in the email sent
+to them.
+
Upgrading to v0.99.0
====================
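
As a summary of the fallback rules in the v1.4.0 notes above, here is an illustrative sketch. It is not Synapse's actual config code, and the flattened option names are for readability only:

```python
# Illustrative sketch of the account_threepid_delegates fallback logic from
# the v1.4.0 upgrade notes above. Not Synapse's actual config code.
from typing import List, Optional


def resolve_email_delegate(
    trust_identity_server_for_password_resets,  # type: bool
    account_threepid_delegates_email,           # type: Optional[str]
    trusted_third_party_id_servers,             # type: List[str]
):
    # type: (...) -> Optional[str]
    if account_threepid_delegates_email:
        # An explicitly configured delegate always wins.
        return account_threepid_delegates_email

    if trust_identity_server_for_password_resets:
        if not trusted_third_party_id_servers:
            # v1.4.0 treats this combination as a startup error.
            raise ValueError(
                "trust_identity_server_for_password_resets is true but no "
                "trusted identity server domains are configured"
            )
        # Compatibility fallback: use the first trusted identity server.
        return trusted_third_party_id_servers[0]

    # No delegate: Synapse sends emails itself (if the email block is
    # configured), and phone-based flows are disabled.
    return None
```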
diff --git a/changelog.d/1.feature b/changelog.d/1.feature
new file mode 100644
index 0000000000..845642e445
--- /dev/null
+++ b/changelog.d/1.feature
@@ -0,0 +1 @@
+Forbid changing the name, avatar or topic of a direct room.
diff --git a/changelog.d/10.bugfix b/changelog.d/10.bugfix
new file mode 100644
index 0000000000..51f89f46dd
--- /dev/null
+++ b/changelog.d/10.bugfix
@@ -0,0 +1 @@
+Don't apply retention policy based filtering on state events.
diff --git a/changelog.d/11.feature b/changelog.d/11.feature
new file mode 100644
index 0000000000..362e4b1efd
--- /dev/null
+++ b/changelog.d/11.feature
@@ -0,0 +1 @@
+Allow server admins to configure a custom global rate-limiting for third party invites.
\ No newline at end of file
diff --git a/changelog.d/12.feature b/changelog.d/12.feature
new file mode 100644
index 0000000000..8e6e7a28af
--- /dev/null
+++ b/changelog.d/12.feature
@@ -0,0 +1 @@
+Add `/user/:user_id/info` CS servlet to give user deactivated/expired information.
\ No newline at end of file
diff --git a/changelog.d/13.feature b/changelog.d/13.feature
new file mode 100644
index 0000000000..c2d2e93abf
--- /dev/null
+++ b/changelog.d/13.feature
@@ -0,0 +1 @@
+Hide expired users from the user directory, and optionally re-add them on renewal.
\ No newline at end of file
diff --git a/changelog.d/14.feature b/changelog.d/14.feature
new file mode 100644
index 0000000000..020d0bac1e
--- /dev/null
+++ b/changelog.d/14.feature
@@ -0,0 +1 @@
+User displaynames now have capitalised letters after `-` symbols.
\ No newline at end of file
diff --git a/changelog.d/15.misc b/changelog.d/15.misc
new file mode 100644
index 0000000000..4cc4a5175f
--- /dev/null
+++ b/changelog.d/15.misc
@@ -0,0 +1 @@
+Fix the ordering on `scripts/generate_signing_key.py`'s import statement.
diff --git a/changelog.d/17.misc b/changelog.d/17.misc
new file mode 100644
index 0000000000..58120ab5c7
--- /dev/null
+++ b/changelog.d/17.misc
@@ -0,0 +1 @@
+Blacklist some flaky sytests until they're fixed.
\ No newline at end of file
diff --git a/changelog.d/18.feature b/changelog.d/18.feature
new file mode 100644
index 0000000000..f5aa29a6e8
--- /dev/null
+++ b/changelog.d/18.feature
@@ -0,0 +1 @@
+Add option `limit_profile_requests_to_known_users` to require that a user share a room with another user in order to query their profile information.
\ No newline at end of file
diff --git a/changelog.d/19.feature b/changelog.d/19.feature
new file mode 100644
index 0000000000..95a44a4a89
--- /dev/null
+++ b/changelog.d/19.feature
@@ -0,0 +1 @@
+Add `max_avatar_size` and `allowed_avatar_mimetypes` to restrict the size of user avatars and their file type respectively.
\ No newline at end of file
diff --git a/changelog.d/2.bugfix b/changelog.d/2.bugfix
new file mode 100644
index 0000000000..4fe5691468
--- /dev/null
+++ b/changelog.d/2.bugfix
@@ -0,0 +1 @@
+Don't treat 3PID revocation as a new 3PID invite.
diff --git a/changelog.d/20.bugfix b/changelog.d/20.bugfix
new file mode 100644
index 0000000000..8ba53c28f9
--- /dev/null
+++ b/changelog.d/20.bugfix
@@ -0,0 +1 @@
+Validate `client_secret` parameter against the regex provided by the C-S spec.
\ No newline at end of file
diff --git a/changelog.d/21.bugfix b/changelog.d/21.bugfix
new file mode 100644
index 0000000000..630d7812f7
--- /dev/null
+++ b/changelog.d/21.bugfix
@@ -0,0 +1 @@
+Fix resetting user passwords via a phone number.
diff --git a/changelog.d/3.bugfix b/changelog.d/3.bugfix
new file mode 100644
index 0000000000..cc4bcefa80
--- /dev/null
+++ b/changelog.d/3.bugfix
@@ -0,0 +1 @@
+Fix encoding on password reset HTML responses in Python 2.
diff --git a/changelog.d/4.bugfix b/changelog.d/4.bugfix
new file mode 100644
index 0000000000..fe717920a6
--- /dev/null
+++ b/changelog.d/4.bugfix
@@ -0,0 +1 @@
+Fix handling of filtered strings in Python 3.
diff --git a/changelog.d/5.bugfix b/changelog.d/5.bugfix
new file mode 100644
index 0000000000..53f57f46ca
--- /dev/null
+++ b/changelog.d/5.bugfix
@@ -0,0 +1 @@
+Fix room retention policy management in worker mode.
diff --git a/changelog.d/5083.feature b/changelog.d/5083.feature
new file mode 100644
index 0000000000..2ffdd37eef
--- /dev/null
+++ b/changelog.d/5083.feature
@@ -0,0 +1 @@
+Add `auth_profile_reqs` option to require an `access_token` for GET requests to `/profile` endpoints on the CS API.
diff --git a/changelog.d/5098.misc b/changelog.d/5098.misc
new file mode 100644
index 0000000000..9cd83bf226
--- /dev/null
+++ b/changelog.d/5098.misc
@@ -0,0 +1 @@
+Add workarounds for pep-517 install errors.
diff --git a/changelog.d/5214.feature b/changelog.d/5214.feature
new file mode 100644
index 0000000000..6c0f15c901
--- /dev/null
+++ b/changelog.d/5214.feature
@@ -0,0 +1 @@
+Allow server admins to define and enforce a password policy (MSC2000).
diff --git a/changelog.d/5416.misc b/changelog.d/5416.misc
new file mode 100644
index 0000000000..155e8c7cd3
--- /dev/null
+++ b/changelog.d/5416.misc
@@ -0,0 +1 @@
+Add unique index to the profile_replication_status table.
diff --git a/changelog.d/5420.feature b/changelog.d/5420.feature
new file mode 100644
index 0000000000..745864b903
--- /dev/null
+++ b/changelog.d/5420.feature
@@ -0,0 +1 @@
+Add configuration option to hide new users from the user directory.
diff --git a/changelog.d/5610.feature b/changelog.d/5610.feature
new file mode 100644
index 0000000000..b99514f97e
--- /dev/null
+++ b/changelog.d/5610.feature
@@ -0,0 +1 @@
+Implement new custom event rules for power levels.
diff --git a/changelog.d/5633.bugfix b/changelog.d/5633.bugfix
new file mode 100644
index 0000000000..b2ff803b9d
--- /dev/null
+++ b/changelog.d/5633.bugfix
@@ -0,0 +1 @@
+Don't create broken room when power_level_content_override.users does not contain creator_id.
\ No newline at end of file
diff --git a/changelog.d/5680.misc b/changelog.d/5680.misc
new file mode 100644
index 0000000000..46a403a188
--- /dev/null
+++ b/changelog.d/5680.misc
@@ -0,0 +1 @@
+Lay the groundwork for structured logging output.
diff --git a/changelog.d/5702.bugfix b/changelog.d/5702.bugfix
new file mode 100644
index 0000000000..43b6e39b13
--- /dev/null
+++ b/changelog.d/5702.bugfix
@@ -0,0 +1 @@
+Fix 3PID invite to invite association detection in the Tchap room access rules.
diff --git a/changelog.d/5759.misc b/changelog.d/5759.misc
new file mode 100644
index 0000000000..c0bc566c4c
--- /dev/null
+++ b/changelog.d/5759.misc
@@ -0,0 +1 @@
+Allow devices to be marked as hidden, for use by features such as cross-signing.
\ No newline at end of file
diff --git a/changelog.d/5760.feature b/changelog.d/5760.feature
new file mode 100644
index 0000000000..90302d793e
--- /dev/null
+++ b/changelog.d/5760.feature
@@ -0,0 +1 @@
+Force the access rule to be "restricted" if the join rule is "public".
diff --git a/changelog.d/5771.feature b/changelog.d/5771.feature
new file mode 100644
index 0000000000..f2f4de1fdd
--- /dev/null
+++ b/changelog.d/5771.feature
@@ -0,0 +1 @@
+Make Opentracing work in worker mode.
diff --git a/changelog.d/5776.misc b/changelog.d/5776.misc
new file mode 100644
index 0000000000..1fb1b9c152
--- /dev/null
+++ b/changelog.d/5776.misc
@@ -0,0 +1 @@
+Update opentracing docs to use the unified `trace` method.
diff --git a/changelog.d/5815.feature b/changelog.d/5815.feature
new file mode 100644
index 0000000000..ca4df4e7f6
--- /dev/null
+++ b/changelog.d/5815.feature
@@ -0,0 +1 @@
+Implement per-room message retention policies.
diff --git a/changelog.d/5835.feature b/changelog.d/5835.feature
new file mode 100644
index 0000000000..3e8bf5068d
--- /dev/null
+++ b/changelog.d/5835.feature
@@ -0,0 +1 @@
+Add the ability to send registration emails from the homeserver rather than delegating to an identity server.
diff --git a/changelog.d/5844.misc b/changelog.d/5844.misc
new file mode 100644
index 0000000000..a0826af0d2
--- /dev/null
+++ b/changelog.d/5844.misc
@@ -0,0 +1 @@
+Retry well-known lookup before the cache expires, giving a grace period where the remote well-known can be down but we still use the old result.
diff --git a/changelog.d/5845.feature b/changelog.d/5845.feature
new file mode 100644
index 0000000000..7b0dc9a95e
--- /dev/null
+++ b/changelog.d/5845.feature
@@ -0,0 +1 @@
+Add an admin API to purge old rooms from the database.
diff --git a/changelog.d/5850.feature b/changelog.d/5850.feature
new file mode 100644
index 0000000000..b565929a54
--- /dev/null
+++ b/changelog.d/5850.feature
@@ -0,0 +1 @@
+Add retry to well-known lookups if we have recently seen a valid well-known record for the server.
diff --git a/changelog.d/5852.feature b/changelog.d/5852.feature
new file mode 100644
index 0000000000..4a0fc6c542
--- /dev/null
+++ b/changelog.d/5852.feature
@@ -0,0 +1 @@
+Pass opentracing contexts between servers when transmitting EDUs.
diff --git a/changelog.d/5853.feature b/changelog.d/5853.feature
new file mode 100644
index 0000000000..80a04ae2ee
--- /dev/null
+++ b/changelog.d/5853.feature
@@ -0,0 +1 @@
+Opentracing for device list updates.
diff --git a/changelog.d/5855.misc b/changelog.d/5855.misc
new file mode 100644
index 0000000000..32db7fbe37
--- /dev/null
+++ b/changelog.d/5855.misc
@@ -0,0 +1 @@
+Opentracing for room and e2e keys.
diff --git a/changelog.d/5856.feature b/changelog.d/5856.feature
new file mode 100644
index 0000000000..f4310b9244
--- /dev/null
+++ b/changelog.d/5856.feature
@@ -0,0 +1 @@
+Add a tag recording a request's authenticated entity and corresponding servlet in opentracing.
diff --git a/changelog.d/5857.bugfix b/changelog.d/5857.bugfix
new file mode 100644
index 0000000000..008799ccbb
--- /dev/null
+++ b/changelog.d/5857.bugfix
@@ -0,0 +1 @@
+Fix database index so that different backup versions can have the same sessions.
diff --git a/changelog.d/5859.feature b/changelog.d/5859.feature
new file mode 100644
index 0000000000..52df7fc81b
--- /dev/null
+++ b/changelog.d/5859.feature
@@ -0,0 +1 @@
+Add unstable support for MSC2197 (filtered search requests over federation), in order to allow upcoming room directory query performance improvements.
diff --git a/changelog.d/5860.misc b/changelog.d/5860.misc
new file mode 100644
index 0000000000..f9960b17b4
--- /dev/null
+++ b/changelog.d/5860.misc
@@ -0,0 +1 @@
+Remove log line for debugging issue #5407.
diff --git a/changelog.d/5863.bugfix b/changelog.d/5863.bugfix
new file mode 100644
index 0000000000..bceae5be67
--- /dev/null
+++ b/changelog.d/5863.bugfix
@@ -0,0 +1 @@
+Fix Synapse looking for config options `password_reset_failure_template` and `password_reset_success_template`, when they are actually `password_reset_template_failure_html`, `password_reset_template_success_html`.
diff --git a/changelog.d/5864.feature b/changelog.d/5864.feature
new file mode 100644
index 0000000000..40ac11db64
--- /dev/null
+++ b/changelog.d/5864.feature
@@ -0,0 +1 @@
+Correctly retry all hosts returned from SRV when we fail to connect.
diff --git a/changelog.d/5868.feature b/changelog.d/5868.feature
new file mode 100644
index 0000000000..69605c1ae1
--- /dev/null
+++ b/changelog.d/5868.feature
@@ -0,0 +1 @@
+Add `m.require_identity_server` key to `/versions`'s `unstable_features` section.
\ No newline at end of file
diff --git a/changelog.d/5875.misc b/changelog.d/5875.misc
new file mode 100644
index 0000000000..e188c28d2f
--- /dev/null
+++ b/changelog.d/5875.misc
@@ -0,0 +1 @@
+Deprecate the `trusted_third_party_id_servers` option.
\ No newline at end of file
diff --git a/changelog.d/5876.feature b/changelog.d/5876.feature
new file mode 100644
index 0000000000..df88193fbd
--- /dev/null
+++ b/changelog.d/5876.feature
@@ -0,0 +1 @@
+Replace `trust_identity_server_for_password_resets` config option with `account_threepid_delegates`.
\ No newline at end of file
diff --git a/changelog.d/5877.removal b/changelog.d/5877.removal
new file mode 100644
index 0000000000..b6d84fb401
--- /dev/null
+++ b/changelog.d/5877.removal
@@ -0,0 +1 @@
+Remove shared secret registration from client/r0/register endpoint. Contributed by Awesome Technologies Innovationslabor GmbH.
diff --git a/changelog.d/5878.feature b/changelog.d/5878.feature
new file mode 100644
index 0000000000..d9d6df880e
--- /dev/null
+++ b/changelog.d/5878.feature
@@ -0,0 +1 @@
+Add admin API endpoint for setting whether or not a user is a server administrator.
diff --git a/changelog.d/5885.bugfix b/changelog.d/5885.bugfix
new file mode 100644
index 0000000000..411d925fd4
--- /dev/null
+++ b/changelog.d/5885.bugfix
@@ -0,0 +1 @@
+Fix stack overflow when recovering an appservice which had an outage.
diff --git a/changelog.d/5886.misc b/changelog.d/5886.misc
new file mode 100644
index 0000000000..22adba3d85
--- /dev/null
+++ b/changelog.d/5886.misc
@@ -0,0 +1 @@
+Refactor the Appservice scheduler code.
diff --git a/changelog.d/5892.misc b/changelog.d/5892.misc
new file mode 100644
index 0000000000..939fe8c655
--- /dev/null
+++ b/changelog.d/5892.misc
@@ -0,0 +1 @@
+Compatibility with v2 Identity Service APIs other than /lookup.
\ No newline at end of file
diff --git a/changelog.d/5893.misc b/changelog.d/5893.misc
new file mode 100644
index 0000000000..07ee4888dc
--- /dev/null
+++ b/changelog.d/5893.misc
@@ -0,0 +1 @@
+Drop some unused tables.
diff --git a/changelog.d/5894.misc b/changelog.d/5894.misc
new file mode 100644
index 0000000000..fca4485ff7
--- /dev/null
+++ b/changelog.d/5894.misc
@@ -0,0 +1 @@
+Add missing index on users_in_public_rooms to improve the performance of directory queries.
diff --git a/changelog.d/5895.feature b/changelog.d/5895.feature
new file mode 100644
index 0000000000..c394a3772c
--- /dev/null
+++ b/changelog.d/5895.feature
@@ -0,0 +1 @@
+Add config option to sign remote key query responses with a separate key.
diff --git a/changelog.d/5896.misc b/changelog.d/5896.misc
new file mode 100644
index 0000000000..ed47c747bd
--- /dev/null
+++ b/changelog.d/5896.misc
@@ -0,0 +1 @@
+Improve the logging when we have an error when fetching signing keys.
diff --git a/changelog.d/5897.feature b/changelog.d/5897.feature
new file mode 100644
index 0000000000..1557e559e8
--- /dev/null
+++ b/changelog.d/5897.feature
@@ -0,0 +1 @@
+Switch to using the v2 Identity Service `/lookup` API where available, with fallback to v1. (Implements [MSC2134](https://github.com/matrix-org/matrix-doc/pull/2134) plus id_access_token authentication for v2 Identity Service APIs from [MSC2140](https://github.com/matrix-org/matrix-doc/pull/2140)).
diff --git a/changelog.d/5900.feature b/changelog.d/5900.feature
new file mode 100644
index 0000000000..b62d88a76b
--- /dev/null
+++ b/changelog.d/5900.feature
@@ -0,0 +1 @@
+Add support for config templating.
diff --git a/changelog.d/5902.feature b/changelog.d/5902.feature
new file mode 100644
index 0000000000..0660f65cfa
--- /dev/null
+++ b/changelog.d/5902.feature
@@ -0,0 +1 @@
+Users with the type of "support" or "bot" are no longer required to consent.
\ No newline at end of file
diff --git a/changelog.d/5904.feature b/changelog.d/5904.feature
new file mode 100644
index 0000000000..43b5304f39
--- /dev/null
+++ b/changelog.d/5904.feature
@@ -0,0 +1 @@
+Let synctl accept a directory of config files.
diff --git a/changelog.d/5906.feature b/changelog.d/5906.feature
new file mode 100644
index 0000000000..7c789510a6
--- /dev/null
+++ b/changelog.d/5906.feature
@@ -0,0 +1 @@
+Increase max display name size to 256.
diff --git a/changelog.d/5909.misc b/changelog.d/5909.misc
new file mode 100644
index 0000000000..03d0c4367b
--- /dev/null
+++ b/changelog.d/5909.misc
@@ -0,0 +1 @@
+Fix error message which referred to public_base_url instead of public_baseurl. Thanks to @aaronraimist for the fix!
diff --git a/changelog.d/5911.misc b/changelog.d/5911.misc
new file mode 100644
index 0000000000..fe5a8fd59c
--- /dev/null
+++ b/changelog.d/5911.misc
@@ -0,0 +1 @@
+Add support for database engine-specific schema deltas, based on file extension.
\ No newline at end of file
diff --git a/changelog.d/5914.feature b/changelog.d/5914.feature
new file mode 100644
index 0000000000..85c7bf5963
--- /dev/null
+++ b/changelog.d/5914.feature
@@ -0,0 +1 @@
+Add admin API endpoint for getting whether or not a user is a server administrator.
diff --git a/changelog.d/5915.bugfix b/changelog.d/5915.bugfix
new file mode 100644
index 0000000000..bf5b99fedc
--- /dev/null
+++ b/changelog.d/5915.bugfix
@@ -0,0 +1 @@
+Fix 404 for thumbnail download when `dynamic_thumbnails` is `false` and the thumbnail was dynamically generated. Fix reported by rkfg.
diff --git a/changelog.d/5920.bugfix b/changelog.d/5920.bugfix
new file mode 100644
index 0000000000..e45eb0ffee
--- /dev/null
+++ b/changelog.d/5920.bugfix
@@ -0,0 +1 @@
+Fix a cache-invalidation bug for worker-based deployments.
diff --git a/changelog.d/5922.misc b/changelog.d/5922.misc
new file mode 100644
index 0000000000..2cc864897e
--- /dev/null
+++ b/changelog.d/5922.misc
@@ -0,0 +1 @@
+Update Buildkite pipeline to use plugins instead of buildkite-agent commands.
diff --git a/changelog.d/5926.misc b/changelog.d/5926.misc
new file mode 100644
index 0000000000..4383c302ec
--- /dev/null
+++ b/changelog.d/5926.misc
@@ -0,0 +1 @@
+Add link in sample config to the logging config schema.
diff --git a/changelog.d/5931.misc b/changelog.d/5931.misc
new file mode 100644
index 0000000000..ac8e74f5b9
--- /dev/null
+++ b/changelog.d/5931.misc
@@ -0,0 +1 @@
+Remove unnecessary parentheses in return statements.
\ No newline at end of file
diff --git a/changelog.d/5934.feature b/changelog.d/5934.feature
new file mode 100644
index 0000000000..eae969a52a
--- /dev/null
+++ b/changelog.d/5934.feature
@@ -0,0 +1 @@
+Redact events in the database that have been redacted for a month.
diff --git a/changelog.d/5938.misc b/changelog.d/5938.misc
new file mode 100644
index 0000000000..b5a3b6ee3b
--- /dev/null
+++ b/changelog.d/5938.misc
@@ -0,0 +1 @@
+Remove unused jenkins/prepare_sytest.sh file.
diff --git a/changelog.d/5940.feature b/changelog.d/5940.feature
new file mode 100644
index 0000000000..5b69b97fe7
--- /dev/null
+++ b/changelog.d/5940.feature
@@ -0,0 +1 @@
+Add the ability to send registration emails from the homeserver rather than delegating to an identity server.
\ No newline at end of file
diff --git a/changelog.d/5943.misc b/changelog.d/5943.misc
new file mode 100644
index 0000000000..6545e1244a
--- /dev/null
+++ b/changelog.d/5943.misc
@@ -0,0 +1 @@
+Move Buildkite pipeline config to the pipelines repo.
diff --git a/changelog.d/5953.misc b/changelog.d/5953.misc
new file mode 100644
index 0000000000..38e885f42a
--- /dev/null
+++ b/changelog.d/5953.misc
@@ -0,0 +1 @@
+Update INSTALL.md to say that Python 2 is no longer supported.
diff --git a/changelog.d/5962.misc b/changelog.d/5962.misc
new file mode 100644
index 0000000000..d97d376c36
--- /dev/null
+++ b/changelog.d/5962.misc
@@ -0,0 +1 @@
+Remove unnecessary return statements in the codebase which were the result of a regex run.
\ No newline at end of file
diff --git a/changelog.d/5963.misc b/changelog.d/5963.misc
new file mode 100644
index 0000000000..0d6c3c3d65
--- /dev/null
+++ b/changelog.d/5963.misc
@@ -0,0 +1 @@
+Remove left-over methods from C/S registration API.
\ No newline at end of file
diff --git a/changelog.d/5964.feature b/changelog.d/5964.feature
new file mode 100644
index 0000000000..273c9df026
--- /dev/null
+++ b/changelog.d/5964.feature
@@ -0,0 +1 @@
+Remove `bind_email` and `bind_msisdn` parameters from /register as per MSC2140.
\ No newline at end of file
diff --git a/changelog.d/5966.bugfix b/changelog.d/5966.bugfix
new file mode 100644
index 0000000000..b8ef5a7819
--- /dev/null
+++ b/changelog.d/5966.bugfix
@@ -0,0 +1 @@
+Fix admin API for listing media in a room not being available with an external media repo.
diff --git a/changelog.d/5967.bugfix b/changelog.d/5967.bugfix
new file mode 100644
index 0000000000..8d7bf5c2e9
--- /dev/null
+++ b/changelog.d/5967.bugfix
@@ -0,0 +1 @@
+Fix list media admin API always returning an error.
diff --git a/changelog.d/5969.feature b/changelog.d/5969.feature
new file mode 100644
index 0000000000..cf603fa0c6
--- /dev/null
+++ b/changelog.d/5969.feature
@@ -0,0 +1 @@
+Replace `trust_identity_server_for_password_resets` config option with `account_threepid_delegates`.
diff --git a/changelog.d/5970.docker b/changelog.d/5970.docker
new file mode 100644
index 0000000000..c9d04da9cd
--- /dev/null
+++ b/changelog.d/5970.docker
@@ -0,0 +1 @@
+Avoid changing UID/GID if they are already correct.
diff --git a/changelog.d/5971.bugfix b/changelog.d/5971.bugfix
new file mode 100644
index 0000000000..9ea095103b
--- /dev/null
+++ b/changelog.d/5971.bugfix
@@ -0,0 +1 @@
+Fix room and user stats tracking.
diff --git a/changelog.d/5975.misc b/changelog.d/5975.misc
new file mode 100644
index 0000000000..5fcd229b89
--- /dev/null
+++ b/changelog.d/5975.misc
@@ -0,0 +1 @@
+Cleanup event auth type initialisation.
\ No newline at end of file
diff --git a/changelog.d/5980.feature b/changelog.d/5980.feature
new file mode 100644
index 0000000000..f25d8d81d9
--- /dev/null
+++ b/changelog.d/5980.feature
@@ -0,0 +1 @@
+Add POST /_matrix/client/r0/account/3pid/unbind endpoint from MSC2140 for unbinding a 3PID from an identity server without removing it from the homeserver user account.
\ No newline at end of file
diff --git a/changelog.d/5981.feature b/changelog.d/5981.feature
new file mode 100644
index 0000000000..e39514273d
--- /dev/null
+++ b/changelog.d/5981.feature
@@ -0,0 +1 @@
+Setting metrics_flags.known_servers to True in the configuration will publish the synapse_federation_known_servers metric over Prometheus. This represents the total number of servers your server knows about (i.e. is in rooms with), including itself.
diff --git a/changelog.d/5982.bugfix b/changelog.d/5982.bugfix
new file mode 100644
index 0000000000..3ea281a3a0
--- /dev/null
+++ b/changelog.d/5982.bugfix
@@ -0,0 +1 @@
+Include missing opentracing contexts in outbound replication requests.
diff --git a/changelog.d/5983.feature b/changelog.d/5983.feature
new file mode 100644
index 0000000000..aa23ee6dcd
--- /dev/null
+++ b/changelog.d/5983.feature
@@ -0,0 +1 @@
+Add minimum opentracing for client servlets.
diff --git a/changelog.d/5984.bugfix b/changelog.d/5984.bugfix
new file mode 100644
index 0000000000..3387bf82bb
--- /dev/null
+++ b/changelog.d/5984.bugfix
@@ -0,0 +1 @@
+Fix sending of EDUs when opentracing is enabled with an empty whitelist.
diff --git a/changelog.d/5985.feature b/changelog.d/5985.feature
new file mode 100644
index 0000000000..e5e29504af
--- /dev/null
+++ b/changelog.d/5985.feature
@@ -0,0 +1 @@
+Check at setup that opentracing is installed if it's enabled in the config.
diff --git a/changelog.d/5986.feature b/changelog.d/5986.feature
new file mode 100644
index 0000000000..f56aec1b32
--- /dev/null
+++ b/changelog.d/5986.feature
@@ -0,0 +1 @@
+Trace replication send times.
diff --git a/changelog.d/5988.bugfix b/changelog.d/5988.bugfix
new file mode 100644
index 0000000000..5c3597cb53
--- /dev/null
+++ b/changelog.d/5988.bugfix
@@ -0,0 +1 @@
+Fix invalid references to None while opentracing if the log context slips.
diff --git a/changelog.d/5989.misc b/changelog.d/5989.misc
new file mode 100644
index 0000000000..9f2525fd3e
--- /dev/null
+++ b/changelog.d/5989.misc
@@ -0,0 +1 @@
+Clean up dependency checking at setup.
diff --git a/changelog.d/5991.bugfix b/changelog.d/5991.bugfix
new file mode 100644
index 0000000000..5c3597cb53
--- /dev/null
+++ b/changelog.d/5991.bugfix
@@ -0,0 +1 @@
+Fix invalid references to None while opentracing if the log context slips.
diff --git a/changelog.d/5993.feature b/changelog.d/5993.feature
new file mode 100644
index 0000000000..3e8bf5068d
--- /dev/null
+++ b/changelog.d/5993.feature
@@ -0,0 +1 @@
+Add the ability to send registration emails from the homeserver rather than delegating to an identity server.
diff --git a/changelog.d/5994.feature b/changelog.d/5994.feature
new file mode 100644
index 0000000000..5b69b97fe7
--- /dev/null
+++ b/changelog.d/5994.feature
@@ -0,0 +1 @@
+Add the ability to send registration emails from the homeserver rather than delegating to an identity server.
\ No newline at end of file
diff --git a/changelog.d/5995.bugfix b/changelog.d/5995.bugfix
new file mode 100644
index 0000000000..e03ab98bc6
--- /dev/null
+++ b/changelog.d/5995.bugfix
@@ -0,0 +1 @@
+Return an `M_MISSING_PARAM` if `sid` is not provided to `/account/3pid`.
\ No newline at end of file
diff --git a/changelog.d/5998.bugfix b/changelog.d/5998.bugfix
new file mode 100644
index 0000000000..9ea095103b
--- /dev/null
+++ b/changelog.d/5998.bugfix
@@ -0,0 +1 @@
+Fix room and user stats tracking.
diff --git a/changelog.d/6.bugfix b/changelog.d/6.bugfix
new file mode 100644
index 0000000000..43ab65cc95
--- /dev/null
+++ b/changelog.d/6.bugfix
@@ -0,0 +1 @@
+Don't forbid membership events whose membership isn't 'join' or 'invite' in restricted rooms, so that users who got into these rooms before the access rules started to be enforced can leave them.
diff --git a/changelog.d/6003.misc b/changelog.d/6003.misc
new file mode 100644
index 0000000000..4152d05f87
--- /dev/null
+++ b/changelog.d/6003.misc
@@ -0,0 +1 @@
+Add opentracing span over HTTP push processing.
diff --git a/changelog.d/6004.bugfix b/changelog.d/6004.bugfix
new file mode 100644
index 0000000000..45c179c8fd
--- /dev/null
+++ b/changelog.d/6004.bugfix
@@ -0,0 +1 @@
+Only count real users when checking for auto-creation of auto-join room.
diff --git a/changelog.d/6005.feature b/changelog.d/6005.feature
new file mode 100644
index 0000000000..ed6491d3e4
--- /dev/null
+++ b/changelog.d/6005.feature
@@ -0,0 +1 @@
+The new Prometheus metric `synapse_build_info` exposes the Python version, OS version, and Synapse version of the running server.
diff --git a/changelog.d/6009.misc b/changelog.d/6009.misc
new file mode 100644
index 0000000000..fea479e1dd
--- /dev/null
+++ b/changelog.d/6009.misc
@@ -0,0 +1 @@
+Small refactor of function arguments and docstrings in RoomMemberHandler.
\ No newline at end of file
diff --git a/changelog.d/6010.misc b/changelog.d/6010.misc
new file mode 100644
index 0000000000..0659f12ebd
--- /dev/null
+++ b/changelog.d/6010.misc
@@ -0,0 +1 @@
+Remove unused `origin` argument on FederationHandler.add_display_name_to_third_party_invite.
\ No newline at end of file
diff --git a/changelog.d/6011.feature b/changelog.d/6011.feature
new file mode 100644
index 0000000000..ad16acb12b
--- /dev/null
+++ b/changelog.d/6011.feature
@@ -0,0 +1 @@
+Use account_threepid_delegate.email and account_threepid_delegate.msisdn for validating threepid sessions.
\ No newline at end of file
diff --git a/changelog.d/6012.feature b/changelog.d/6012.feature
new file mode 100644
index 0000000000..25425510c6
--- /dev/null
+++ b/changelog.d/6012.feature
@@ -0,0 +1 @@
+Add `report_stats_endpoint` option to configure where stats are reported to, if enabled. Contributed by @Sorunome.
diff --git a/changelog.d/6013.misc b/changelog.d/6013.misc
new file mode 100644
index 0000000000..939fe8c655
--- /dev/null
+++ b/changelog.d/6013.misc
@@ -0,0 +1 @@
+Compatibility with v2 Identity Service APIs other than /lookup.
\ No newline at end of file
diff --git a/changelog.d/6015.feature b/changelog.d/6015.feature
new file mode 100644
index 0000000000..42aaffced9
--- /dev/null
+++ b/changelog.d/6015.feature
@@ -0,0 +1 @@
+Add config option to increase ratelimits for room admins redacting messages.
diff --git a/changelog.d/6017.misc b/changelog.d/6017.misc
new file mode 100644
index 0000000000..5ccab9c6ca
--- /dev/null
+++ b/changelog.d/6017.misc
@@ -0,0 +1 @@
+Clean up some code in the retry logic.
diff --git a/changelog.d/6020.bugfix b/changelog.d/6020.bugfix
new file mode 100644
index 0000000000..58a7deba9d
--- /dev/null
+++ b/changelog.d/6020.bugfix
@@ -0,0 +1 @@
+Ensure support users can be registered even if MAU limit is reached.
diff --git a/changelog.d/6023.misc b/changelog.d/6023.misc
new file mode 100644
index 0000000000..d80410c22c
--- /dev/null
+++ b/changelog.d/6023.misc
@@ -0,0 +1 @@
+Fix the structured logging tests stomping on the global log configuration for subsequent tests.
diff --git a/changelog.d/6024.bugfix b/changelog.d/6024.bugfix
new file mode 100644
index 0000000000..ddad34595b
--- /dev/null
+++ b/changelog.d/6024.bugfix
@@ -0,0 +1 @@
+Fix bug where login error was shown incorrectly on SSO fallback login.
diff --git a/changelog.d/6025.bugfix b/changelog.d/6025.bugfix
new file mode 100644
index 0000000000..50d7f9aab5
--- /dev/null
+++ b/changelog.d/6025.bugfix
@@ -0,0 +1 @@
+Fix bug in calculating the federation retry backoff period.
\ No newline at end of file
diff --git a/changelog.d/6026.feature b/changelog.d/6026.feature
new file mode 100644
index 0000000000..2489ff09b5
--- /dev/null
+++ b/changelog.d/6026.feature
@@ -0,0 +1 @@
+Stop sending federation transactions to servers which have been down for a long time.
diff --git a/changelog.d/6125.feature b/changelog.d/6125.feature
new file mode 100644
index 0000000000..cbe5f8d3c8
--- /dev/null
+++ b/changelog.d/6125.feature
@@ -0,0 +1 @@
+Reject all pending invites for a user during deactivation.
diff --git a/changelog.d/6147.bugfix b/changelog.d/6147.bugfix
new file mode 100644
index 0000000000..b0f936d280
--- /dev/null
+++ b/changelog.d/6147.bugfix
@@ -0,0 +1 @@
+Don't 500 when trying to exchange a revoked 3PID invite.
diff --git a/changelog.d/6238.feature b/changelog.d/6238.feature
new file mode 100644
index 0000000000..d225ac33b6
--- /dev/null
+++ b/changelog.d/6238.feature
@@ -0,0 +1 @@
+Add support for outbound http proxying via http_proxy/HTTPS_PROXY env vars.
diff --git a/changelog.d/6436.bugfix b/changelog.d/6436.bugfix
new file mode 100644
index 0000000000..954a4e1d84
--- /dev/null
+++ b/changelog.d/6436.bugfix
@@ -0,0 +1 @@
+Fix a bug where a room could become unusable with a low retention policy and low activity.
diff --git a/changelog.d/9.misc b/changelog.d/9.misc
new file mode 100644
index 0000000000..24fd12c978
--- /dev/null
+++ b/changelog.d/9.misc
@@ -0,0 +1 @@
+Add SyTest to the BuildKite CI.
diff --git a/contrib/cmdclient/console.py b/contrib/cmdclient/console.py
index af8f39c8c2..48da410d94 100755
--- a/contrib/cmdclient/console.py
+++ b/contrib/cmdclient/console.py
@@ -37,6 +37,8 @@ from signedjson.sign import verify_signed_json, SignatureVerifyException
CONFIG_JSON = "cmdclient_config.json"
+# TODO: The concept of trusted identity servers has been deprecated. This option and checks
+# should be removed
TRUSTED_ID_SERVERS = ["localhost:8001"]
@@ -268,6 +270,7 @@ class SynapseCmd(cmd.Cmd):
@defer.inlineCallbacks
def _do_emailrequest(self, args):
+ # TODO: Update to use v2 Identity Service API endpoint
url = (
self._identityServerUrl()
+ "/_matrix/identity/api/v1/validate/email/requestToken"
@@ -302,6 +305,7 @@ class SynapseCmd(cmd.Cmd):
@defer.inlineCallbacks
def _do_emailvalidate(self, args):
+ # TODO: Update to use v2 Identity Service API endpoint
url = (
self._identityServerUrl()
+ "/_matrix/identity/api/v1/validate/email/submitToken"
@@ -330,6 +334,7 @@ class SynapseCmd(cmd.Cmd):
@defer.inlineCallbacks
def _do_3pidbind(self, args):
+ # TODO: Update to use v2 Identity Service API endpoint
url = self._identityServerUrl() + "/_matrix/identity/api/v1/3pid/bind"
json_res = yield self.http_client.do_request(
@@ -398,6 +403,7 @@ class SynapseCmd(cmd.Cmd):
@defer.inlineCallbacks
def _do_invite(self, roomid, userstring):
if not userstring.startswith("@") and self._is_on("complete_usernames"):
+ # TODO: Update to use v2 Identity Service API endpoint
url = self._identityServerUrl() + "/_matrix/identity/api/v1/lookup"
json_res = yield self.http_client.do_request(
@@ -407,6 +413,7 @@ class SynapseCmd(cmd.Cmd):
mxid = None
if "mxid" in json_res and "signatures" in json_res:
+ # TODO: Update to use v2 Identity Service API endpoint
url = (
self._identityServerUrl()
+ "/_matrix/identity/api/v1/pubkey/ed25519"
diff --git a/docker/README.md b/docker/README.md
index 46bb9d2d99..d5879c2f2c 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -17,7 +17,7 @@ By default, the image expects a single volume, located at ``/data``, that will h
* the appservices configuration.
You are free to use separate volumes depending on storage endpoints at your
-disposal. For instance, ``/data/media`` coud be stored on a large but low
+disposal. For instance, ``/data/media`` could be stored on a large but low
performance hdd storage while other files could be stored on high performance
endpoints.
@@ -27,8 +27,8 @@ configuration file there. Multiple application services are supported.
## Generating a configuration file
-The first step is to genearte a valid config file. To do this, you can run the
-image with the `generate` commandline option.
+The first step is to generate a valid config file. To do this, you can run the
+image with the `generate` command line option.
You will need to specify values for the `SYNAPSE_SERVER_NAME` and
`SYNAPSE_REPORT_STATS` environment variable, and mount a docker volume to store
@@ -59,7 +59,7 @@ The following environment variables are supported in `generate` mode:
* `SYNAPSE_CONFIG_PATH`: path to the file to be generated. Defaults to
`<SYNAPSE_CONFIG_DIR>/homeserver.yaml`.
* `SYNAPSE_DATA_DIR`: where the generated config will put persistent data
- such as the datatase and media store. Defaults to `/data`.
+ such as the database and media store. Defaults to `/data`.
* `UID`, `GID`: the user id and group id to use for creating the data
directories. Defaults to `991`, `991`.
@@ -115,7 +115,7 @@ not given).
To migrate from a dynamic configuration file to a static one, run the docker
container once with the environment variables set, and `migrate_config`
-commandline option. For example:
+command line option. For example:
```
docker run -it --rm \
diff --git a/docker/start.py b/docker/start.py
index 40a861f200..260f2d9943 100755
--- a/docker/start.py
+++ b/docker/start.py
@@ -41,8 +41,8 @@ def generate_config_from_template(config_dir, config_path, environ, ownership):
config_dir (str): where to put generated config files
config_path (str): where to put the main config file
environ (dict): environment dictionary
- ownership (str): "<user>:<group>" string which will be used to set
- ownership of the generated configs
+ ownership (str|None): "<user>:<group>" string which will be used to set
+ ownership of the generated configs. If None, ownership will not change.
"""
for v in ("SYNAPSE_SERVER_NAME", "SYNAPSE_REPORT_STATS"):
if v not in environ:
@@ -105,24 +105,24 @@ def generate_config_from_template(config_dir, config_path, environ, ownership):
log("Generating log config file " + log_config_file)
convert("/conf/log.config", log_config_file, environ)
- subprocess.check_output(["chown", "-R", ownership, "/data"])
-
# Hopefully we already have a signing key, but generate one if not.
- subprocess.check_output(
- [
- "su-exec",
- ownership,
- "python",
- "-m",
- "synapse.app.homeserver",
- "--config-path",
- config_path,
- # tell synapse to put generated keys in /data rather than /compiled
- "--keys-directory",
- config_dir,
- "--generate-keys",
- ]
- )
+ args = [
+ "python",
+ "-m",
+ "synapse.app.homeserver",
+ "--config-path",
+ config_path,
+ # tell synapse to put generated keys in /data rather than /compiled
+ "--keys-directory",
+ config_dir,
+ "--generate-keys",
+ ]
+
+ if ownership is not None:
+ subprocess.check_output(["chown", "-R", ownership, "/data"])
+ args = ["su-exec", ownership] + args
+
+ subprocess.check_output(args)
def run_generate_config(environ, ownership):
@@ -130,7 +130,7 @@ def run_generate_config(environ, ownership):
Args:
environ (dict): env var dict
- ownership (str): "userid:groupid" arg for chmod
+        ownership (str|None): "userid:groupid" arg for chown. If None, ownership will not change.
Never returns.
"""
@@ -149,9 +149,6 @@ def run_generate_config(environ, ownership):
log("Creating log config %s" % (log_config_file,))
convert("/conf/log.config", log_config_file, environ)
- # make sure that synapse has perms to write to the data dir.
- subprocess.check_output(["chown", ownership, data_dir])
-
args = [
"python",
"-m",
@@ -170,12 +167,33 @@ def run_generate_config(environ, ownership):
"--open-private-ports",
]
# log("running %s" % (args, ))
- os.execv("/usr/local/bin/python", args)
+
+    if ownership is not None:
+        # make sure that synapse has perms to write to the data dir.
+        subprocess.check_output(["chown", ownership, data_dir])
+
+        args = ["su-exec", ownership] + args
+        os.execv("/sbin/su-exec", args)
+    else:
+        os.execv("/usr/local/bin/python", args)
def main(args, environ):
mode = args[1] if len(args) > 1 else None
- ownership = "{}:{}".format(environ.get("UID", 991), environ.get("GID", 991))
+ desired_uid = int(environ.get("UID", "991"))
+ desired_gid = int(environ.get("GID", "991"))
+ if (desired_uid == os.getuid()) and (desired_gid == os.getgid()):
+ ownership = None
+ else:
+ ownership = "{}:{}".format(desired_uid, desired_gid)
+
+ log(
+ "Container running as UserID %s:%s, ENV (or defaults) requests %s:%s"
+ % (os.getuid(), os.getgid(), desired_uid, desired_gid)
+ )
+
+ if ownership is None:
+        log("Will not perform chown/su-exec as UserID already matches request")
# In generate mode, generate a configuration and missing keys, then exit
if mode == "generate":
@@ -227,16 +245,12 @@ def main(args, environ):
log("Starting synapse with config file " + config_path)
- args = [
- "su-exec",
- ownership,
- "python",
- "-m",
- "synapse.app.homeserver",
- "--config-path",
- config_path,
- ]
- os.execv("/sbin/su-exec", args)
+ args = ["python", "-m", "synapse.app.homeserver", "--config-path", config_path]
+ if ownership is not None:
+ args = ["su-exec", ownership] + args
+ os.execv("/sbin/su-exec", args)
+ else:
+ os.execv("/usr/local/bin/python", args)
if __name__ == "__main__":
diff --git a/docs/admin_api/purge_room.md b/docs/admin_api/purge_room.md
new file mode 100644
index 0000000000..64ea7b6a64
--- /dev/null
+++ b/docs/admin_api/purge_room.md
@@ -0,0 +1,18 @@
+Purge room API
+==============
+
+This API will remove all trace of a room from your database.
+
+All local users must have left the room before it can be removed.
+
+The API is:
+
+```
+POST /_synapse/admin/v1/purge_room
+
+{
+ "room_id": "!room:id"
+}
+```
+
+You must authenticate using the access token of an admin user.
diff --git a/docs/admin_api/user_admin_api.rst b/docs/admin_api/user_admin_api.rst
index 213359d0c0..d0871f9438 100644
--- a/docs/admin_api/user_admin_api.rst
+++ b/docs/admin_api/user_admin_api.rst
@@ -84,3 +84,42 @@ with a body of:
}
including an ``access_token`` of a server admin.
+
+
+Get whether a user is a server administrator or not
+===================================================
+
+
+The api is::
+
+ GET /_synapse/admin/v1/users/<user_id>/admin
+
+including an ``access_token`` of a server admin.
+
+A response body like the following is returned:
+
+.. code:: json
+
+ {
+ "admin": true
+ }
+
+
+Change whether a user is a server administrator or not
+======================================================
+
+Note that you cannot demote yourself.
+
+The api is::
+
+ PUT /_synapse/admin/v1/users/<user_id>/admin
+
+with a body of:
+
+.. code:: json
+
+ {
+ "admin": true
+ }
+
+including an ``access_token`` of a server admin.
diff --git a/docs/opentracing.rst b/docs/opentracing.rst
index b91a2208a8..6e98ab56ba 100644
--- a/docs/opentracing.rst
+++ b/docs/opentracing.rst
@@ -32,7 +32,7 @@ It is up to the remote server to decide what it does with the spans
it creates. This is called the sampling policy and it can be configured
through Jaeger's settings.
-For OpenTracing concepts see
+For OpenTracing concepts see
https://opentracing.io/docs/overview/what-is-tracing/.
For more information about Jaeger's implementation see
@@ -79,7 +79,7 @@ Homeserver whitelisting
The homeserver whitelist is configured using regular expressions. A list of regular
expressions can be given and their union will be compared when propagating any
-spans contexts to another homeserver.
+span contexts to another homeserver.
Though it's mostly safe to send and receive span contexts to and from
untrusted users since span contexts are usually opaque ids it can lead to
@@ -92,6 +92,29 @@ two problems, namely:
but that doesn't prevent another server sending you baggage which will be logged
to OpenTracing's logs.
+==========
+EDU FORMAT
+==========
+
+EDUs can contain tracing data in their content. This is not specced but
+it could be of interest for other homeservers.
+
+EDU format (if you're using Jaeger):
+
+.. code-block:: json
+
+ {
+ "edu_type": "type",
+ "content": {
+ "org.matrix.opentracing_context": {
+ "uber-trace-id": "fe57cf3e65083289"
+ }
+ }
+ }
+
+Though you don't have to use Jaeger, you must inject the span context into
+`org.matrix.opentracing_context` using the opentracing `Format.TEXT_MAP` inject method.
+
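A sketch of that injection step, using the OpenTracing no-op tracer in place of a real Jaeger tracer (the `inject()` call is the same either way):

```python
import opentracing
from opentracing.propagation import Format

tracer = opentracing.Tracer()  # stand-in for a configured jaeger_client tracer

with tracer.start_span("send_edu") as span:
    carrier = {}
    # Serialise the span context into a plain dict of strings...
    tracer.inject(span.context, Format.TEXT_MAP, carrier)
    # ...and ship it inside the EDU content, as in the example above.
    edu = {"edu_type": "type", "content": {"org.matrix.opentracing_context": carrier}}
```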
==================
Configuring Jaeger
==================
diff --git a/docs/room_and_user_statistics.md b/docs/room_and_user_statistics.md
new file mode 100644
index 0000000000..e1facb38d4
--- /dev/null
+++ b/docs/room_and_user_statistics.md
@@ -0,0 +1,62 @@
+Room and User Statistics
+========================
+
+Synapse maintains room and user statistics (as well as a cache of room state),
+in various tables. These can be used for administrative purposes but are also
+used when generating the public room directory.
+
+
+# Synapse Developer Documentation
+
+## High-Level Concepts
+
+### Definitions
+
+* **subject**: Something we are tracking stats about – currently a room or user.
+* **current row**: An entry for a subject in the appropriate current statistics
+ table. Each subject can have only one.
+* **historical row**: An entry for a subject in the appropriate historical
+ statistics table. Each subject can have any number of these.
+
+### Overview
+
+Stats are maintained as time series. There are two kinds of column:
+
+* absolute columns – where the value is correct for the time given by `end_ts`
+ in the stats row. (Imagine a line graph for these values)
+ * They can also be thought of as 'gauges' in Prometheus, if you are familiar with those.
+* per-slice columns – where the value corresponds to how many of the occurrences
+ occurred within the time slice given by `(end_ts − bucket_size)…end_ts`
+ or `start_ts…end_ts`. (Imagine a histogram for these values)
+
+Stats are maintained in two tables (for each type): current and historical.
+
+Current stats correspond to the present values. Each subject can only have one
+entry.
+
+Historical stats correspond to values in the past. Subjects may have multiple
+entries.
+
+## Concepts around the management of stats
+
+### Current rows
+
+Current rows contain the most up-to-date statistics for a room.
+They only contain absolute columns.
+
+### Historical rows
+
+Historical rows can always be considered to be valid for the time slice and
+end time specified.
+
+* historical rows will not exist for every time slice – they will be omitted
+ if there were no changes. In this case, the following assumptions can be
+ made to interpolate/recreate missing rows:
+ - absolute fields have the same values as in the preceding row
+ - per-slice fields are zero (`0`)
+* historical rows will not be retained forever – rows older than a configurable
+ time will be purged.
+
+#### Purge
+
+The purging of historical rows is not yet implemented.
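A minimal sketch of the interpolation rule described above; the field names are illustrative, not the actual table schema:

```python
ABSOLUTE_FIELDS = {"joined_members", "current_state_events"}  # illustrative
PER_SLICE_FIELDS = {"total_events"}  # illustrative

def interpolate_missing_row(prev_row: dict, end_ts: int, bucket_size: int) -> dict:
    """Recreate an omitted historical row from the row preceding it."""
    row = {"end_ts": end_ts, "bucket_size": bucket_size}
    for field in ABSOLUTE_FIELDS:
        row[field] = prev_row[field]  # absolute fields carry forward unchanged
    for field in PER_SLICE_FIELDS:
        row[field] = 0  # nothing happened during the omitted slice
    return row
```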
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 0c6be30e51..641108de17 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -54,6 +54,13 @@ pid_file: DATADIR/homeserver.pid
#
#require_auth_for_profile_requests: true
+# Whether to require a user to share a room with another user in order
+# to retrieve their profile information. Only checked on Client-Server
+# requests. Profile requests from other servers should be checked by the
+# requesting server. Defaults to 'false'.
+#
+# limit_profile_requests_to_known_users: true
+
# If set to 'false', requires authentication to access the server's public rooms
# directory through the client API. Defaults to 'true'.
#
@@ -205,9 +212,9 @@ listeners:
#
- port: 8008
tls: false
- bind_addresses: ['::1', '127.0.0.1']
type: http
x_forwarded: true
+ bind_addresses: ['::1', '127.0.0.1']
resources:
- names: [client, federation]
@@ -306,6 +313,81 @@ listeners:
#
#allow_per_room_profiles: false
+# Whether to show the users on this homeserver in the user directory. Defaults to
+# 'true'.
+#
+#show_users_in_user_directory: false
+
+# Message retention policy at the server level.
+#
+# Room admins and mods can define a retention period for their rooms using the
+# 'm.room.retention' state event, and server admins can cap this period by setting
+# the 'allowed_lifetime_min' and 'allowed_lifetime_max' config options.
+#
+# If this feature is enabled, Synapse will regularly look for and purge events
+# which are older than the room's maximum retention period. Synapse will also
+# filter events received over federation so that events that should have been
+# purged are ignored and not stored again.
+#
+retention:
+ # The message retention policies feature is disabled by default. Uncomment the
+ # following line to enable it.
+ #
+ #enabled: true
+
+ # Default retention policy. If set, Synapse will apply it to rooms that lack the
+ # 'm.room.retention' state event. Currently, the value of 'min_lifetime' doesn't
+ # matter much because Synapse doesn't take it into account yet.
+ #
+ #default_policy:
+ # min_lifetime: 1d
+ # max_lifetime: 1y
+
+ # Retention policy limits. If set, a user won't be able to send a
+ # 'm.room.retention' event which features a 'min_lifetime' or a 'max_lifetime'
+ # that's not within this range. This is especially useful in closed federations,
+ # in which server admins can make sure every federating server applies the same
+ # rules.
+ #
+ #allowed_lifetime_min: 1d
+ #allowed_lifetime_max: 1y
+
+ # Server admins can define the settings of the background jobs purging the
+ # events whose lifetime has expired under the 'purge_jobs' section.
+ #
+ # If no configuration is provided, a single job will be set up to delete expired
+ # events in every room daily.
+ #
+ # Each job's configuration defines which range of message lifetimes the job
+ # takes care of. For example, if 'shortest_max_lifetime' is '2d' and
+ # 'longest_max_lifetime' is '3d', the job will handle purging expired events in
+ # rooms whose state defines a 'max_lifetime' that's both higher than 2 days, and
+ # lower than or equal to 3 days. Both the minimum and the maximum value of a
+ # range are optional, e.g. a job with no 'shortest_max_lifetime' and a
+ # 'longest_max_lifetime' of '3d' will handle every room with a retention policy
+ # whose 'max_lifetime' is lower than or equal to three days.
+ #
+ # The rationale for this per-job configuration is that some rooms might have a
+ # retention policy with a low 'max_lifetime', where history needs to be purged
+ # of outdated messages on a very frequent basis (e.g. every 5min), but where you
+ # don't want that purge performed by a job that iterates over every room it knows,
+ # which would be quite heavy on the server.
+ #
+ #purge_jobs:
+ # - shortest_max_lifetime: 1d
+ # longest_max_lifetime: 3d
+ # interval: 5m
+ # - shortest_max_lifetime: 3d
+ # longest_max_lifetime: 1y
+ # interval: 24h
+
+# How long to keep redacted events in unredacted form in the database. After
+# this period, redacted events are replaced with their redacted form in the DB.
+#
+# Defaults to `7d`. Set to `null` to disable.
+#
+redaction_retention_period: 7d
+
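A sketch of the range check each purge job implies (a hypothetical helper, with lifetimes already converted to milliseconds; not the actual implementation):

```python
DAY_MS = 24 * 60 * 60 * 1000

def job_handles_room(max_lifetime, shortest_max_lifetime=None, longest_max_lifetime=None):
    """Whether a job configured with the given (optional) bounds should purge a
    room whose retention policy defines `max_lifetime`."""
    if shortest_max_lifetime is not None and max_lifetime <= shortest_max_lifetime:
        return False  # falls to a job with a tighter range
    if longest_max_lifetime is not None and max_lifetime > longest_max_lifetime:
        return False  # falls to a job with a looser range
    return True

# The 2d/3d example from the comments above:
assert job_handles_room(3 * DAY_MS, 2 * DAY_MS, 3 * DAY_MS)      # higher than 2d, <= 3d
assert not job_handles_room(2 * DAY_MS, 2 * DAY_MS, 3 * DAY_MS)  # not higher than 2d
```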
## TLS ##
@@ -392,10 +474,10 @@ listeners:
# permission to listen on port 80.
#
acme:
- # ACME support is disabled by default. Uncomment the following line
- # (and tls_certificate_path and tls_private_key_path above) to enable it.
+ # ACME support is disabled by default. Set this to `true` and uncomment
+ # tls_certificate_path and tls_private_key_path above to enable it.
#
- #enabled: true
+ enabled: false
# Endpoint to use to request certificates. If you only want to test,
# use Let's Encrypt's staging url:
@@ -406,17 +488,17 @@ acme:
# Port number to listen on for the HTTP-01 challenge. Change this if
# you are forwarding connections through Apache/Nginx/etc.
#
- #port: 80
+ port: 80
# Local addresses to listen on for incoming connections.
# Again, you may want to change this if you are forwarding connections
# through Apache/Nginx/etc.
#
- #bind_addresses: ['::', '0.0.0.0']
+ bind_addresses: ['::', '0.0.0.0']
# How many days remaining on a certificate before it is renewed.
#
- #reprovision_threshold: 30
+ reprovision_threshold: 30
# The domain that the certificate should be for. Normally this
# should be the same as your Matrix domain (i.e., 'server_name'), but,
@@ -430,7 +512,7 @@ acme:
#
# If not set, defaults to your 'server_name'.
#
- #domain: matrix.example.com
+ domain: matrix.example.com
# file to use for the account key. This will be generated if it doesn't
# exist.
@@ -485,7 +567,8 @@ database:
## Logging ##
-# A yaml python logging config file
+# A yaml python logging config file as described by
+# https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema
#
log_config: "CONFDIR/SERVERNAME.log.config"
@@ -510,6 +593,11 @@ log_config: "CONFDIR/SERVERNAME.log.config"
# - one for login that ratelimits login requests based on the account the
# client is attempting to log into, based on the amount of failed login
# attempts for this account.
+# - one that ratelimits third-party invite requests based on the account
+# that's making the requests.
+# - one for ratelimiting redactions by room admins. If this is not explicitly
+# set then it uses the same ratelimiting as per rc_message. This is useful
+# to allow room admins to deal with abuse quickly.
#
# The defaults are as shown below.
#
@@ -531,6 +619,14 @@ log_config: "CONFDIR/SERVERNAME.log.config"
# failed_attempts:
# per_second: 0.17
# burst_count: 3
+#
+#rc_third_party_invite:
+# per_second: 0.2
+# burst_count: 10
+#
+#rc_admin_redaction:
+# per_second: 1
+# burst_count: 50
# Ratelimiting settings for incoming federation
@@ -599,6 +695,30 @@ uploads_path: "DATADIR/uploads"
#
#max_upload_size: 10M
+# The largest allowed size for a user avatar. If not defined, no
+# restriction will be imposed.
+#
+# Note that this only applies when an avatar is changed globally.
+# Per-room avatar changes are not affected. See allow_per_room_profiles
+# for disabling that functionality.
+#
+# Note that user avatar changes will not work if this is set without
+# using Synapse's local media repo.
+#
+#max_avatar_size: 10M
+
+# Allowed mimetypes for a user avatar. If not defined, no restriction will
+# be imposed.
+#
+# Note that this only applies when an avatar is changed globally.
+# Per-room avatar changes are not affected. See allow_per_room_profiles
+# for disabling that functionality.
+#
+# Note that user avatar changes will not work if this is set without
+# using Synapse's local media repo.
+#
+#allowed_avatar_mimetypes: ["image/png", "image/jpeg", "image/gif"]
+
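A sketch of the checks these two options imply (a hypothetical helper with sizes in bytes, not Synapse's actual code):

```python
def avatar_allowed(content_length, mimetype, max_avatar_size=None, allowed_mimetypes=None):
    """Apply max_avatar_size / allowed_avatar_mimetypes; an unset option imposes
    no restriction, as described above."""
    if max_avatar_size is not None and content_length > max_avatar_size:
        return False
    if allowed_mimetypes is not None and mimetype not in allowed_mimetypes:
        return False
    return True

assert avatar_allowed(5_000_000, "image/png", 10 * 1024 * 1024,
                      ["image/png", "image/jpeg", "image/gif"])
```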
# Maximum number of pixels that will be thumbnailed
#
#max_image_pixels: 32M
@@ -842,9 +962,32 @@ uploads_path: "DATADIR/uploads"
#
#disable_msisdn_registration: true
+# Derive the user's Matrix ID from a type of 3PID used when registering.
+# This overrides any Matrix ID the user proposes when calling /register.
+# The 3PID type should be present in registrations_require_3pid to avoid
+# users failing to register if they don't specify the right kind of 3pid.
+#
+#register_mxid_from_3pid: email
+
+# Uncomment to set the display name of new users to their email address,
+# rather than using the default heuristic.
+#
+#register_just_use_email_for_display_name: true
+
# Mandate that users are only allowed to associate certain formats of
# 3PIDs with accounts on this server.
#
+# Use an Identity Server to establish which 3PIDs are allowed to register.
+# Overrides allowed_local_3pids below.
+#
+#check_is_for_allowed_local_3pids: matrix.org
+#
+# If you are using an IS you can also check whether that IS registers
+# pending invites for the given 3PID (and then allow the user to sign up on
+# the platform):
+#
+#allow_invited_3pids: False
+#
#allowed_local_3pids:
# - medium: email
# pattern: '.*@matrix\.org'
@@ -853,6 +996,11 @@ uploads_path: "DATADIR/uploads"
# - medium: msisdn
# pattern: '\+44'
+# If true, stop users from trying to change the 3PIDs associated with
+# their accounts.
+#
+#disable_3pid_changes: False
+
# Enable 3PIDs lookup requests to identity servers from this server.
#
#enable_3pid_lookup: true
@@ -890,10 +1038,66 @@ uploads_path: "DATADIR/uploads"
# Also defines the ID server which will be called when an account is
# deactivated (one will be picked arbitrarily).
#
+# Note: This option is deprecated. Since v0.99.4, Synapse has tracked which identity
+# server a 3PID has been bound to. For 3PIDs bound before then, Synapse runs a
+# background migration script, informing itself that the identity server to which
+# all of its 3PIDs have been bound is likely one of the below.
+#
+# As of Synapse v1.4.0, all other functionality of this option has been deprecated;
+# it is now used solely by the background migration script, and can be removed
+# once that script has run.
#trusted_third_party_id_servers:
# - matrix.org
# - vector.im
+# If enabled, user IDs, display names and avatar URLs will be replicated
+# to this server whenever they change.
+# This is an experimental API currently implemented by sydent to support
+# cross-homeserver user directories.
+#
+#replicate_user_profiles_to: example.com
+
+# If specified, attempt to replay registrations, profile changes & 3pid
+# bindings on the given target homeserver via the AS API. The HS is authed
+# via a given AS token.
+#
+#shadow_server:
+# hs_url: https://shadow.example.com
+# hs: shadow.example.com
+# as_token: 12u394refgbdhivsia
+
+# If enabled, don't let users set their own display names/avatars
+# other than for the very first time (unless they are a server admin).
+# Useful when provisioning users based on the contents of a 3rd party
+# directory and to avoid ambiguities.
+#
+#disable_set_displayname: False
+#disable_set_avatar_url: False
+
+# Handle threepid (email/phone etc) registration and password resets through a set of
+# *trusted* identity servers. Note that this allows the configured identity server to
+# reset passwords for accounts!
+#
+# Be aware that if `email` is not set, and SMTP options have not been
+# configured in the email config block, registration and user password resets via
+# email will be globally disabled.
+#
+# Additionally, if `msisdn` is not set, registration and password resets via msisdn
+# will be disabled regardless. This is due to Synapse currently not supporting any
+# method of sending SMS messages on its own.
+#
+# To enable using an identity server for operations regarding a particular third-party
+# identifier type, set the value to the URL of that identity server as shown in the
+# examples below.
+#
+# Servers handling these requests must answer the `/requestToken` endpoints defined
+# by the Matrix Identity Service API specification:
+# https://matrix.org/docs/spec/identity_service/latest
+#
+account_threepid_delegates:
+ #email: https://example.com # Delegate email sending to example.com
+ #msisdn: http://localhost:8090 # Delegate SMS sending to this local process
+
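To illustrate what a delegate answers, a hedged sketch of forwarding an email `/requestToken` call (the path follows the Identity Service API spec linked above; all values are placeholders):

```python
import requests

DELEGATE = "https://example.com"  # the configured email delegate

resp = requests.post(
    DELEGATE + "/_matrix/identity/api/v1/validate/email/requestToken",
    json={
        "client_secret": "<client_secret>",
        "email": "alice@example.com",
        "send_attempt": 1,
    },
)
resp.raise_for_status()
print(resp.json())  # e.g. {"sid": "<session_id>"}
```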
# Users who register on this homeserver will automatically be joined
# to these rooms
#
@@ -925,9 +1129,24 @@ uploads_path: "DATADIR/uploads"
#sentry:
# dsn: "..."
+# Flags to enable Prometheus metrics which are not suitable to be
+# enabled by default, either for performance reasons or limited use.
+#
+metrics_flags:
+ # Publish synapse_federation_known_servers, a gauge of the number of
+ # servers this homeserver knows about, including itself. May cause
+ # performance problems on large homeservers.
+ #
+ #known_servers: true
+
# Whether or not to report anonymized homeserver usage statistics.
# report_stats: true|false
+# The endpoint to report the anonymized homeserver usage statistics to.
+# Defaults to https://matrix.org/report-usage-stats/push
+#
+#report_stats_endpoint: https://example.com/report-usage-stats/push
+
## API Configuration ##
@@ -1027,6 +1246,14 @@ signing_key_path: "CONFDIR/SERVERNAME.signing.key"
#
#trusted_key_servers:
# - server_name: "matrix.org"
+#
+
+# The signing keys to use when acting as a trusted key server. If not specified,
+# this defaults to the server signing key.
+#
+# Can contain multiple keys, one per line.
+#
+#key_server_signing_keys_path: "key_server_signing_keys.key"
# Enable SAML2 for registration and login. Uses pysaml2.
@@ -1127,6 +1354,36 @@ password_config:
#
#pepper: "EVEN_MORE_SECRET"
+ # Define and enforce a password policy. Each parameter is optional; boolean
+ # parameters default to 'false' and integer parameters default to 0.
+ # This is an early implementation of MSC2000.
+ #
+ #policy:
+ # Whether to enforce the password policy.
+ #
+ #enabled: true
+
+ # Minimum accepted length for a password.
+ #
+ #minimum_length: 15
+
+ # Whether a password must contain at least one digit.
+ #
+ #require_digit: true
+
+ # Whether a password must contain at least one symbol.
+ # A symbol is any character that's not a number or a letter.
+ #
+ #require_symbol: true
+
+ # Whether a password must contain at least one lowercase letter.
+ #
+ #require_lowercase: true
+
+ # Whether a password must contain at least one uppercase letter.
+ #
+ #require_uppercase: true
+
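A sketch of how such a policy could be enforced (hypothetical helper, not Synapse's implementation; the error codes mirror the `Codes` additions to `synapse/api/errors.py` later in this diff):

```python
import re

def check_password_policy(password: str, policy: dict) -> None:
    """Raise ValueError with a Matrix error code if `password` violates an
    enabled policy. A symbol is any non-alphanumeric character."""
    if not policy.get("enabled", False):
        return
    if len(password) < policy.get("minimum_length", 0):
        raise ValueError("M_PASSWORD_TOO_SHORT")
    if policy.get("require_digit") and not re.search(r"\d", password):
        raise ValueError("M_PASSWORD_NO_DIGIT")
    if policy.get("require_symbol") and not re.search(r"[^a-zA-Z0-9]", password):
        raise ValueError("M_PASSWORD_NO_SYMBOL")
    if policy.get("require_lowercase") and not re.search(r"[a-z]", password):
        raise ValueError("M_PASSWORD_NO_LOWERCASE")
    if policy.get("require_uppercase") and not re.search(r"[A-Z]", password):
        raise ValueError("M_PASSWORD_NO_UPPERCASE")

check_password_policy("S3cret-passphrase", {"enabled": True, "minimum_length": 15,
                                            "require_digit": True})
```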
# Enable sending emails for password resets, notification events or
@@ -1155,19 +1412,6 @@ password_config:
# #
# riot_base_url: "http://localhost/riot"
#
-# # Enable sending password reset emails via the configured, trusted
-# # identity servers
-# #
-# # IMPORTANT! This will give a malicious or overtaken identity server
-# # the ability to reset passwords for your users! Make absolutely sure
-# # that you want to do this! It is strongly recommended that password
-# # reset emails be sent by the homeserver instead
-# #
-# # If this option is set to false and SMTP options have not been
-# # configured, resetting user passwords via email will be disabled
-# #
-# #trust_identity_server_for_password_resets: false
-#
# # Configure the time that a validation email or text message code
# # will expire after sending
# #
@@ -1199,11 +1443,22 @@ password_config:
# #password_reset_template_html: password_reset.html
# #password_reset_template_text: password_reset.txt
#
+# # Templates for registration emails sent by the homeserver
+# #
+# #registration_template_html: registration.html
+# #registration_template_text: registration.txt
+#
# # Templates for password reset success and failure pages that a user
# # will see after attempting to reset their password
# #
# #password_reset_template_success_html: password_reset_success.html
# #password_reset_template_failure_html: password_reset_failure.html
+#
+# # Templates for registration success and failure pages that a user
+# # will see after attempting to register using an email or phone
+# #
+# #registration_template_success_html: registration_success.html
+# #registration_template_failure_html: registration_failure.html
#password_providers:
@@ -1271,6 +1526,11 @@ password_config:
#user_directory:
# enabled: true
# search_all_users: false
+#
+# # If this is set, user search will be delegated to this ID server instead
+# # of synapse performing the search itself.
+# # This is an experimental API.
+# defer_to_id_server: https://id.example.com
# User Consent configuration
diff --git a/docs/sphinx/conf.py b/docs/sphinx/conf.py
index ca4b879526..5c5a115ca9 100644
--- a/docs/sphinx/conf.py
+++ b/docs/sphinx/conf.py
@@ -12,8 +12,8 @@
# All configuration values have a default; values that are commented out
# serve to show the default.
-import sys
import os
+import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
@@ -191,11 +191,11 @@ htmlhelp_basename = "Synapsedoc"
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
- #'papersize': 'letterpaper',
+ # 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
- #'pointsize': '10pt',
+ # 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
- #'preamble': '',
+ # 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
diff --git a/docs/structured_logging.md b/docs/structured_logging.md
new file mode 100644
index 0000000000..decec9b8fa
--- /dev/null
+++ b/docs/structured_logging.md
@@ -0,0 +1,83 @@
+# Structured Logging
+
+A structured logging system can be useful when your logs are destined for a machine to parse and process. By maintaining its machine-readable characteristics, it enables more efficient searching and aggregations when consumed by software such as the "ELK stack".
+
+Synapse's structured logging system is configured via the file that Synapse's `log_config` config option points to. The file must be YAML and contain `structured: true`. It must contain a list of "drains" (places where logs are sent).
+
+A structured logging configuration looks similar to the following:
+
+```yaml
+structured: true
+
+loggers:
+ synapse:
+ level: INFO
+ synapse.storage.SQL:
+ level: WARNING
+
+drains:
+ console:
+ type: console
+ location: stdout
+ file:
+ type: file_json
+ location: homeserver.log
+```
+
+The above logging config will set Synapse to the 'INFO' logging level by default, with the SQL layer at 'WARNING', and will define two logging drains (one to the console and one to a file, stored as JSON).
+
+## Drain Types
+
+Drain types can be specified by the `type` key.
+
+### `console`
+
+Outputs human-readable logs to the console.
+
+Arguments:
+
+- `location`: Either `stdout` or `stderr`.
+
+### `console_json`
+
+Outputs machine-readable JSON logs to the console.
+
+Arguments:
+
+- `location`: Either `stdout` or `stderr`.
+
+### `console_json_terse`
+
+Outputs machine-readable JSON logs to the console, separated by newlines. This
+format is not designed to be read and re-formatted into human-readable text, but
+is well suited to a log aggregation system.
+
+Arguments:
+
+- `location`: Either `stdout` or `stderr`.
+
+### `file`
+
+Outputs human-readable logs to a file.
+
+Arguments:
+
+- `location`: An absolute path to the file to log to.
+
+### `file_json`
+
+Outputs machine-readable logs to a file.
+
+Arguments:
+
+- `location`: An absolute path to the file to log to.
+
+### `network_json_terse`
+
+Delivers machine-readable JSON logs to a log aggregator over TCP. This is
+compatible with LogStash's TCP input with the codec set to `json_lines`.
+
+Arguments:
+
+- `host`: Hostname or IP address of the log aggregator.
+- `port`: Numerical port to contact on the host.
\ No newline at end of file
diff --git a/jenkins/prepare_synapse.sh b/jenkins/prepare_synapse.sh
deleted file mode 100755
index 016afb8baa..0000000000
--- a/jenkins/prepare_synapse.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#! /bin/bash
-
-set -eux
-
-cd "`dirname $0`/.."
-
-TOX_DIR=$WORKSPACE/.tox
-
-mkdir -p $TOX_DIR
-
-if ! [ $TOX_DIR -ef .tox ]; then
- ln -s "$TOX_DIR" .tox
-fi
-
-# set up the virtualenv
-tox -e py27 --notest -v
diff --git a/mypy.ini b/mypy.ini
new file mode 100644
index 0000000000..8788574ee3
--- /dev/null
+++ b/mypy.ini
@@ -0,0 +1,54 @@
+[mypy]
+namespace_packages=True
+plugins=mypy_zope:plugin
+follow_imports=skip
+mypy_path=stubs
+
+[mypy-synapse.config.homeserver]
+# this is a mess because of the metaclass shenanigans
+ignore_errors = True
+
+[mypy-zope]
+ignore_missing_imports = True
+
+[mypy-constantly]
+ignore_missing_imports = True
+
+[mypy-twisted.*]
+ignore_missing_imports = True
+
+[mypy-treq.*]
+ignore_missing_imports = True
+
+[mypy-hyperlink]
+ignore_missing_imports = True
+
+[mypy-h11]
+ignore_missing_imports = True
+
+[mypy-opentracing]
+ignore_missing_imports = True
+
+[mypy-OpenSSL]
+ignore_missing_imports = True
+
+[mypy-netaddr]
+ignore_missing_imports = True
+
+[mypy-saml2.*]
+ignore_missing_imports = True
+
+[mypy-unpaddedbase64]
+ignore_missing_imports = True
+
+[mypy-canonicaljson]
+ignore_missing_imports = True
+
+[mypy-jaeger_client]
+ignore_missing_imports = True
+
+[mypy-jsonschema]
+ignore_missing_imports = True
+
+[mypy-signedjson.*]
+ignore_missing_imports = True
diff --git a/res/templates-dinsic/mail-Vector.css b/res/templates-dinsic/mail-Vector.css
new file mode 100644
index 0000000000..6a3e36eda1
--- /dev/null
+++ b/res/templates-dinsic/mail-Vector.css
@@ -0,0 +1,7 @@
+.header {
+ border-bottom: 4px solid #e4f7ed ! important;
+}
+
+.notif_link a, .footer a {
+ color: #76CFA6 ! important;
+}
diff --git a/res/templates-dinsic/mail.css b/res/templates-dinsic/mail.css
new file mode 100644
index 0000000000..5ab3e1b06d
--- /dev/null
+++ b/res/templates-dinsic/mail.css
@@ -0,0 +1,156 @@
+body {
+ margin: 0px;
+}
+
+pre, code {
+ word-break: break-word;
+ white-space: pre-wrap;
+}
+
+#page {
+ font-family: 'Open Sans', Helvetica, Arial, Sans-Serif;
+ color: #454545;
+ font-size: 12pt;
+ width: 100%;
+ padding: 20px;
+}
+
+#inner {
+ width: 640px;
+}
+
+.header {
+ width: 100%;
+ height: 87px;
+ color: #454545;
+ border-bottom: 4px solid #e5e5e5;
+}
+
+.logo {
+ text-align: right;
+ margin-left: 20px;
+}
+
+.salutation {
+ padding-top: 10px;
+ font-weight: bold;
+}
+
+.summarytext {
+}
+
+.room {
+ width: 100%;
+ color: #454545;
+ border-bottom: 1px solid #e5e5e5;
+}
+
+.room_header td {
+ padding-top: 38px;
+ padding-bottom: 10px;
+ border-bottom: 1px solid #e5e5e5;
+}
+
+.room_name {
+ vertical-align: middle;
+ font-size: 18px;
+ font-weight: bold;
+}
+
+.room_header h2 {
+ margin-top: 0px;
+ margin-left: 75px;
+ font-size: 20px;
+}
+
+.room_avatar {
+ width: 56px;
+ line-height: 0px;
+ text-align: center;
+ vertical-align: middle;
+}
+
+.room_avatar img {
+ width: 48px;
+ height: 48px;
+ object-fit: cover;
+ border-radius: 24px;
+}
+
+.notif {
+ border-bottom: 1px solid #e5e5e5;
+ margin-top: 16px;
+ padding-bottom: 16px;
+}
+
+.historical_message .sender_avatar {
+ opacity: 0.3;
+}
+
+/* spell out opacity and historical_message class names for Outlook aka Word */
+.historical_message .sender_name {
+ color: #e3e3e3;
+}
+
+.historical_message .message_time {
+ color: #e3e3e3;
+}
+
+.historical_message .message_body {
+ color: #c7c7c7;
+}
+
+.historical_message td,
+.message td {
+ padding-top: 10px;
+}
+
+.sender_avatar {
+ width: 56px;
+ text-align: center;
+ vertical-align: top;
+}
+
+.sender_avatar img {
+ margin-top: -2px;
+ width: 32px;
+ height: 32px;
+ border-radius: 16px;
+}
+
+.sender_name {
+ display: inline;
+ font-size: 13px;
+ color: #a2a2a2;
+}
+
+.message_time {
+ text-align: right;
+ width: 100px;
+ font-size: 11px;
+ color: #a2a2a2;
+}
+
+.message_body {
+}
+
+.notif_link td {
+ padding-top: 10px;
+ padding-bottom: 10px;
+ font-weight: bold;
+}
+
+.notif_link a, .footer a {
+ color: #454545;
+ text-decoration: none;
+}
+
+.debug {
+ font-size: 10px;
+ color: #888;
+}
+
+.footer {
+ margin-top: 20px;
+ text-align: center;
+}
\ No newline at end of file
diff --git a/res/templates-dinsic/notif.html b/res/templates-dinsic/notif.html
new file mode 100644
index 0000000000..bcdfeea9da
--- /dev/null
+++ b/res/templates-dinsic/notif.html
@@ -0,0 +1,45 @@
+{% for message in notif.messages %}
+ <tr class="{{ "historical_message" if message.is_historical else "message" }}">
+ <td class="sender_avatar">
+ {% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %}
+ {% if message.sender_avatar_url %}
+ <img alt="" class="sender_avatar" src="{{ message.sender_avatar_url|mxc_to_http(32,32) }}" />
+ {% else %}
+ {% if message.sender_hash % 3 == 0 %}
+ <img class="sender_avatar" src="https://vector.im/beta/img/76cfa6.png" />
+ {% elif message.sender_hash % 3 == 1 %}
+ <img class="sender_avatar" src="https://vector.im/beta/img/50e2c2.png" />
+ {% else %}
+ <img class="sender_avatar" src="https://vector.im/beta/img/f4c371.png" />
+ {% endif %}
+ {% endif %}
+ {% endif %}
+ </td>
+ <td class="message_contents">
+ {% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %}
+ <div class="sender_name">{% if message.msgtype == "m.emote" %}*{% endif %} {{ message.sender_name }}</div>
+ {% endif %}
+ <div class="message_body">
+ {% if message.msgtype == "m.text" %}
+ {{ message.body_text_html }}
+ {% elif message.msgtype == "m.emote" %}
+ {{ message.body_text_html }}
+ {% elif message.msgtype == "m.notice" %}
+ {{ message.body_text_html }}
+ {% elif message.msgtype == "m.image" %}
+ <img src="{{ message.image_url|mxc_to_http(640, 480, scale) }}" />
+ {% elif message.msgtype == "m.file" %}
+ <span class="filename">{{ message.body_text_plain }}</span>
+ {% endif %}
+ </div>
+ </td>
+ <td class="message_time">{{ message.ts|format_ts("%H:%M") }}</td>
+ </tr>
+{% endfor %}
+<tr class="notif_link">
+ <td></td>
+ <td>
+ <a href="{{ notif.link }}">Voir {{ room.title }}</a>
+ </td>
+ <td></td>
+</tr>
diff --git a/res/templates-dinsic/notif.txt b/res/templates-dinsic/notif.txt
new file mode 100644
index 0000000000..3dff1bb570
--- /dev/null
+++ b/res/templates-dinsic/notif.txt
@@ -0,0 +1,16 @@
+{% for message in notif.messages %}
+{% if message.msgtype == "m.emote" %}* {% endif %}{{ message.sender_name }} ({{ message.ts|format_ts("%H:%M") }})
+{% if message.msgtype == "m.text" %}
+{{ message.body_text_plain }}
+{% elif message.msgtype == "m.emote" %}
+{{ message.body_text_plain }}
+{% elif message.msgtype == "m.notice" %}
+{{ message.body_text_plain }}
+{% elif message.msgtype == "m.image" %}
+{{ message.body_text_plain }}
+{% elif message.msgtype == "m.file" %}
+{{ message.body_text_plain }}
+{% endif %}
+{% endfor %}
+
+Voir {{ room.title }} à {{ notif.link }}
diff --git a/res/templates-dinsic/notif_mail.html b/res/templates-dinsic/notif_mail.html
new file mode 100644
index 0000000000..1e1efa74b2
--- /dev/null
+++ b/res/templates-dinsic/notif_mail.html
@@ -0,0 +1,55 @@
+<!doctype html>
+<html lang="en">
+ <head>
+ <style type="text/css">
+ {% include 'mail.css' without context %}
+ {% include "mail-%s.css" % app_name ignore missing without context %}
+ </style>
+ </head>
+ <body>
+ <table id="page">
+ <tr>
+ <td> </td>
+ <td id="inner">
+ <table class="header">
+ <tr>
+ <td>
+ <div class="salutation">Bonjour {{ user_display_name }},</div>
+ <div class="summarytext">{{ summary_text }}</div>
+ </td>
+ <td class="logo">
+ {% if app_name == "Riot" %}
+ <img src="http://matrix.org/img/riot-logo-email.png" width="83" height="83" alt="[Riot]"/>
+ {% elif app_name == "Vector" %}
+ <img src="http://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/>
+ {% else %}
+ <img src="http://matrix.org/img/matrix-120x51.png" width="120" height="51" alt="[matrix]"/>
+ {% endif %}
+ </td>
+ </tr>
+ </table>
+ {% for room in rooms %}
+ {% include 'room.html' with context %}
+ {% endfor %}
+ <div class="footer">
+ <a href="{{ unsubscribe_link }}">Se désinscrire</a>
+ <br/>
+ <br/>
+ <div class="debug">
+ Sending email at {{ reason.now|format_ts("%c") }} due to activity in room {{ reason.room_name }} because
+ an event was received at {{ reason.received_at|format_ts("%c") }}
+ which is more than {{ "%.1f"|format(reason.delay_before_mail_ms / (60*1000)) }} ({{ reason.delay_before_mail_ms }}) mins ago,
+ {% if reason.last_sent_ts %}
+ and the last time we sent a mail for this room was {{ reason.last_sent_ts|format_ts("%c") }},
+ which is more than {{ "%.1f"|format(reason.throttle_ms / (60*1000)) }} (current throttle_ms) mins ago.
+ {% else %}
+ and we don't have a last time we sent a mail for this room.
+ {% endif %}
+ </div>
+ </div>
+ </td>
+ <td> </td>
+ </tr>
+ </table>
+ </body>
+</html>
diff --git a/res/templates-dinsic/notif_mail.txt b/res/templates-dinsic/notif_mail.txt
new file mode 100644
index 0000000000..fae877426f
--- /dev/null
+++ b/res/templates-dinsic/notif_mail.txt
@@ -0,0 +1,10 @@
+Bonjour {{ user_display_name }},
+
+{{ summary_text }}
+
+{% for room in rooms %}
+{% include 'room.txt' with context %}
+{% endfor %}
+
+Vous pouvez désactiver ces notifications en cliquant ici {{ unsubscribe_link }}
+
diff --git a/res/templates-dinsic/room.html b/res/templates-dinsic/room.html
new file mode 100644
index 0000000000..0487b1b11c
--- /dev/null
+++ b/res/templates-dinsic/room.html
@@ -0,0 +1,33 @@
+<table class="room">
+ <tr class="room_header">
+ <td class="room_avatar">
+ {% if room.avatar_url %}
+ <img alt="" src="{{ room.avatar_url|mxc_to_http(48,48) }}" />
+ {% else %}
+ {% if room.hash % 3 == 0 %}
+ <img alt="" src="https://vector.im/beta/img/76cfa6.png" />
+ {% elif room.hash % 3 == 1 %}
+ <img alt="" src="https://vector.im/beta/img/50e2c2.png" />
+ {% else %}
+ <img alt="" src="https://vector.im/beta/img/f4c371.png" />
+ {% endif %}
+ {% endif %}
+ </td>
+ <td class="room_name" colspan="2">
+ {{ room.title }}
+ </td>
+ </tr>
+ {% if room.invite %}
+ <tr>
+ <td></td>
+ <td>
+ <a href="{{ room.link }}">Rejoindre la conversation.</a>
+ </td>
+ <td></td>
+ </tr>
+ {% else %}
+ {% for notif in room.notifs %}
+ {% include 'notif.html' with context %}
+ {% endfor %}
+ {% endif %}
+</table>
diff --git a/res/templates-dinsic/room.txt b/res/templates-dinsic/room.txt
new file mode 100644
index 0000000000..dd36d01d21
--- /dev/null
+++ b/res/templates-dinsic/room.txt
@@ -0,0 +1,9 @@
+{{ room.title }}
+
+{% if room.invite %}
+ Vous avez été invité, rejoignez la conversation en cliquant sur le lien suivant {{ room.link }}
+{% else %}
+ {% for notif in room.notifs %}
+ {% include 'notif.txt' with context %}
+ {% endfor %}
+{% endif %}
diff --git a/scripts-dev/check-newsfragment b/scripts-dev/check-newsfragment
index 0ec5075e79..b8a85abe18 100755
--- a/scripts-dev/check-newsfragment
+++ b/scripts-dev/check-newsfragment
@@ -5,9 +5,9 @@
set -e
-# make sure that origin/develop is up to date
-git remote set-branches --add origin develop
-git fetch origin develop
+# make sure that origin/dinsic is up to date
+git remote set-branches --add origin dinsic
+git fetch origin dinsic
# if there are changes in the debian directory, check that the debian changelog
# has been updated
diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index 179644852a..da9ab6776e 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -22,9 +22,10 @@ from netaddr import IPAddress
from twisted.internet import defer
+import synapse.logging.opentracing as opentracing
import synapse.types
from synapse import event_auth
-from synapse.api.constants import EventTypes, JoinRules, Membership
+from synapse.api.constants import EventTypes, JoinRules, Membership, UserTypes
from synapse.api.errors import (
AuthError,
Codes,
@@ -178,6 +179,7 @@ class Auth(object):
def get_public_keys(self, invite_event):
return event_auth.get_public_keys(invite_event)
+ @opentracing.trace
@defer.inlineCallbacks
def get_user_by_req(
self, request, allow_guest=False, rights="access", allow_expired=False
@@ -207,8 +209,10 @@ class Auth(object):
access_token = self.get_access_token_from_request(request)
user_id, app_service = yield self._get_appservice_user_id(request)
+
if user_id:
request.authenticated_entity = user_id
+ opentracing.set_tag("authenticated_entity", user_id)
if ip_addr and self.hs.config.track_appservice_user_ips:
yield self.store.insert_client_ip(
@@ -259,6 +263,7 @@ class Auth(object):
)
request.authenticated_entity = user.to_string()
+ opentracing.set_tag("authenticated_entity", user.to_string())
return synapse.types.create_requester(
user, token_id, is_guest, device_id, app_service=app_service
@@ -266,31 +271,35 @@ class Auth(object):
except KeyError:
raise MissingClientTokenError()
- @defer.inlineCallbacks
def _get_appservice_user_id(self, request):
app_service = self.store.get_app_service_by_token(
self.get_access_token_from_request(request)
)
+
if app_service is None:
- return (None, None)
+ return None, None
if app_service.ip_range_whitelist:
ip_address = IPAddress(self.hs.get_ip_from_request(request))
if ip_address not in app_service.ip_range_whitelist:
- return (None, None)
+ return None, None
if b"user_id" not in request.args:
- return (app_service.sender, app_service)
+ return app_service.sender, app_service
user_id = request.args[b"user_id"][0].decode("utf8")
if app_service.sender == user_id:
- return (app_service.sender, app_service)
+ return app_service.sender, app_service
if not app_service.is_interested_in_user(user_id):
raise AuthError(403, "Application service cannot masquerade as this user.")
- if not (yield self.store.get_user_by_id(user_id)):
- raise AuthError(403, "Application service has not registered this user")
- return (user_id, app_service)
+ # Let ASes manipulate nonexistent users (e.g. to shadow-register them)
+ # if not (yield self.store.get_user_by_id(user_id)):
+ # raise AuthError(
+ # 403,
+ # "Application service has not registered this user"
+ # )
+ return user_id, app_service
@defer.inlineCallbacks
def get_user_by_access_token(self, token, rights="access"):
@@ -690,7 +699,7 @@ class Auth(object):
# * The user is a guest user, and has joined the room
# else it will throw.
member_event = yield self.check_user_was_in_room(room_id, user_id)
- return (member_event.membership, member_event.event_id)
+ return member_event.membership, member_event.event_id
except AuthError:
visibility = yield self.state.get_current_state(
room_id, EventTypes.RoomHistoryVisibility, ""
@@ -699,14 +708,13 @@ class Auth(object):
visibility
and visibility.content["history_visibility"] == "world_readable"
):
- return (Membership.JOIN, None)
- return
+ return Membership.JOIN, None
raise AuthError(
403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN
)
@defer.inlineCallbacks
- def check_auth_blocking(self, user_id=None, threepid=None):
+ def check_auth_blocking(self, user_id=None, threepid=None, user_type=None):
"""Checks if the user should be rejected for some external reason,
such as monthly active user limiting or global disable flag
@@ -719,6 +727,9 @@ class Auth(object):
with a MAU blocked server, normally they would be rejected but their
threepid is on the reserved list. user_id and
threepid should never be set at the same time.
+
+ user_type(str|None): If present, is used to decide whether to check against
+ certain blocking reasons like MAU.
"""
# Never fail an auth check for the server notices users or support user
@@ -756,6 +767,10 @@ class Auth(object):
self.hs.config.mau_limits_reserved_threepids, threepid
):
return
+ elif user_type == UserTypes.SUPPORT:
+ # If the user does not exist yet and is of type "support",
+ # allow registration. Support users are excluded from MAU checks.
+ return
# Else if there is no room in the MAU bucket, bail
current_mau = yield self.store.get_monthly_active_count()
if current_mau >= self.hs.config.max_mau_value:
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index 3ffde0d7fc..956b86f6cf 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -85,6 +85,7 @@ class EventTypes(object):
RoomAvatar = "m.room.avatar"
RoomEncryption = "m.room.encryption"
GuestAccess = "m.room.guest_access"
+ Encryption = "m.room.encryption"
# These are used for validation
Message = "m.room.message"
@@ -94,6 +95,8 @@ class EventTypes(object):
ServerACL = "m.room.server_acl"
Pinned = "m.room.pinned_events"
+ Retention = "m.room.retention"
+
class RejectedReason(object):
AUTH_ERROR = "auth_error"
@@ -122,7 +125,8 @@ class UserTypes(object):
"""
SUPPORT = "support"
- ALL_USER_TYPES = (SUPPORT,)
+ BOT = "bot"
+ ALL_USER_TYPES = (SUPPORT, BOT)
class RelationTypes(object):
diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index cf1ebf1af2..d160df0bfd 100644
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
-# Copyright 2018 New Vector Ltd
+# Copyright 2017-2018 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -62,6 +63,13 @@ class Codes(object):
WRONG_ROOM_KEYS_VERSION = "M_WRONG_ROOM_KEYS_VERSION"
EXPIRED_ACCOUNT = "ORG_MATRIX_EXPIRED_ACCOUNT"
USER_DEACTIVATED = "M_USER_DEACTIVATED"
+ PASSWORD_TOO_SHORT = "M_PASSWORD_TOO_SHORT"
+ PASSWORD_NO_DIGIT = "M_PASSWORD_NO_DIGIT"
+ PASSWORD_NO_UPPERCASE = "M_PASSWORD_NO_UPPERCASE"
+ PASSWORD_NO_LOWERCASE = "M_PASSWORD_NO_LOWERCASE"
+ PASSWORD_NO_SYMBOL = "M_PASSWORD_NO_SYMBOL"
+ PASSWORD_IN_DICTIONARY = "M_PASSWORD_IN_DICTIONARY"
+ WEAK_PASSWORD = "M_WEAK_PASSWORD"
class CodeMessageException(RuntimeError):
@@ -419,6 +427,18 @@ class IncompatibleRoomVersionError(SynapseError):
return cs_error(self.msg, self.errcode, room_version=self._room_version)
+class PasswordRefusedError(SynapseError):
+ """A password has been refused, either during password reset/change or registration.
+ """
+
+ def __init__(
+ self,
+ msg="This password doesn't comply with the server's policy",
+ errcode=Codes.WEAK_PASSWORD,
+ ):
+ super(PasswordRefusedError, self).__init__(code=400, msg=msg, errcode=errcode)
+
+
class RequestSendFailed(RuntimeError):
"""Sending a HTTP request over federation failed due to not being able to
talk to the remote server for some reason.
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index 69dcf3523f..c30fdeee9a 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -36,18 +36,20 @@ from synapse.util.versionstring import get_version_string
logger = logging.getLogger(__name__)
+# list of tuples of function, args list, kwargs dict
_sighup_callbacks = []
-def register_sighup(func):
+def register_sighup(func, *args, **kwargs):
"""
Register a function to be called when a SIGHUP occurs.
Args:
func (function): Function to be called when sent a SIGHUP signal.
- Will be called with a single argument, the homeserver.
+ Will be called with the homeserver as its first argument.
+ *args, **kwargs: args and kwargs to be passed to the target function.
"""
- _sighup_callbacks.append(func)
+ _sighup_callbacks.append((func, args, kwargs))
def start_worker_reactor(appname, config, run_command=reactor.run):
@@ -248,8 +250,8 @@ def start(hs, listeners=None):
# we're not using systemd.
sdnotify(b"RELOADING=1")
- for i in _sighup_callbacks:
- i(hs)
+ for i, args, kwargs in _sighup_callbacks:
+ i(hs, *args, **kwargs)
sdnotify(b"READY=1")
diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py
index 1fd52a5526..04751a6a5e 100644
--- a/synapse/app/admin_cmd.py
+++ b/synapse/app/admin_cmd.py
@@ -227,8 +227,6 @@ def start(config_options):
config.start_pushers = False
config.send_federation = False
- setup_logging(config, use_worker_options=True)
-
synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
@@ -241,6 +239,8 @@ def start(config_options):
database_engine=database_engine,
)
+ setup_logging(ss, config, use_worker_options=True)
+
ss.setup()
# We use task.react as the basic run command as it correctly handles tearing
diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py
index 54bb114dec..767b87d2db 100644
--- a/synapse/app/appservice.py
+++ b/synapse/app/appservice.py
@@ -141,8 +141,6 @@ def start(config_options):
assert config.worker_app == "synapse.app.appservice"
- setup_logging(config, use_worker_options=True)
-
events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
@@ -167,6 +165,8 @@ def start(config_options):
database_engine=database_engine,
)
+ setup_logging(ps, config, use_worker_options=True)
+
ps.setup()
reactor.addSystemEventTrigger(
"before", "startup", _base.start, ps, config.worker_listeners
diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py
index 721bb5b119..dbcc414c42 100644
--- a/synapse/app/client_reader.py
+++ b/synapse/app/client_reader.py
@@ -119,7 +119,7 @@ class ClientReaderServer(HomeServer):
KeyChangesServlet(self).register(resource)
VoipRestServlet(self).register(resource)
PushRuleRestServlet(self).register(resource)
- VersionsRestServlet().register(resource)
+ VersionsRestServlet(self).register(resource)
resources.update({"/_matrix/client": resource})
@@ -179,8 +179,6 @@ def start(config_options):
assert config.worker_app == "synapse.app.client_reader"
- setup_logging(config, use_worker_options=True)
-
events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
@@ -193,6 +191,8 @@ def start(config_options):
database_engine=database_engine,
)
+ setup_logging(ss, config, use_worker_options=True)
+
ss.setup()
reactor.addSystemEventTrigger(
"before", "startup", _base.start, ss, config.worker_listeners
diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py
index 473c8895d0..c67fe69a50 100644
--- a/synapse/app/event_creator.py
+++ b/synapse/app/event_creator.py
@@ -175,8 +175,6 @@ def start(config_options):
assert config.worker_replication_http_port is not None
- setup_logging(config, use_worker_options=True)
-
# This should only be done on the user directory worker or the master
config.update_user_directory = False
@@ -192,6 +190,8 @@ def start(config_options):
database_engine=database_engine,
)
+ setup_logging(ss, config, use_worker_options=True)
+
ss.setup()
reactor.addSystemEventTrigger(
"before", "startup", _base.start, ss, config.worker_listeners
diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py
index 5255d9e8cc..1ef027a88c 100644
--- a/synapse/app/federation_reader.py
+++ b/synapse/app/federation_reader.py
@@ -160,8 +160,6 @@ def start(config_options):
assert config.worker_app == "synapse.app.federation_reader"
- setup_logging(config, use_worker_options=True)
-
events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
@@ -174,6 +172,8 @@ def start(config_options):
database_engine=database_engine,
)
+ setup_logging(ss, config, use_worker_options=True)
+
ss.setup()
reactor.addSystemEventTrigger(
"before", "startup", _base.start, ss, config.worker_listeners
diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py
index c5a2880e69..04fbb407af 100644
--- a/synapse/app/federation_sender.py
+++ b/synapse/app/federation_sender.py
@@ -171,8 +171,6 @@ def start(config_options):
assert config.worker_app == "synapse.app.federation_sender"
- setup_logging(config, use_worker_options=True)
-
events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
@@ -197,6 +195,8 @@ def start(config_options):
database_engine=database_engine,
)
+ setup_logging(ss, config, use_worker_options=True)
+
ss.setup()
reactor.addSystemEventTrigger(
"before", "startup", _base.start, ss, config.worker_listeners
diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py
index e2822ca848..9504bfbc70 100644
--- a/synapse/app/frontend_proxy.py
+++ b/synapse/app/frontend_proxy.py
@@ -70,12 +70,12 @@ class PresenceStatusStubServlet(RestServlet):
except HttpResponseException as e:
raise e.to_synapse_error()
- return (200, result)
+ return 200, result
@defer.inlineCallbacks
def on_PUT(self, request, user_id):
yield self.auth.get_user_by_req(request)
- return (200, {})
+ return 200, {}
class KeyUploadServlet(RestServlet):
@@ -126,11 +126,11 @@ class KeyUploadServlet(RestServlet):
self.main_uri + request.uri.decode("ascii"), body, headers=headers
)
- return (200, result)
+ return 200, result
else:
# Just interested in counts.
result = yield self.store.count_e2e_one_time_keys(user_id, device_id)
- return (200, {"one_time_key_counts": result})
+ return 200, {"one_time_key_counts": result}
class FrontendProxySlavedStore(
@@ -232,8 +232,6 @@ def start(config_options):
assert config.worker_main_http_uri is not None
- setup_logging(config, use_worker_options=True)
-
events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
@@ -246,6 +244,8 @@ def start(config_options):
database_engine=database_engine,
)
+ setup_logging(ss, config, use_worker_options=True)
+
ss.setup()
reactor.addSystemEventTrigger(
"before", "startup", _base.start, ss, config.worker_listeners
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 8233905844..3f31bf9490 100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -341,8 +341,6 @@ def setup(config_options):
# generating config files and shouldn't try to continue.
sys.exit(0)
- synapse.config.logger.setup_logging(config, use_worker_options=False)
-
events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
@@ -356,6 +354,8 @@ def setup(config_options):
database_engine=database_engine,
)
+ synapse.config.logger.setup_logging(hs, config, use_worker_options=False)
+
logger.info("Preparing database: %s...", config.database_config["name"])
try:
@@ -561,10 +561,12 @@ def run(hs):
stats["database_engine"] = hs.get_datastore().database_engine_name
stats["database_server_version"] = hs.get_datastore().get_server_version()
- logger.info("Reporting stats to matrix.org: %s" % (stats,))
+ logger.info(
+ "Reporting stats to %s: %s" % (hs.config.report_stats_endpoint, stats)
+ )
try:
- yield hs.get_simple_http_client().put_json(
- "https://matrix.org/report-usage-stats/push", stats
+ yield hs.get_proxied_http_client().put_json(
+ hs.config.report_stats_endpoint, stats
)
except Exception as e:
logger.warn("Error reporting stats: %s", e)
diff --git a/synapse/app/media_repository.py b/synapse/app/media_repository.py
index 3a168577c7..2ac783ffa3 100644
--- a/synapse/app/media_repository.py
+++ b/synapse/app/media_repository.py
@@ -155,8 +155,6 @@ def start(config_options):
"Please add ``enable_media_repo: false`` to the main config\n"
)
- setup_logging(config, use_worker_options=True)
-
events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
@@ -169,6 +167,8 @@ def start(config_options):
database_engine=database_engine,
)
+ setup_logging(ss, config, use_worker_options=True)
+
ss.setup()
reactor.addSystemEventTrigger(
"before", "startup", _base.start, ss, config.worker_listeners
diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py
index 692ffa2f04..d84732ee3c 100644
--- a/synapse/app/pusher.py
+++ b/synapse/app/pusher.py
@@ -184,8 +184,6 @@ def start(config_options):
assert config.worker_app == "synapse.app.pusher"
- setup_logging(config, use_worker_options=True)
-
events.USE_FROZEN_DICTS = config.use_frozen_dicts
if config.start_pushers:
@@ -210,6 +208,8 @@ def start(config_options):
database_engine=database_engine,
)
+ setup_logging(ps, config, use_worker_options=True)
+
ps.setup()
def start():
diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
index a1c3b162f7..473026fce5 100644
--- a/synapse/app/synchrotron.py
+++ b/synapse/app/synchrotron.py
@@ -435,8 +435,6 @@ def start(config_options):
assert config.worker_app == "synapse.app.synchrotron"
- setup_logging(config, use_worker_options=True)
-
synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
@@ -450,6 +448,8 @@ def start(config_options):
application_service_handler=SynchrotronApplicationService(),
)
+ setup_logging(ss, config, use_worker_options=True)
+
ss.setup()
reactor.addSystemEventTrigger(
"before", "startup", _base.start, ss, config.worker_listeners
diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py
index cb29a1afab..e01afb39f2 100644
--- a/synapse/app/user_dir.py
+++ b/synapse/app/user_dir.py
@@ -197,8 +197,6 @@ def start(config_options):
assert config.worker_app == "synapse.app.user_dir"
- setup_logging(config, use_worker_options=True)
-
events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
@@ -223,6 +221,8 @@ def start(config_options):
database_engine=database_engine,
)
+ setup_logging(ss, config, use_worker_options=True)
+
ss.setup()
reactor.addSystemEventTrigger(
"before", "startup", _base.start, ss, config.worker_listeners
diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py
index 33b3579425..65cbff95b9 100644
--- a/synapse/appservice/__init__.py
+++ b/synapse/appservice/__init__.py
@@ -268,7 +268,7 @@ class ApplicationService(object):
def is_exclusive_room(self, room_id):
return self._is_exclusive(ApplicationService.NS_ROOMS, room_id)
- def get_exlusive_user_regexes(self):
+ def get_exclusive_user_regexes(self):
"""Get the list of regexes used to determine if a user is exclusively
registered by the AS
"""
diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py
index 007ca75a94..3e25bf5747 100644
--- a/synapse/appservice/api.py
+++ b/synapse/appservice/api.py
@@ -107,7 +107,6 @@ class ApplicationServiceApi(SimpleHttpClient):
except CodeMessageException as e:
if e.code == 404:
return False
- return
logger.warning("query_user to %s received %s", uri, e.code)
except Exception as ex:
logger.warning("query_user to %s threw exception %s", uri, ex)
@@ -127,7 +126,6 @@ class ApplicationServiceApi(SimpleHttpClient):
logger.warning("query_alias to %s received %s", uri, e.code)
if e.code == 404:
return False
- return
except Exception as ex:
logger.warning("query_alias to %s threw exception %s", uri, ex)
return False
@@ -230,7 +228,6 @@ class ApplicationServiceApi(SimpleHttpClient):
sent_transactions_counter.labels(service.id).inc()
sent_events_counter.labels(service.id).inc(len(events))
return True
- return
except CodeMessageException as e:
logger.warning("push_bulk to %s received %s", uri, e.code)
except Exception as ex:
diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py
index 42a350bff8..9998f822f1 100644
--- a/synapse/appservice/scheduler.py
+++ b/synapse/appservice/scheduler.py
@@ -70,35 +70,37 @@ class ApplicationServiceScheduler(object):
self.store = hs.get_datastore()
self.as_api = hs.get_application_service_api()
- def create_recoverer(service, callback):
- return _Recoverer(self.clock, self.store, self.as_api, service, callback)
-
- self.txn_ctrl = _TransactionController(
- self.clock, self.store, self.as_api, create_recoverer
- )
+ self.txn_ctrl = _TransactionController(self.clock, self.store, self.as_api)
self.queuer = _ServiceQueuer(self.txn_ctrl, self.clock)
@defer.inlineCallbacks
def start(self):
logger.info("Starting appservice scheduler")
+
# check for any DOWN ASes and start recoverers for them.
- recoverers = yield _Recoverer.start(
- self.clock, self.store, self.as_api, self.txn_ctrl.on_recovered
+ services = yield self.store.get_appservices_by_state(
+ ApplicationServiceState.DOWN
)
- self.txn_ctrl.add_recoverers(recoverers)
+
+ for service in services:
+ self.txn_ctrl.start_recoverer(service)
def submit_event_for_as(self, service, event):
self.queuer.enqueue(service, event)
class _ServiceQueuer(object):
- """Queues events for the same application service together, sending
- transactions as soon as possible. Once a transaction is sent successfully,
- this schedules any other events in the queue to run.
+ """Queue of events waiting to be sent to appservices.
+
+ Groups events into transactions per-appservice, and sends them on to the
+ TransactionController. Makes sure that we only have one transaction in flight per
+ appservice at a given time.
"""
def __init__(self, txn_ctrl, clock):
self.queued_events = {} # dict of {service_id: [events]}
+
+ # the appservices which currently have a transaction in flight
self.requests_in_flight = set()
self.txn_ctrl = txn_ctrl
self.clock = clock
@@ -136,13 +138,29 @@ class _ServiceQueuer(object):
class _TransactionController(object):
- def __init__(self, clock, store, as_api, recoverer_fn):
+ """Transaction manager.
+
+ Builds AppServiceTransactions and runs their lifecycle. Also starts a Recoverer
+ if a transaction fails.
+
+ (Note we only have one of these in the homeserver.)
+
+ Args:
+ clock (synapse.util.Clock):
+ store (synapse.storage.DataStore):
+ as_api (synapse.appservice.api.ApplicationServiceApi):
+ """
+
+ def __init__(self, clock, store, as_api):
self.clock = clock
self.store = store
self.as_api = as_api
- self.recoverer_fn = recoverer_fn
- # keep track of how many recoverers there are
- self.recoverers = []
+
+ # map from service id to recoverer instance
+ self.recoverers = {}
+
+ # overridden by unit tests
+ self.RECOVERER_CLASS = _Recoverer
@defer.inlineCallbacks
def send(self, service, events):
@@ -154,42 +172,45 @@ class _TransactionController(object):
if sent:
yield txn.complete(self.store)
else:
- run_in_background(self._start_recoverer, service)
+ run_in_background(self._on_txn_fail, service)
except Exception:
logger.exception("Error creating appservice transaction")
- run_in_background(self._start_recoverer, service)
+ run_in_background(self._on_txn_fail, service)
@defer.inlineCallbacks
def on_recovered(self, recoverer):
- self.recoverers.remove(recoverer)
logger.info(
"Successfully recovered application service AS ID %s", recoverer.service.id
)
+ self.recoverers.pop(recoverer.service.id)
logger.info("Remaining active recoverers: %s", len(self.recoverers))
yield self.store.set_appservice_state(
recoverer.service, ApplicationServiceState.UP
)
- def add_recoverers(self, recoverers):
- for r in recoverers:
- self.recoverers.append(r)
- if len(recoverers) > 0:
- logger.info("New active recoverers: %s", len(self.recoverers))
-
@defer.inlineCallbacks
- def _start_recoverer(self, service):
+ def _on_txn_fail(self, service):
try:
yield self.store.set_appservice_state(service, ApplicationServiceState.DOWN)
- logger.info(
- "Application service falling behind. Starting recoverer. AS ID %s",
- service.id,
- )
- recoverer = self.recoverer_fn(service, self.on_recovered)
- self.add_recoverers([recoverer])
- recoverer.recover()
+ self.start_recoverer(service)
except Exception:
logger.exception("Error starting AS recoverer")
+ def start_recoverer(self, service):
+ """Start a Recoverer for the given service
+
+ Args:
+ service (synapse.appservice.ApplicationService):
+ """
+ logger.info("Starting recoverer for AS ID %s", service.id)
+ assert service.id not in self.recoverers
+ recoverer = self.RECOVERER_CLASS(
+ self.clock, self.store, self.as_api, service, self.on_recovered
+ )
+ self.recoverers[service.id] = recoverer
+ recoverer.recover()
+ logger.info("Now %i active recoverers", len(self.recoverers))
+
@defer.inlineCallbacks
def _is_service_up(self, service):
state = yield self.store.get_appservice_state(service)
@@ -197,18 +218,17 @@ class _TransactionController(object):
class _Recoverer(object):
- @staticmethod
- @defer.inlineCallbacks
- def start(clock, store, as_api, callback):
- services = yield store.get_appservices_by_state(ApplicationServiceState.DOWN)
- recoverers = [_Recoverer(clock, store, as_api, s, callback) for s in services]
- for r in recoverers:
- logger.info(
- "Starting recoverer for AS ID %s which was marked as " "DOWN",
- r.service.id,
- )
- r.recover()
- return recoverers
+ """Manages retries and backoff for a DOWN appservice.
+
+ We have one of these for each appservice which is currently considered DOWN.
+
+ Args:
+ clock (synapse.util.Clock):
+ store (synapse.storage.DataStore):
+ as_api (synapse.appservice.api.ApplicationServiceApi):
+ service (synapse.appservice.ApplicationService): the service we are managing
+ callback (callable[_Recoverer]): called once the service recovers.
+ """
def __init__(self, clock, store, as_api, service, callback):
self.clock = clock
@@ -224,7 +244,9 @@ class _Recoverer(object):
"as-recoverer-%s" % (self.service.id,), self.retry
)
- self.clock.call_later((2 ** self.backoff_counter), _retry)
+ delay = 2 ** self.backoff_counter
+ logger.info("Scheduling retries on %s in %fs", self.service.id, delay)
+ self.clock.call_later(delay, _retry)
def _backoff(self):
# cap the backoff to be around 8.5min => (2^9) = 512 secs
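With delay = 2 ** self.backoff_counter and the counter capped at 9 (the increment itself is outside this hunk), consecutive failures produce the following schedule:

    # backoff_counter starts at 1 and is bumped after each failed pass,
    # so retries wait 2, 4, 8, ... 512 seconds (~8.5 minute cap).
    delays = [2 ** n for n in range(1, 10)]
    assert delays == [2, 4, 8, 16, 32, 64, 128, 256, 512]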
@@ -234,25 +256,30 @@ class _Recoverer(object):
@defer.inlineCallbacks
def retry(self):
+ logger.info("Starting retries on %s", self.service.id)
try:
- txn = yield self.store.get_oldest_unsent_txn(self.service)
- if txn:
+ while True:
+ txn = yield self.store.get_oldest_unsent_txn(self.service)
+ if not txn:
+ # nothing left: we're done!
+ self.callback(self)
+ return
+
logger.info(
"Retrying transaction %s for AS ID %s", txn.id, txn.service.id
)
sent = yield txn.send(self.as_api)
- if sent:
- yield txn.complete(self.store)
- # reset the backoff counter and retry immediately
- self.backoff_counter = 1
- yield self.retry()
- else:
- self._backoff()
- else:
- self._set_service_recovered()
- except Exception as e:
- logger.exception(e)
- self._backoff()
-
- def _set_service_recovered(self):
- self.callback(self)
+ if not sent:
+ break
+
+ yield txn.complete(self.store)
+
+ # reset the backoff counter and then process the next transaction
+ self.backoff_counter = 1
+
+ except Exception:
+ logger.exception("Unexpected error running retries")
+
+ # we didn't manage to send all of the transactions before we got an error of
+ # some flavour: reschedule the next retry.
+ self._backoff()
diff --git a/synapse/config/__init__.py b/synapse/config/__init__.py
index f2a5a41e92..1e76e9559d 100644
--- a/synapse/config/__init__.py
+++ b/synapse/config/__init__.py
@@ -13,8 +13,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from ._base import ConfigError
+from ._base import ConfigError, find_config_files
-# export ConfigError if somebody does import *
+# export ConfigError and find_config_files if somebody does
+# import *
# this is largely a fudge to stop PEP8 moaning about the import
-__all__ = ["ConfigError"]
+__all__ = ["ConfigError", "find_config_files"]
diff --git a/synapse/config/_base.py b/synapse/config/_base.py
index 6ce5cd07fb..2674a62aa5 100644
--- a/synapse/config/_base.py
+++ b/synapse/config/_base.py
@@ -18,6 +18,7 @@
import argparse
import errno
import os
+from io import open as io_open
from textwrap import dedent
from six import integer_types
@@ -133,7 +134,7 @@ class Config(object):
@classmethod
def read_file(cls, file_path, config_name):
cls.check_file(file_path, config_name)
- with open(file_path) as file_stream:
+ with io_open(file_path, encoding="utf-8") as file_stream:
return file_stream.read()
def invoke_all(self, name, *args, **kargs):
@@ -181,6 +182,11 @@ class Config(object):
generate_secrets=False,
report_stats=None,
open_private_ports=False,
+ listeners=None,
+ database_conf=None,
+ tls_certificate_path=None,
+ tls_private_key_path=None,
+ acme_domain=None,
):
"""Build a default configuration file
@@ -207,6 +213,33 @@ class Config(object):
open_private_ports (bool): True to leave private ports (such as the non-TLS
HTTP listener) open to the internet.
+ listeners (list(dict)|None): A list of descriptions of the listeners
+ synapse should start with, each of which specifies a port (int), a list of
+ resources (list(dict)), tls (bool) and type (str). For example:
+ [{
+ "port": 8448,
+ "resources": [{"names": ["federation"]}],
+ "tls": True,
+ "type": "http",
+ },
+ {
+ "port": 443,
+ "resources": [{"names": ["client"]}],
+ "tls": False,
+ "type": "http",
+ }],
+
+
+ database_conf (dict|None): The database configuration to use. Its "name"
+ key should be either `psycopg2` or `sqlite3`.
+
+ tls_certificate_path (str|None): The path to the tls certificate.
+
+ tls_private_key_path (str|None): The path to the tls private key.
+
+ acme_domain (str|None): The domain acme will try to validate. If
+ specified, acme will be enabled.
+
Returns:
str: the yaml config file
"""
@@ -220,6 +253,11 @@ class Config(object):
generate_secrets=generate_secrets,
report_stats=report_stats,
open_private_ports=open_private_ports,
+ listeners=listeners,
+ database_conf=database_conf,
+ tls_certificate_path=tls_certificate_path,
+ tls_private_key_path=tls_private_key_path,
+ acme_domain=acme_domain,
)
)
diff --git a/synapse/config/database.py b/synapse/config/database.py
index 746a6cd1f4..118aafbd4a 100644
--- a/synapse/config/database.py
+++ b/synapse/config/database.py
@@ -13,6 +13,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import os
+from textwrap import indent
+
+import yaml
from ._base import Config
@@ -38,20 +41,28 @@ class DatabaseConfig(Config):
self.set_databasepath(config.get("database_path"))
- def generate_config_section(self, data_dir_path, **kwargs):
- database_path = os.path.join(data_dir_path, "homeserver.db")
- return (
- """\
- ## Database ##
-
- database:
- # The database engine name
+ def generate_config_section(self, data_dir_path, database_conf, **kwargs):
+ if not database_conf:
+ database_path = os.path.join(data_dir_path, "homeserver.db")
+ database_conf = (
+ """# The database engine name
name: "sqlite3"
# Arguments to pass to the engine
args:
# Path to the database
database: "%(database_path)s"
+ """
+ % locals()
+ )
+ else:
+ database_conf = indent(yaml.dump(database_conf), " " * 10).lstrip()
+
+ return (
+ """\
+ ## Database ##
+ database:
+ %(database_conf)s
# Number of events to cache in memory.
#
#event_cache_size: 10K
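The ten-space indent plus lstrip() is what lets an arbitrary database_conf dict be spliced under the `database:` key of the generated YAML. A standalone illustration (the sample dict is made up):

    from textwrap import indent
    import yaml

    database_conf = {"name": "psycopg2", "args": {"user": "synapse"}}
    dumped = indent(yaml.dump(database_conf, default_flow_style=False), " " * 10)
    # lstrip() drops the indent from the first line only, since that line is
    # emitted directly after the template's own indentation.
    print(dumped.lstrip())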
diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py
index 8381b8eb29..e5de768b0c 100644
--- a/synapse/config/emailconfig.py
+++ b/synapse/config/emailconfig.py
@@ -20,6 +20,7 @@ from __future__ import print_function
# This file can't be called email.py because if it is, we cannot:
import email.utils
import os
+from enum import Enum
import pkg_resources
@@ -74,19 +75,48 @@ class EmailConfig(Config):
"renew_at"
)
- email_trust_identity_server_for_password_resets = email_config.get(
- "trust_identity_server_for_password_resets", False
+ self.threepid_behaviour_email = (
+ # Have Synapse handle the email sending if account_threepid_delegates.email
+ # is not defined
+ # msisdn is currently always remote while Synapse does not support any method of
+ # sending SMS messages
+ ThreepidBehaviour.REMOTE
+ if self.account_threepid_delegate_email
+ else ThreepidBehaviour.LOCAL
)
- self.email_password_reset_behaviour = (
- "remote" if email_trust_identity_server_for_password_resets else "local"
- )
- self.password_resets_were_disabled_due_to_email_config = False
- if self.email_password_reset_behaviour == "local" and email_config == {}:
+ # Prior to Synapse v1.4.0, there was another option that defined whether Synapse would
+ # use an identity server to send password reset tokens on its behalf. We now warn the user
+ # if they have this set and tell them to use the updated option, while using a default
+ # identity server in the process.
+ self.using_identity_server_from_trusted_list = False
+ if (
+ not self.account_threepid_delegate_email
+ and config.get("trust_identity_server_for_password_resets", False) is True
+ ):
+ # Use the first entry in self.trusted_third_party_id_servers instead
+ if self.trusted_third_party_id_servers:
+ # XXX: It's a little confusing that account_threepid_delegate_email is modified
+ # both in RegistrationConfig and here. We should factor this bit out
+ self.account_threepid_delegate_email = self.trusted_third_party_id_servers[
+ 0
+ ]
+ self.using_identity_server_from_trusted_list = True
+ else:
+ raise ConfigError(
+ "Attempted to use an identity server from"
+ '"trusted_third_party_id_servers" but it is empty.'
+ )
+
+ self.local_threepid_handling_disabled_due_to_email_config = False
+ if (
+ self.threepid_behaviour_email == ThreepidBehaviour.LOCAL
+ and email_config == {}
+ ):
# We cannot warn the user this has happened here
# Instead do so when a user attempts to reset their password
- self.password_resets_were_disabled_due_to_email_config = True
+ self.local_threepid_handling_disabled_due_to_email_config = True
- self.email_password_reset_behaviour = "off"
+ self.threepid_behaviour_email = ThreepidBehaviour.OFF
# Get lifetime of a validation token in milliseconds
self.email_validation_token_lifetime = self.parse_duration(
@@ -96,7 +126,7 @@ class EmailConfig(Config):
if (
self.email_enable_notifs
or account_validity_renewal_enabled
- or self.email_password_reset_behaviour == "local"
+ or self.threepid_behaviour_email == ThreepidBehaviour.LOCAL
):
# make sure we can import the required deps
import jinja2
@@ -106,7 +136,7 @@ class EmailConfig(Config):
jinja2
bleach
- if self.email_password_reset_behaviour == "local":
+ if self.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
required = ["smtp_host", "smtp_port", "notif_from"]
missing = []
@@ -115,7 +145,7 @@ class EmailConfig(Config):
missing.append("email." + k)
if config.get("public_baseurl") is None:
- missing.append("public_base_url")
+ missing.append("public_baseurl")
if len(missing) > 0:
raise RuntimeError(
@@ -125,28 +155,45 @@ class EmailConfig(Config):
% (", ".join(missing),)
)
- # Templates for password reset emails
+ # These email templates have placeholders in them, and thus must be
+ # parsed using a templating engine during a request
self.email_password_reset_template_html = email_config.get(
"password_reset_template_html", "password_reset.html"
)
self.email_password_reset_template_text = email_config.get(
"password_reset_template_text", "password_reset.txt"
)
- self.email_password_reset_failure_template = email_config.get(
- "password_reset_failure_template", "password_reset_failure.html"
+ self.email_registration_template_html = email_config.get(
+ "registration_template_html", "registration.html"
)
- # This template does not support any replaceable variables, so we will
- # read it from the disk once during setup
- email_password_reset_success_template = email_config.get(
- "password_reset_success_template", "password_reset_success.html"
+ self.email_registration_template_text = email_config.get(
+ "registration_template_text", "registration.txt"
+ )
+ self.email_password_reset_template_failure_html = email_config.get(
+ "password_reset_template_failure_html", "password_reset_failure.html"
+ )
+ self.email_registration_template_failure_html = email_config.get(
+ "registration_template_failure_html", "registration_failure.html"
+ )
+
+ # These templates do not support any placeholder variables, so we
+ # will read them from disk once during setup
+ email_password_reset_template_success_html = email_config.get(
+ "password_reset_template_success_html", "password_reset_success.html"
+ )
+ email_registration_template_success_html = email_config.get(
+ "registration_template_success_html", "registration_success.html"
)
# Check templates exist
for f in [
self.email_password_reset_template_html,
self.email_password_reset_template_text,
- self.email_password_reset_failure_template,
- email_password_reset_success_template,
+ self.email_registration_template_html,
+ self.email_registration_template_text,
+ self.email_password_reset_template_failure_html,
+ email_password_reset_template_success_html,
+ email_registration_template_success_html,
]:
p = os.path.join(self.email_template_dir, f)
if not os.path.isfile(p):
@@ -154,11 +201,17 @@ class EmailConfig(Config):
# Retrieve content of web templates
filepath = os.path.join(
- self.email_template_dir, email_password_reset_success_template
+ self.email_template_dir, email_password_reset_template_success_html
)
- self.email_password_reset_success_html_content = self.read_file(
+ self.email_password_reset_template_success_html = self.read_file(
filepath, "email.password_reset_template_success_html"
)
+ filepath = os.path.join(
+ self.email_template_dir, email_registration_template_success_html
+ )
+ self.email_registration_template_success_html_content = self.read_file(
+ filepath, "email.registration_template_success_html"
+ )
if self.email_enable_notifs:
required = [
@@ -239,19 +292,6 @@ class EmailConfig(Config):
# #
# riot_base_url: "http://localhost/riot"
#
- # # Enable sending password reset emails via the configured, trusted
- # # identity servers
- # #
- # # IMPORTANT! This will give a malicious or overtaken identity server
- # # the ability to reset passwords for your users! Make absolutely sure
- # # that you want to do this! It is strongly recommended that password
- # # reset emails be sent by the homeserver instead
- # #
- # # If this option is set to false and SMTP options have not been
- # # configured, resetting user passwords via email will be disabled
- # #
- # #trust_identity_server_for_password_resets: false
- #
# # Configure the time that a validation email or text message code
# # will expire after sending
# #
@@ -283,9 +323,35 @@ class EmailConfig(Config):
# #password_reset_template_html: password_reset.html
# #password_reset_template_text: password_reset.txt
#
+ # # Templates for registration emails sent by the homeserver
+ # #
+ # #registration_template_html: registration.html
+ # #registration_template_text: registration.txt
+ #
# # Templates for password reset success and failure pages that a user
# # will see after attempting to reset their password
# #
# #password_reset_template_success_html: password_reset_success.html
# #password_reset_template_failure_html: password_reset_failure.html
+ #
+ # # Templates for registration success and failure pages that a user
+ # # will see after attempting to register using an email or phone
+ # #
+ # #registration_template_success_html: registration_success.html
+ # #registration_template_failure_html: registration_failure.html
"""
+
+
+class ThreepidBehaviour(Enum):
+ """
+ Enum to define the behaviour of Synapse with regards to when it contacts an identity
+ server for 3pid registration and password resets
+
+ REMOTE = use an external server to send tokens
+ LOCAL = send tokens ourselves
+ OFF = disable registration via 3pid and password resets
+ """
+
+ REMOTE = "remote"
+ LOCAL = "local"
+ OFF = "off"
diff --git a/synapse/config/key.py b/synapse/config/key.py
index fe8386985c..ba2199bceb 100644
--- a/synapse/config/key.py
+++ b/synapse/config/key.py
@@ -76,7 +76,7 @@ class KeyConfig(Config):
config_dir_path, config["server_name"] + ".signing.key"
)
- self.signing_key = self.read_signing_key(signing_key_path)
+ self.signing_key = self.read_signing_keys(signing_key_path, "signing_key")
self.old_signing_keys = self.read_old_signing_keys(
config.get("old_signing_keys", {})
@@ -85,6 +85,14 @@ class KeyConfig(Config):
config.get("key_refresh_interval", "1d")
)
+ key_server_signing_keys_path = config.get("key_server_signing_keys_path")
+ if key_server_signing_keys_path:
+ self.key_server_signing_keys = self.read_signing_keys(
+ key_server_signing_keys_path, "key_server_signing_keys_path"
+ )
+ else:
+ self.key_server_signing_keys = list(self.signing_key)
+
# if neither trusted_key_servers nor perspectives are given, use the default.
if "perspectives" not in config and "trusted_key_servers" not in config:
key_servers = [{"server_name": "matrix.org"}]
@@ -210,16 +218,34 @@ class KeyConfig(Config):
#
#trusted_key_servers:
# - server_name: "matrix.org"
+ #
+
+ # The signing keys to use when acting as a trusted key server. If not specified,
+ # defaults to the server signing key.
+ #
+ # Can contain multiple keys, one per line.
+ #
+ #key_server_signing_keys_path: "key_server_signing_keys.key"
"""
% locals()
)
- def read_signing_key(self, signing_key_path):
- signing_keys = self.read_file(signing_key_path, "signing_key")
+ def read_signing_keys(self, signing_key_path, name):
+ """Read the signing keys in the given path.
+
+ Args:
+ signing_key_path (str)
+ name (str): Associated config key name
+
+ Returns:
+ list[SigningKey]
+ """
+
+ signing_keys = self.read_file(signing_key_path, name)
try:
return read_signing_keys(signing_keys.splitlines(True))
except Exception as e:
- raise ConfigError("Error reading signing_key: %s" % (str(e)))
+ raise ConfigError("Error reading %s: %s" % (name, str(e)))
def read_old_signing_keys(self, old_signing_keys):
keys = {}
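The key files read here use signedjson's one-key-per-line format, so a file suitable for key_server_signing_keys_path can be produced and parsed with the signedjson helpers directly (file name illustrative):

    from signedjson.key import (
        generate_signing_key,
        read_signing_keys,
        write_signing_keys,
    )

    with open("key_server_signing_keys.key", "w") as f:
        write_signing_keys(f, [generate_signing_key("a_key_id")])

    with open("key_server_signing_keys.key") as f:
        keys = read_signing_keys(f)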
diff --git a/synapse/config/logger.py b/synapse/config/logger.py
index d321d00b80..767ecfdf09 100644
--- a/synapse/config/logger.py
+++ b/synapse/config/logger.py
@@ -21,10 +21,19 @@ from string import Template
import yaml
-from twisted.logger import STDLibLogObserver, globalLogBeginner
+from twisted.logger import (
+ ILogObserver,
+ LogBeginner,
+ STDLibLogObserver,
+ globalLogBeginner,
+)
import synapse
from synapse.app import _base as appbase
+from synapse.logging._structured import (
+ reload_structured_logging,
+ setup_structured_logging,
+)
from synapse.logging.context import LoggingContextFilter
from synapse.util.versionstring import get_version_string
@@ -85,7 +94,8 @@ class LoggingConfig(Config):
"""\
## Logging ##
- # A yaml python logging config file
+ # A yaml python logging config file as described by
+ # https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema
#
log_config: "%(log_config)s"
"""
@@ -119,21 +129,10 @@ class LoggingConfig(Config):
log_config_file.write(DEFAULT_LOG_CONFIG.substitute(log_file=log_file))
-def setup_logging(config, use_worker_options=False):
- """ Set up python logging
-
- Args:
- config (LoggingConfig | synapse.config.workers.WorkerConfig):
- configuration data
-
- use_worker_options (bool): True to use the 'worker_log_config' option
- instead of 'log_config'.
-
- register_sighup (func | None): Function to call to register a
- sighup handler.
+def _setup_stdlib_logging(config, log_config, logBeginner: LogBeginner):
+ """
+ Set up Python stdlib logging.
"""
- log_config = config.worker_log_config if use_worker_options else config.log_config
-
if log_config is None:
log_format = (
"%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
@@ -151,35 +150,10 @@ def setup_logging(config, use_worker_options=False):
handler.addFilter(LoggingContextFilter(request=""))
logger.addHandler(handler)
else:
+ logging.config.dictConfig(log_config)
- def load_log_config():
- with open(log_config, "r") as f:
- logging.config.dictConfig(yaml.safe_load(f))
-
- def sighup(*args):
- # it might be better to use a file watcher or something for this.
- load_log_config()
- logging.info("Reloaded log config from %s due to SIGHUP", log_config)
-
- load_log_config()
- appbase.register_sighup(sighup)
-
- # make sure that the first thing we log is a thing we can grep backwards
- # for
- logging.warn("***** STARTING SERVER *****")
- logging.warn("Server %s version %s", sys.argv[0], get_version_string(synapse))
- logging.info("Server hostname: %s", config.server_name)
-
- # It's critical to point twisted's internal logging somewhere, otherwise it
- # stacks up and leaks kup to 64K object;
- # see: https://twistedmatrix.com/trac/ticket/8164
- #
- # Routing to the python logging framework could be a performance problem if
- # the handlers blocked for a long time as python.logging is a blocking API
- # see https://twistedmatrix.com/documents/current/core/howto/logger.html
- # filed as https://github.com/matrix-org/synapse/issues/1727
- #
- # However this may not be too much of a problem if we are just writing to a file.
+ # Route Twisted's native logging through to the standard library logging
+ # system.
observer = STDLibLogObserver()
def _log(event):
@@ -196,8 +170,71 @@ def setup_logging(config, use_worker_options=False):
return observer(event)
- globalLogBeginner.beginLoggingTo(
- [_log], redirectStandardIO=not config.no_redirect_stdio
- )
+ logBeginner.beginLoggingTo([_log], redirectStandardIO=not config.no_redirect_stdio)
if not config.no_redirect_stdio:
print("Redirected stdout/stderr to logs")
+
+ return observer
+
+
+def _reload_stdlib_logging(*args, log_config=None):
+ logger = logging.getLogger("")
+
+ if not log_config:
+ logger.warn("Reloaded a blank config?")
+
+ logging.config.dictConfig(log_config)
+
+
+def setup_logging(
+ hs, config, use_worker_options=False, logBeginner: LogBeginner = globalLogBeginner
+) -> ILogObserver:
+ """
+ Set up the logging subsystem.
+
+ Args:
+ config (LoggingConfig | synapse.config.workers.WorkerConfig):
+ configuration data
+
+ use_worker_options (bool): True to use the 'worker_log_config' option
+ instead of 'log_config'.
+
+ logBeginner: The Twisted logBeginner to use.
+
+ Returns:
+ The "root" Twisted Logger observer, suitable for sending logs to from a
+ Logger instance.
+ """
+ log_config = config.worker_log_config if use_worker_options else config.log_config
+
+ def read_config(*args, callback=None):
+ if log_config is None:
+ return None
+
+ with open(log_config, "rb") as f:
+ log_config_body = yaml.safe_load(f.read())
+
+ if callback:
+ callback(log_config=log_config_body)
+ logging.info("Reloaded log config from %s due to SIGHUP", log_config)
+
+ return log_config_body
+
+ log_config_body = read_config()
+
+ if log_config_body and log_config_body.get("structured") is True:
+ logger = setup_structured_logging(
+ hs, config, log_config_body, logBeginner=logBeginner
+ )
+ appbase.register_sighup(read_config, callback=reload_structured_logging)
+ else:
+ logger = _setup_stdlib_logging(config, log_config_body, logBeginner=logBeginner)
+ appbase.register_sighup(read_config, callback=_reload_stdlib_logging)
+
+ # make sure that the first thing we log is a thing we can grep backwards
+ # for
+ logging.warn("***** STARTING SERVER *****")
+ logging.warn("Server %s version %s", sys.argv[0], get_version_string(synapse))
+ logging.info("Server hostname: %s", config.server_name)
+
+ return logger
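appbase.register_sighup is what ultimately wires read_config (and its reload callback) up to SIGHUP. A stripped-down sketch of that mechanism, assuming a plain handler list rather than Synapse's actual implementation:

    import signal

    _sighup_callbacks = []

    def register_sighup(func, *args, **kwargs):
        _sighup_callbacks.append((func, args, kwargs))

    def _handle_sighup(signum, frame):
        # re-run every registered reader, which in turn invokes its callback
        for func, args, kwargs in _sighup_callbacks:
            func(*args, **kwargs)

    signal.signal(signal.SIGHUP, _handle_sighup)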
diff --git a/synapse/config/metrics.py b/synapse/config/metrics.py
index 3698441963..ec35a6b868 100644
--- a/synapse/config/metrics.py
+++ b/synapse/config/metrics.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,26 +14,47 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import attr
+
+from synapse.python_dependencies import DependencyException, check_requirements
+
from ._base import Config, ConfigError
-MISSING_SENTRY = """Missing sentry-sdk library. This is required to enable sentry
- integration.
- """
+
+@attr.s
+class MetricsFlags(object):
+ known_servers = attr.ib(default=False, validator=attr.validators.instance_of(bool))
+
+ @classmethod
+ def all_off(cls):
+ """
+ Instantiate the flags with all options set to off.
+ """
+ return cls(**{x.name: False for x in attr.fields(cls)})
class MetricsConfig(Config):
def read_config(self, config, **kwargs):
self.enable_metrics = config.get("enable_metrics", False)
self.report_stats = config.get("report_stats", None)
+ self.report_stats_endpoint = config.get(
+ "report_stats_endpoint", "https://matrix.org/report-usage-stats/push"
+ )
self.metrics_port = config.get("metrics_port")
self.metrics_bind_host = config.get("metrics_bind_host", "127.0.0.1")
+ if self.enable_metrics:
+ _metrics_config = config.get("metrics_flags") or {}
+ self.metrics_flags = MetricsFlags(**_metrics_config)
+ else:
+ self.metrics_flags = MetricsFlags.all_off()
+
self.sentry_enabled = "sentry" in config
if self.sentry_enabled:
try:
- import sentry_sdk # noqa F401
- except ImportError:
- raise ConfigError(MISSING_SENTRY)
+ check_requirements("sentry")
+ except DependencyException as e:
+ raise ConfigError(e.message)
self.sentry_dsn = config["sentry"].get("dsn")
if not self.sentry_dsn:
@@ -58,6 +80,16 @@ class MetricsConfig(Config):
#sentry:
# dsn: "..."
+ # Flags to enable Prometheus metrics which are not suitable to be
+ # enabled by default, either for performance reasons or because of
+ # their limited use.
+ #
+ metrics_flags:
+ # Publish synapse_federation_known_servers, a gauge of the number of
+ # servers this homeserver knows about, including itself. May cause
+ # performance problems on large homeservers.
+ #
+ #known_servers: true
+
# Whether or not to report anonymized homeserver usage statistics.
"""
@@ -66,4 +98,10 @@ class MetricsConfig(Config):
else:
res += "report_stats: %s\n" % ("true" if report_stats else "false")
+ res += """
+ # The endpoint to report the anonymized homeserver usage statistics to.
+ # Defaults to https://matrix.org/report-usage-stats/push
+ #
+ #report_stats_endpoint: https://example.com/report-usage-stats/push
+ """
return res
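Because MetricsFlags is an attrs class with an instance_of(bool) validator, a malformed metrics_flags section fails at config-parse time rather than at scrape time. Illustrative behaviour:

    MetricsFlags(known_servers=True)   # accepted
    MetricsFlags(known_servers="yes")  # TypeError from the instance_of validator
    MetricsFlags(surprise=True)        # TypeError: unexpected keyword argument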
diff --git a/synapse/config/password.py b/synapse/config/password.py
index d5b5953f2f..47df98f41a 100644
--- a/synapse/config/password.py
+++ b/synapse/config/password.py
@@ -1,5 +1,7 @@
# -*- coding: utf-8 -*-
-# Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2015-2016 OpenMarket Ltd
+# Copyright 2017-2018 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -29,6 +31,10 @@ class PasswordConfig(Config):
self.password_localdb_enabled = password_config.get("localdb_enabled", True)
self.password_pepper = password_config.get("pepper", "")
+ # Password policy
+ self.password_policy = password_config.get("policy", {})
+ self.password_policy_enabled = self.password_policy.pop("enabled", False)
+
def generate_config_section(self, config_dir_path, server_name, **kwargs):
return """\
password_config:
@@ -46,4 +52,34 @@ class PasswordConfig(Config):
# DO NOT CHANGE THIS AFTER INITIAL SETUP!
#
#pepper: "EVEN_MORE_SECRET"
+
+ # Define and enforce a password policy. Each parameter is optional;
+ # boolean parameters default to 'false' and integer parameters default to 0.
+ # This is an early implementation of MSC2000.
+ #
+ #policy:
+ # Whether to enforce the password policy.
+ #
+ #enabled: true
+
+ # Minimum accepted length for a password.
+ #
+ #minimum_length: 15
+
+ # Whether a password must contain at least one digit.
+ #
+ #require_digit: true
+
+ # Whether a password must contain at least one symbol.
+ # A symbol is any character that's not a number or a letter.
+ #
+ #require_symbol: true
+
+ # Whether a password must contain at least one lowercase letter.
+ #
+ #require_lowercase: true
+
+ # Whether a password must contain at least one uppercase letter.
+ #
+ #require_uppercase: true
"""
diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py
index 33f31cf213..65fd8309e0 100644
--- a/synapse/config/ratelimiting.py
+++ b/synapse/config/ratelimiting.py
@@ -68,6 +68,9 @@ class RatelimitConfig(Config):
)
self.rc_registration = RateLimitConfig(config.get("rc_registration", {}))
+ self.rc_third_party_invite = RateLimitConfig(
+ config.get("rc_third_party_invite", {})
+ )
rc_login_config = config.get("rc_login", {})
self.rc_login_address = RateLimitConfig(rc_login_config.get("address", {}))
@@ -80,6 +83,12 @@ class RatelimitConfig(Config):
"federation_rr_transactions_per_room_per_second", 50
)
+ rc_admin_redaction = config.get("rc_admin_redaction")
+ if rc_admin_redaction:
+ self.rc_admin_redaction = RateLimitConfig(rc_admin_redaction)
+ else:
+ self.rc_admin_redaction = None
+
def generate_config_section(self, **kwargs):
return """\
## Ratelimiting ##
@@ -102,6 +111,11 @@ class RatelimitConfig(Config):
# - one for login that ratelimits login requests based on the account the
# client is attempting to log into, based on the amount of failed login
# attempts for this account.
+ # - one that ratelimits third-party invite requests based on the account
+ # that's making the requests.
+ # - one for ratelimiting redactions by room admins. If this is not explicitly
+ # set then it uses the same ratelimiting as per rc_message. This is useful
+ # to allow room admins to deal with abuse quickly.
#
# The defaults are as shown below.
#
@@ -123,6 +137,14 @@ class RatelimitConfig(Config):
# failed_attempts:
# per_second: 0.17
# burst_count: 3
+ #
+ #rc_third_party_invite:
+ # per_second: 0.2
+ # burst_count: 10
+ #
+ #rc_admin_redaction:
+ # per_second: 1
+ # burst_count: 50
# Ratelimiting settings for incoming federation
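Each RateLimitConfig pairs a sustained rate (per_second) with a burst_count; a generic leaky-bucket check (a sketch, not Synapse's Ratelimiter) shows how such a pair is typically applied:

    import time

    class Bucket(object):
        def __init__(self, per_second, burst_count):
            self.per_second = per_second
            self.burst_count = burst_count
            self.level = 0.0
            self.last = time.monotonic()

        def allow(self):
            now = time.monotonic()
            # drain at the sustained rate, then admit if under the burst cap
            self.level = max(0.0, self.level - (now - self.last) * self.per_second)
            self.last = now
            if self.level >= self.burst_count:
                return False
            self.level += 1.0
            return True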
diff --git a/synapse/config/registration.py b/synapse/config/registration.py
index e2bee3c116..8766917ddd 100644
--- a/synapse/config/registration.py
+++ b/synapse/config/registration.py
@@ -92,13 +92,28 @@ class RegistrationConfig(Config):
self.registrations_require_3pid = config.get("registrations_require_3pid", [])
self.allowed_local_3pids = config.get("allowed_local_3pids", [])
+ self.check_is_for_allowed_local_3pids = config.get(
+ "check_is_for_allowed_local_3pids", None
+ )
+ self.allow_invited_3pids = config.get("allow_invited_3pids", False)
+
+ self.disable_3pid_changes = config.get("disable_3pid_changes", False)
+
self.enable_3pid_lookup = config.get("enable_3pid_lookup", True)
self.registration_shared_secret = config.get("registration_shared_secret")
+ self.register_mxid_from_3pid = config.get("register_mxid_from_3pid")
+ self.register_just_use_email_for_display_name = config.get(
+ "register_just_use_email_for_display_name", False
+ )
self.bcrypt_rounds = config.get("bcrypt_rounds", 12)
self.trusted_third_party_id_servers = config.get(
"trusted_third_party_id_servers", ["matrix.org", "vector.im"]
)
+ account_threepid_delegates = config.get("account_threepid_delegates") or {}
+ self.account_threepid_delegate_email = account_threepid_delegates.get("email")
+ self.account_threepid_delegate_msisdn = account_threepid_delegates.get("msisdn")
+
self.default_identity_server = config.get("default_identity_server")
self.allow_guest_access = config.get("allow_guest_access", False)
@@ -111,6 +126,18 @@ class RegistrationConfig(Config):
raise ConfigError("Invalid auto_join_rooms entry %s" % (room_alias,))
self.autocreate_auto_join_rooms = config.get("autocreate_auto_join_rooms", True)
+ self.disable_set_displayname = config.get("disable_set_displayname", False)
+ self.disable_set_avatar_url = config.get("disable_set_avatar_url", False)
+
+ self.replicate_user_profiles_to = config.get("replicate_user_profiles_to", [])
+ if not isinstance(self.replicate_user_profiles_to, list):
+ self.replicate_user_profiles_to = [self.replicate_user_profiles_to]
+
+ self.shadow_server = config.get("shadow_server", None)
+ self.rewrite_identity_server_urls = config.get(
+ "rewrite_identity_server_urls", {}
+ )
+
self.disable_msisdn_registration = config.get(
"disable_msisdn_registration", False
)
@@ -209,9 +236,32 @@ class RegistrationConfig(Config):
#
#disable_msisdn_registration: true
+ # Derive the user's matrix ID from a type of 3PID used when registering.
+ # This overrides any matrix ID the user proposes when calling /register.
+ # The 3PID type should be present in registrations_require_3pid to avoid
+ # users failing to register if they don't specify the right kind of 3pid.
+ #
+ #register_mxid_from_3pid: email
+
+ # Uncomment to set the display name of new users to their email address,
+ # rather than using the default heuristic.
+ #
+ #register_just_use_email_for_display_name: true
+
# Mandate that users are only allowed to associate certain formats of
# 3PIDs with accounts on this server.
#
+ # If set, use an identity server to establish which 3PIDs are allowed to register.
+ # Overrides allowed_local_3pids below.
+ #
+ #check_is_for_allowed_local_3pids: matrix.org
+ #
+ # If you are using an IS, you can also check whether that IS registers
+ # pending invites for the given 3PID (and then allow it to sign up on
+ # the platform):
+ #
+ #allow_invited_3pids: false
+ #
#allowed_local_3pids:
# - medium: email
# pattern: '.*@matrix\\.org'
@@ -220,6 +270,11 @@ class RegistrationConfig(Config):
# - medium: msisdn
# pattern: '\\+44'
+ # If true, stop users from trying to change the 3PIDs associated with
+ # their accounts.
+ #
+ #disable_3pid_changes: false
+
# Enable 3PIDs lookup requests to identity servers from this server.
#
#enable_3pid_lookup: true
@@ -257,10 +312,66 @@ class RegistrationConfig(Config):
# Also defines the ID server which will be called when an account is
# deactivated (one will be picked arbitrarily).
#
+ # Note: This option is deprecated. Since v0.99.4, Synapse has tracked which identity
+ # server a 3PID has been bound to. For 3PIDs bound before then, Synapse runs a
+ # background migration script, informing itself that the identity server all of its
+ # 3PIDs have been bound to is likely one of the below.
+ #
+ # As of Synapse v1.4.0, all other functionality of this option has been deprecated;
+ # it is now used solely by the background migration script, and can be removed
+ # once that script has run.
#trusted_third_party_id_servers:
# - matrix.org
# - vector.im
+ # If enabled, user IDs, display names and avatar URLs will be replicated
+ # to this server whenever they change.
+ # This is an experimental API currently implemented by sydent to support
+ # cross-homeserver user directories.
+ #
+ #replicate_user_profiles_to: example.com
+
+ # If specified, attempt to replay registrations, profile changes & 3pid
+ # bindings on the given target homeserver via the AS API. The HS is authed
+ # via a given AS token.
+ #
+ #shadow_server:
+ # hs_url: https://shadow.example.com
+ # hs: shadow.example.com
+ # as_token: 12u394refgbdhivsia
+
+ # If enabled, don't let users set their own display names/avatars
+ # other than for the very first time (unless they are a server admin).
+ # Useful when provisioning users based on the contents of a 3rd party
+ # directory and to avoid ambiguities.
+ #
+ #disable_set_displayname: false
+ #disable_set_avatar_url: false
+
+ # Handle threepid (email/phone etc) registration and password resets through a set of
+ # *trusted* identity servers. Note that this allows the configured identity server to
+ # reset passwords for accounts!
+ #
+ # Be aware that if `email` is not set, and SMTP options have not been
+ # configured in the email config block, registration and user password resets via
+ # email will be globally disabled.
+ #
+ # Additionally, if `msisdn` is not set, registration and password resets via msisdn
+ # will be disabled regardless. This is due to Synapse currently not supporting any
+ # method of sending SMS messages on its own.
+ #
+ # To enable using an identity server for operations regarding a particular third-party
+ # identifier type, set the value to the URL of that identity server as shown in the
+ # examples below.
+ #
+ # Servers handling these requests must answer the `/requestToken` endpoints defined
+ # by the Matrix Identity Service API specification:
+ # https://matrix.org/docs/spec/identity_service/latest
+ #
+ account_threepid_delegates:
+ #email: https://example.com # Delegate email sending to matrix.org
+ #msisdn: http://localhost:8090 # Delegate SMS sending to this local process
+
# Users who register on this homeserver will automatically be joined
# to these rooms
#
diff --git a/synapse/config/repository.py b/synapse/config/repository.py
index fdb1f246d0..889ece1dec 100644
--- a/synapse/config/repository.py
+++ b/synapse/config/repository.py
@@ -16,6 +16,7 @@
import os
from collections import namedtuple
+from synapse.python_dependencies import DependencyException, check_requirements
from synapse.util.module_loader import load_module
from ._base import Config, ConfigError
@@ -34,17 +35,6 @@ THUMBNAIL_SIZE_YAML = """\
# method: %(method)s
"""
-MISSING_NETADDR = "Missing netaddr library. This is required for URL preview API."
-
-MISSING_LXML = """Missing lxml library. This is required for URL preview API.
-
- Install by running:
- pip install lxml
-
- Requires libxslt1-dev system package.
- """
-
-
ThumbnailRequirement = namedtuple(
"ThumbnailRequirement", ["width", "height", "method", "media_type"]
)
@@ -104,6 +94,12 @@ class ContentRepositoryConfig(Config):
self.max_image_pixels = self.parse_size(config.get("max_image_pixels", "32M"))
self.max_spider_size = self.parse_size(config.get("max_spider_size", "10M"))
+ self.max_avatar_size = config.get("max_avatar_size")
+ if self.max_avatar_size:
+ self.max_avatar_size = self.parse_size(self.max_avatar_size)
+
+ self.allowed_avatar_mimetypes = config.get("allowed_avatar_mimetypes", [])
+
self.media_store_path = self.ensure_directory(
config.get("media_store_path", "media_store")
)
@@ -171,16 +167,10 @@ class ContentRepositoryConfig(Config):
self.url_preview_enabled = config.get("url_preview_enabled", False)
if self.url_preview_enabled:
try:
- import lxml
+ check_requirements("url_preview")
- lxml # To stop unused lint.
- except ImportError:
- raise ConfigError(MISSING_LXML)
-
- try:
- from netaddr import IPSet
- except ImportError:
- raise ConfigError(MISSING_NETADDR)
+ except DependencyException as e:
+ raise ConfigError(e.message)
if "url_preview_ip_range_blacklist" not in config:
raise ConfigError(
@@ -189,6 +179,9 @@ class ContentRepositoryConfig(Config):
"to work"
)
+ # netaddr is a dependency for url_preview
+ from netaddr import IPSet
+
self.url_preview_ip_range_blacklist = IPSet(
config["url_preview_ip_range_blacklist"]
)
@@ -249,6 +242,30 @@ class ContentRepositoryConfig(Config):
#
#max_upload_size: 10M
+ # The largest allowed size for a user avatar. If not defined, no
+ # restriction will be imposed.
+ #
+ # Note that this only applies when an avatar is changed globally.
+ # Per-room avatar changes are not affected. See allow_per_room_profiles
+ # for disabling that functionality.
+ #
+ # Note that user avatar changes will not work if this is set without
+ # using Synapse's local media repo.
+ #
+ #max_avatar_size: 10M
+
+ # The mimetypes allowed for a user avatar. If not defined, no restriction
+ # will be imposed.
+ #
+ # Note that this only applies when an avatar is changed globally.
+ # Per-room avatar changes are not affected. See allow_per_room_profiles
+ # for disabling that functionality.
+ #
+ # Note that user avatar changes will not work if this is set without
+ # using Synapse's local media repo.
+ #
+ #allowed_avatar_mimetypes: ["image/png", "image/jpeg", "image/gif"]
+
# Maximum number of pixels that will be thumbnailed
#
#max_image_pixels: 32M
diff --git a/synapse/config/server.py b/synapse/config/server.py
index 15449695d1..6fe9d8c6a8 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -17,8 +17,11 @@
import logging
import os.path
+import re
+from textwrap import indent
import attr
+import yaml
from netaddr import IPSet
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
@@ -89,6 +92,12 @@ class ServerConfig(Config):
"require_auth_for_profile_requests", False
)
+ # Whether to require sharing a room with a user to retrieve their
+ # profile data
+ self.limit_profile_requests_to_known_users = config.get(
+ "limit_profile_requests_to_known_users", False
+ )
+
if "restrict_public_rooms_to_local_users" in config and (
"allow_public_rooms_without_auth" in config
or "allow_public_rooms_over_federation" in config
@@ -159,6 +168,16 @@ class ServerConfig(Config):
self.mau_trial_days = config.get("mau_trial_days", 0)
+ # How long to keep redacted events in the database in unredacted form
+ # before redacting them.
+ redaction_retention_period = config.get("redaction_retention_period", "7d")
+ if redaction_retention_period is not None:
+ self.redaction_retention_period = self.parse_duration(
+ redaction_retention_period
+ )
+ else:
+ self.redaction_retention_period = None
+
# Options to disable HS
self.hs_disabled = config.get("hs_disabled", False)
self.hs_disabled_message = config.get("hs_disabled_message", "")
@@ -216,6 +235,130 @@ class ServerConfig(Config):
# events with profile information that differ from the target's global profile.
self.allow_per_room_profiles = config.get("allow_per_room_profiles", True)
+ # Whether to show the users on this homeserver in the user directory. Defaults to
+ # True.
+ self.show_users_in_user_directory = config.get(
+ "show_users_in_user_directory", True
+ )
+
+ retention_config = config.get("retention")
+ if retention_config is None:
+ retention_config = {}
+
+ self.retention_enabled = retention_config.get("enabled", False)
+
+ retention_default_policy = retention_config.get("default_policy")
+
+ if retention_default_policy is not None:
+ self.retention_default_min_lifetime = retention_default_policy.get(
+ "min_lifetime"
+ )
+ if self.retention_default_min_lifetime is not None:
+ self.retention_default_min_lifetime = self.parse_duration(
+ self.retention_default_min_lifetime
+ )
+
+ self.retention_default_max_lifetime = retention_default_policy.get(
+ "max_lifetime"
+ )
+ if self.retention_default_max_lifetime is not None:
+ self.retention_default_max_lifetime = self.parse_duration(
+ self.retention_default_max_lifetime
+ )
+
+ if (
+ self.retention_default_min_lifetime is not None
+ and self.retention_default_max_lifetime is not None
+ and (
+ self.retention_default_min_lifetime
+ > self.retention_default_max_lifetime
+ )
+ ):
+ raise ConfigError(
+ "The default retention policy's 'min_lifetime' can not be greater"
+ " than its 'max_lifetime'"
+ )
+ else:
+ self.retention_default_min_lifetime = None
+ self.retention_default_max_lifetime = None
+
+ self.retention_allowed_lifetime_min = retention_config.get(
+ "allowed_lifetime_min"
+ )
+ if self.retention_allowed_lifetime_min is not None:
+ self.retention_allowed_lifetime_min = self.parse_duration(
+ self.retention_allowed_lifetime_min
+ )
+
+ self.retention_allowed_lifetime_max = retention_config.get(
+ "allowed_lifetime_max"
+ )
+ if self.retention_allowed_lifetime_max is not None:
+ self.retention_allowed_lifetime_max = self.parse_duration(
+ self.retention_allowed_lifetime_max
+ )
+
+ if (
+ self.retention_allowed_lifetime_min is not None
+ and self.retention_allowed_lifetime_max is not None
+ and self.retention_allowed_lifetime_min
+ > self.retention_allowed_lifetime_max
+ ):
+ raise ConfigError(
+ "Invalid retention policy limits: 'allowed_lifetime_min' can not be"
+ " greater than 'allowed_lifetime_max'"
+ )
+
+ self.retention_purge_jobs = []
+ for purge_job_config in retention_config.get("purge_jobs", []):
+ interval_config = purge_job_config.get("interval")
+
+ if interval_config is None:
+ raise ConfigError(
+ "A retention policy's purge jobs configuration must have the"
+ " 'interval' key set."
+ )
+
+ interval = self.parse_duration(interval_config)
+
+ shortest_max_lifetime = purge_job_config.get("shortest_max_lifetime")
+
+ if shortest_max_lifetime is not None:
+ shortest_max_lifetime = self.parse_duration(shortest_max_lifetime)
+
+ longest_max_lifetime = purge_job_config.get("longest_max_lifetime")
+
+ if longest_max_lifetime is not None:
+ longest_max_lifetime = self.parse_duration(longest_max_lifetime)
+
+ if (
+ shortest_max_lifetime is not None
+ and longest_max_lifetime is not None
+ and shortest_max_lifetime > longest_max_lifetime
+ ):
+ raise ConfigError(
+ "A retention policy's purge jobs configuration's"
+ " 'shortest_max_lifetime' value can not be greater than its"
+ " 'longest_max_lifetime' value."
+ )
+
+ self.retention_purge_jobs.append(
+ {
+ "interval": interval,
+ "shortest_max_lifetime": shortest_max_lifetime,
+ "longest_max_lifetime": longest_max_lifetime,
+ }
+ )
+
+ if not self.retention_purge_jobs:
+ self.retention_purge_jobs = [
+ {
+ "interval": self.parse_duration("1d"),
+ "shortest_max_lifetime": None,
+ "longest_max_lifetime": None,
+ }
+ ]
+
self.listeners = []
for listener in config.get("listeners", []):
if not isinstance(listener.get("port", None), int):
@@ -352,7 +495,7 @@ class ServerConfig(Config):
return any(l["tls"] for l in self.listeners)
def generate_config_section(
- self, server_name, data_dir_path, open_private_ports, **kwargs
+ self, server_name, data_dir_path, open_private_ports, listeners, **kwargs
):
_, bind_port = parse_and_validate_server_name(server_name)
if bind_port is not None:
@@ -366,11 +509,68 @@ class ServerConfig(Config):
# Bring DEFAULT_ROOM_VERSION into the local-scope for use in the
# default config string
default_room_version = DEFAULT_ROOM_VERSION
+ secure_listeners = []
+ unsecure_listeners = []
+ private_addresses = ["::1", "127.0.0.1"]
+ if listeners:
+ for listener in listeners:
+ if listener["tls"]:
+ secure_listeners.append(listener)
+ else:
+ # If we don't want open ports we need to bind the listeners
+ # to some address other than 0.0.0.0. Here we choose to use
+ # localhost.
+ # If the addresses are already set, however, we won't
+ # overwrite them.
+ if not open_private_ports:
+ listener.setdefault("bind_addresses", private_addresses)
+
+ unsecure_listeners.append(listener)
+
+ secure_http_bindings = indent(
+ yaml.dump(secure_listeners), " " * 10
+ ).lstrip()
+
+ unsecure_http_bindings = indent(
+ yaml.dump(unsecure_listeners), " " * 10
+ ).lstrip()
+
+ if not unsecure_listeners:
+ unsecure_http_bindings = (
+ """- port: %(unsecure_port)s
+ tls: false
+ type: http
+ x_forwarded: true"""
+ % locals()
+ )
+
+ if not open_private_ports:
+ unsecure_http_bindings += (
+ "\n bind_addresses: ['::1', '127.0.0.1']"
+ )
+
+ unsecure_http_bindings += """
- unsecure_http_binding = "port: %i\n tls: false" % (unsecure_port,)
- if not open_private_ports:
- unsecure_http_binding += (
- "\n bind_addresses: ['::1', '127.0.0.1']"
+ resources:
+ - names: [client, federation]
+ compress: false"""
+
+ if listeners:
+ # comment out this block
+ unsecure_http_bindings = "#" + re.sub(
+ "\n {10}",
+ lambda match: match.group(0) + "#",
+ unsecure_http_bindings,
+ )
+
+ if not secure_listeners:
+ secure_http_bindings = (
+ """#- port: %(bind_port)s
+ # type: http
+ # tls: true
+ # resources:
+ # - names: [client, federation]"""
+ % locals()
)
return (
@@ -419,6 +619,13 @@ class ServerConfig(Config):
#
#require_auth_for_profile_requests: true
+ # Whether to require a user to share a room with another user in order
+ # to retrieve their profile information. Only checked on Client-Server
+ # requests. Profile requests from other servers should be checked by the
+ # requesting server. Defaults to 'false'.
+ #
+ #limit_profile_requests_to_known_users: true
+
# If set to 'false', requires authentication to access the server's public rooms
# directory through the client API. Defaults to 'true'.
#
@@ -556,11 +763,7 @@ class ServerConfig(Config):
# will also need to give Synapse a TLS key and certificate: see the TLS section
# below.)
#
- #- port: %(bind_port)s
- # type: http
- # tls: true
- # resources:
- # - names: [client, federation]
+ %(secure_http_bindings)s
# Unsecure HTTP listener: for when matrix traffic passes through a reverse proxy
# that unwraps TLS.
@@ -568,13 +771,7 @@ class ServerConfig(Config):
# If you plan to use a reverse proxy, please see
# https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.rst.
#
- - %(unsecure_http_binding)s
- type: http
- x_forwarded: true
-
- resources:
- - names: [client, federation]
- compress: false
+ %(unsecure_http_bindings)s
# example additional_resources:
#
@@ -668,6 +865,81 @@ class ServerConfig(Config):
# Defaults to 'true'.
#
#allow_per_room_profiles: false
+
+ # Whether to show the users on this homeserver in the user directory. Defaults to
+ # 'true'.
+ #
+ #show_users_in_user_directory: false
+
+ # Message retention policy at the server level.
+ #
+ # Room admins and mods can define a retention period for their rooms using the
+ # 'm.room.retention' state event, and server admins can cap this period by setting
+ # the 'allowed_lifetime_min' and 'allowed_lifetime_max' config options.
+ #
+ # If this feature is enabled, Synapse will regularly look for and purge events
+ # which are older than the room's maximum retention period. Synapse will also
+ # filter events received over federation so that events that should have been
+ # purged are ignored and not stored again.
+ #
+ retention:
+ # The message retention policies feature is disabled by default. Uncomment the
+ # following line to enable it.
+ #
+ #enabled: true
+
+ # Default retention policy. If set, Synapse will apply it to rooms that lack the
+ # 'm.room.retention' state event. Currently, the value of 'min_lifetime' doesn't
+ # matter much because Synapse doesn't take it into account yet.
+ #
+ #default_policy:
+ # min_lifetime: 1d
+ # max_lifetime: 1y
+
+ # Retention policy limits. If set, a user won't be able to send a
+ # 'm.room.retention' event which features a 'min_lifetime' or a 'max_lifetime'
+ # that's not within this range. This is especially useful in closed federations,
+ # in which server admins can make sure every federating server applies the same
+ # rules.
+ #
+ #allowed_lifetime_min: 1d
+ #allowed_lifetime_max: 1y
+
+ # Server admins can define the settings of the background jobs purging the
+ # events whose lifetime has expired under the 'purge_jobs' section.
+ #
+ # If no configuration is provided, a single job will be set up to delete expired
+ # events in every room daily.
+ #
+ # Each job's configuration defines which range of message lifetimes the job
+ # takes care of. For example, if 'shortest_max_lifetime' is '2d' and
+ # 'longest_max_lifetime' is '3d', the job will handle purging expired events in
+ # rooms whose state defines a 'max_lifetime' that's both higher than 2 days, and
+ # lower than or equal to 3 days. Both the minimum and the maximum value of a
+ # range are optional, e.g. a job with no 'shortest_max_lifetime' and a
+ # 'longest_max_lifetime' of '3d' will handle every room with a retention policy
+ # whose 'max_lifetime' is lower than or equal to three days.
+ #
+ # The rationale for this per-job configuration is that some rooms might have a
+ # retention policy with a low 'max_lifetime', where history needs to be purged
+ # of outdated messages on a very frequent basis (e.g. every 5min), but not want
+ # that purge to be performed by a job that's iterating over every room it knows,
+ # which would be quite heavy on the server.
+ #
+ #purge_jobs:
+ # - shortest_max_lifetime: 1d
+ # longest_max_lifetime: 3d
+ # interval: 5m
+ # - shortest_max_lifetime: 3d
+ # longest_max_lifetime: 1y
+ # interval: 24h
+
+ # How long to keep redacted events in unredacted form in the database. After
+ # this period redacted events get replaced with their redacted form in the DB.
+ #
+ # Defaults to `7d`. Set to `null` to disable.
+ #
+ redaction_retention_period: 7d
"""
% locals()
)
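Given the parsed retention_purge_jobs, one plausible reading of the range semantics described in the comments (strictly greater than shortest_max_lifetime, at most longest_max_lifetime, with either bound optional) is:

    def job_covers(job, max_lifetime_ms):
        lower = job["shortest_max_lifetime"]
        upper = job["longest_max_lifetime"]
        if lower is not None and max_lifetime_ms <= lower:
            return False
        if upper is not None and max_lifetime_ms > upper:
            return False
        return True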
diff --git a/synapse/config/stats.py b/synapse/config/stats.py
index b518a3ed9c..b18ddbd1fa 100644
--- a/synapse/config/stats.py
+++ b/synapse/config/stats.py
@@ -27,19 +27,16 @@ class StatsConfig(Config):
def read_config(self, config, **kwargs):
self.stats_enabled = True
- self.stats_bucket_size = 86400
+ self.stats_bucket_size = 86400 * 1000
self.stats_retention = sys.maxsize
stats_config = config.get("stats", None)
if stats_config:
self.stats_enabled = stats_config.get("enabled", self.stats_enabled)
- self.stats_bucket_size = (
- self.parse_duration(stats_config.get("bucket_size", "1d")) / 1000
+ self.stats_bucket_size = self.parse_duration(
+ stats_config.get("bucket_size", "1d")
)
- self.stats_retention = (
- self.parse_duration(
- stats_config.get("retention", "%ds" % (sys.maxsize,))
- )
- / 1000
+ self.stats_retention = self.parse_duration(
+ stats_config.get("retention", "%ds" % (sys.maxsize,))
)
def generate_config_section(self, config_dir_path, server_name, **kwargs):
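The stats change standardises on milliseconds: parse_duration already returns ms, so the old `/ 1000` divisions (and the bare 86400 fallback) disagreed with it by a factor of a thousand. Roughly, parse_duration behaves like this approximation (the real helper lives on Config):

    def parse_duration_ms(value):
        # bare numbers are taken as milliseconds; strings carry a unit suffix
        if isinstance(value, int):
            return value
        units = {
            "s": 1000,
            "m": 60 * 1000,
            "h": 60 * 60 * 1000,
            "d": 24 * 60 * 60 * 1000,
            "w": 7 * 24 * 60 * 60 * 1000,
            "y": 365 * 24 * 60 * 60 * 1000,
        }
        return int(value[:-1]) * units[value[-1]]

    assert parse_duration_ms("1d") == 86400 * 1000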
diff --git a/synapse/config/tls.py b/synapse/config/tls.py
index ca508a224f..c0148aa95c 100644
--- a/synapse/config/tls.py
+++ b/synapse/config/tls.py
@@ -239,12 +239,38 @@ class TlsConfig(Config):
self.tls_fingerprints.append({"sha256": sha256_fingerprint})
def generate_config_section(
- self, config_dir_path, server_name, data_dir_path, **kwargs
+ self,
+ config_dir_path,
+ server_name,
+ data_dir_path,
+ tls_certificate_path,
+ tls_private_key_path,
+ acme_domain,
+ **kwargs
):
+ """If the acme_domain is specified acme will be enabled.
+ If the TLS paths are not specified the default will be certs in the
+ config directory"""
+
base_key_name = os.path.join(config_dir_path, server_name)
- tls_certificate_path = base_key_name + ".tls.crt"
- tls_private_key_path = base_key_name + ".tls.key"
+ if bool(tls_certificate_path) != bool(tls_private_key_path):
+ raise ConfigError(
+ "Please specify both a cert path and a key path or neither."
+ )
+
+ tls_enabled = (
+ "" if tls_certificate_path and tls_private_key_path or acme_domain else "#"
+ )
+
+ if not tls_certificate_path:
+ tls_certificate_path = base_key_name + ".tls.crt"
+ if not tls_private_key_path:
+ tls_private_key_path = base_key_name + ".tls.key"
+
+ acme_enabled = bool(acme_domain)
+ acme_domain = "matrix.example.com"
+
default_acme_account_file = os.path.join(data_dir_path, "acme_account.key")
# this is to avoid the max line length. Sorrynotsorry
@@ -269,11 +295,11 @@ class TlsConfig(Config):
# instance, if using certbot, use `fullchain.pem` as your certificate,
# not `cert.pem`).
#
- #tls_certificate_path: "%(tls_certificate_path)s"
+ %(tls_enabled)stls_certificate_path: "%(tls_certificate_path)s"
# PEM-encoded private key for TLS
#
- #tls_private_key_path: "%(tls_private_key_path)s"
+ %(tls_enabled)stls_private_key_path: "%(tls_private_key_path)s"
# Whether to verify TLS server certificates for outbound federation requests.
#
@@ -340,10 +366,10 @@ class TlsConfig(Config):
# permission to listen on port 80.
#
acme:
- # ACME support is disabled by default. Uncomment the following line
- # (and tls_certificate_path and tls_private_key_path above) to enable it.
+ # ACME support is disabled by default. Set this to `true` and uncomment
+ # tls_certificate_path and tls_private_key_path above to enable it.
#
- #enabled: true
+ enabled: %(acme_enabled)s
# Endpoint to use to request certificates. If you only want to test,
# use Let's Encrypt's staging url:
@@ -354,17 +380,17 @@ class TlsConfig(Config):
# Port number to listen on for the HTTP-01 challenge. Change this if
# you are forwarding connections through Apache/Nginx/etc.
#
- #port: 80
+ port: 80
# Local addresses to listen on for incoming connections.
# Again, you may want to change this if you are forwarding connections
# through Apache/Nginx/etc.
#
- #bind_addresses: ['::', '0.0.0.0']
+ bind_addresses: ['::', '0.0.0.0']
# How many days remaining on a certificate before it is renewed.
#
- #reprovision_threshold: 30
+ reprovision_threshold: 30
# The domain that the certificate should be for. Normally this
# should be the same as your Matrix domain (i.e., 'server_name'), but,
@@ -378,7 +404,7 @@ class TlsConfig(Config):
#
# If not set, defaults to your 'server_name'.
#
- #domain: matrix.example.com
+ domain: %(acme_domain)s
# file to use for the account key. This will be generated if it doesn't
# exist.
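The %(tls_enabled)s substitution doubles as a comment toggle: it renders as an empty string when a cert/key pair or an ACME domain was supplied, and as "#" otherwise, so the very same template line comes out either active or commented out. In miniature:

    for tls_enabled in ("", "#"):
        print('%stls_certificate_path: "/path/to/cert.pem"' % tls_enabled)
    # tls_certificate_path: "/path/to/cert.pem"
    # #tls_certificate_path: "/path/to/cert.pem"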
diff --git a/synapse/config/tracer.py b/synapse/config/tracer.py
index 95e7ccb3a3..85d99a3166 100644
--- a/synapse/config/tracer.py
+++ b/synapse/config/tracer.py
@@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from synapse.python_dependencies import DependencyException, check_requirements
+
from ._base import Config, ConfigError
@@ -32,6 +34,11 @@ class TracerConfig(Config):
if not self.opentracer_enabled:
return
+ try:
+ check_requirements("opentracing")
+ except DependencyException as e:
+ raise ConfigError(e.message)
+
# The tracer is enabled so sanitize the config
self.opentracer_whitelist = opentracing_config.get("homeserver_whitelist", [])
diff --git a/synapse/config/user_directory.py b/synapse/config/user_directory.py
index f6313e17d4..96493a5dcc 100644
--- a/synapse/config/user_directory.py
+++ b/synapse/config/user_directory.py
@@ -24,6 +24,7 @@ class UserDirectoryConfig(Config):
def read_config(self, config, **kwargs):
self.user_directory_search_enabled = True
self.user_directory_search_all_users = False
+ self.user_directory_defer_to_id_server = None
user_directory_config = config.get("user_directory", None)
if user_directory_config:
self.user_directory_search_enabled = user_directory_config.get(
@@ -32,6 +33,9 @@ class UserDirectoryConfig(Config):
self.user_directory_search_all_users = user_directory_config.get(
"search_all_users", False
)
+ self.user_directory_defer_to_id_server = user_directory_config.get(
+ "defer_to_id_server", None
+ )
def generate_config_section(self, config_dir_path, server_name, **kwargs):
return """
@@ -50,4 +54,9 @@ class UserDirectoryConfig(Config):
#user_directory:
# enabled: true
# search_all_users: false
+ #
+ # # If this is set, user search will be delegated to this ID server instead
+ # # of synapse performing the search itself.
+ # # This is an experimental API.
+ # defer_to_id_server: https://id.example.com
"""
diff --git a/synapse/crypto/event_signing.py b/synapse/crypto/event_signing.py
index 41eabbe717..694fb2c816 100644
--- a/synapse/crypto/event_signing.py
+++ b/synapse/crypto/event_signing.py
@@ -83,7 +83,7 @@ def compute_content_hash(event_dict, hash_algorithm):
event_json_bytes = encode_canonical_json(event_dict)
hashed = hash_algorithm(event_json_bytes)
- return (hashed.name, hashed.digest())
+ return hashed.name, hashed.digest()
def compute_event_reference_hash(event, hash_algorithm=hashlib.sha256):
@@ -106,7 +106,7 @@ def compute_event_reference_hash(event, hash_algorithm=hashlib.sha256):
event_dict.pop("unsigned", None)
event_json_bytes = encode_canonical_json(event_dict)
hashed = hash_algorithm(event_json_bytes)
- return (hashed.name, hashed.digest())
+ return hashed.name, hashed.digest()
def compute_event_signature(event_dict, signature_name, signing_key):
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py
index 6c3e885e72..7cfad192e8 100644
--- a/synapse/crypto/keyring.py
+++ b/synapse/crypto/keyring.py
@@ -18,7 +18,6 @@ import logging
from collections import defaultdict
import six
-from six import raise_from
from six.moves import urllib
import attr
@@ -30,7 +29,6 @@ from signedjson.key import (
from signedjson.sign import (
SignatureVerifyException,
encode_canonical_json,
- sign_json,
signature_ids,
verify_signed_json,
)
@@ -540,13 +538,7 @@ class BaseV2KeyFetcher(object):
verify_key=verify_key, valid_until_ts=key_data["expired_ts"]
)
- # re-sign the json with our own key, so that it is ready if we are asked to
- # give it out as a notary server
- signed_key_json = sign_json(
- response_json, self.config.server_name, self.config.signing_key[0]
- )
-
- signed_key_json_bytes = encode_canonical_json(signed_key_json)
+ key_json_bytes = encode_canonical_json(response_json)
yield make_deferred_yieldable(
defer.gatherResults(
@@ -558,7 +550,7 @@ class BaseV2KeyFetcher(object):
from_server=from_server,
ts_now_ms=time_added_ms,
ts_expires_ms=ts_valid_until_ms,
- key_json_bytes=signed_key_json_bytes,
+ key_json_bytes=key_json_bytes,
)
for key_id in verify_keys
],
@@ -657,9 +649,10 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
},
)
except (NotRetryingDestination, RequestSendFailed) as e:
- raise_from(KeyLookupError("Failed to connect to remote server"), e)
+ # these both have str() representations which we can't really improve upon
+ raise KeyLookupError(str(e))
except HttpResponseException as e:
- raise_from(KeyLookupError("Remote server returned an error"), e)
+ raise KeyLookupError("Remote server returned an error: %s" % (e,))
keys = {}
added_keys = []
@@ -821,9 +814,11 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
timeout=10000,
)
except (NotRetryingDestination, RequestSendFailed) as e:
- raise_from(KeyLookupError("Failed to connect to remote server"), e)
+ # these both have str() representations which we can't really improve
+ # upon
+ raise KeyLookupError(str(e))
except HttpResponseException as e:
- raise_from(KeyLookupError("Remote server returned an error"), e)
+ raise KeyLookupError("Remote server returned an error: %s" % (e,))
if response["server_name"] != server_name:
raise KeyLookupError(
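A side note on the `raise_from` removal above: on Python 3, raising a new exception inside an `except` block automatically attaches the original as `__context__`, so folding the cause into the message loses no debugging information there. A standalone illustration (exception types hypothetical):

    try:
        try:
            raise ConnectionError("connection refused")
        except ConnectionError as e:
            # the wrapper's message carries str(e); on Python 3 the original
            # ConnectionError also rides along as __context__
            raise RuntimeError("Failed to connect to remote server: %s" % (e,))
    except RuntimeError as wrapped:
        assert isinstance(wrapped.__context__, ConnectionError)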
diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index cd52e3f867..4e91df60e6 100644
--- a/synapse/event_auth.py
+++ b/synapse/event_auth.py
@@ -637,11 +637,11 @@ def auth_types_for_event(event):
if event.type == EventTypes.Create:
return []
- auth_types = []
-
- auth_types.append((EventTypes.PowerLevels, ""))
- auth_types.append((EventTypes.Member, event.sender))
- auth_types.append((EventTypes.Create, ""))
+ auth_types = [
+ (EventTypes.PowerLevels, ""),
+ (EventTypes.Member, event.sender),
+ (EventTypes.Create, ""),
+ ]
if event.type == EventTypes.Member:
membership = event.content["membership"]
diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py
index 129771f183..f0de4d961f 100644
--- a/synapse/events/spamcheck.py
+++ b/synapse/events/spamcheck.py
@@ -46,13 +46,33 @@ class SpamChecker(object):
return self.spam_checker.check_event_for_spam(event)
- def user_may_invite(self, inviter_userid, invitee_userid, room_id):
+ def user_may_invite(
+ self,
+ inviter_userid,
+ invitee_userid,
+ third_party_invite,
+ room_id,
+ new_room,
+ published_room,
+ ):
"""Checks if a given user may send an invite
If this method returns false, the invite will be rejected.
Args:
- userid (string): The sender's user ID
+ inviter_userid (str)
+ invitee_userid (str|None): The user ID of the invitee. Is None
+ if this is a third party invite and the 3PID is not bound to a
+ user ID.
+ third_party_invite (dict|None): If a third party invite then is a
+ dict containing the medium and address of the invitee.
+ room_id (str)
+ new_room (bool): Whether the user is being invited to the room as
+ part of a room creation, if so the invitee would have been
+ included in the call to `user_may_create_room`.
+ published_room (bool): Whether the room the user is being invited
+ to has been published in the local homeserver's public room
+ directory.
Returns:
bool: True if the user may send an invite, otherwise False
@@ -61,16 +81,29 @@ class SpamChecker(object):
return True
return self.spam_checker.user_may_invite(
- inviter_userid, invitee_userid, room_id
+ inviter_userid,
+ invitee_userid,
+ third_party_invite,
+ room_id,
+ new_room,
+ published_room,
)
- def user_may_create_room(self, userid):
+ def user_may_create_room(
+ self, userid, invite_list, third_party_invite_list, cloning
+ ):
"""Checks if a given user may create a room
If this method returns false, the creation request will be rejected.
Args:
userid (string): The sender's user ID
+ invite_list (list[str]): List of user IDs that would be invited to
+ the new room.
+ third_party_invite_list (list[dict]): List of third party invites
+ for the new room.
+ cloning (bool): Whether the user is cloning an existing room, e.g.
+ upgrading a room.
Returns:
bool: True if the user may create a room, otherwise False
@@ -78,7 +111,9 @@ class SpamChecker(object):
if self.spam_checker is None:
return True
- return self.spam_checker.user_may_create_room(userid)
+ return self.spam_checker.user_may_create_room(
+ userid, invite_list, third_party_invite_list, cloning
+ )
def user_may_create_room_alias(self, userid, room_alias):
"""Checks if a given user may create a room alias
@@ -113,3 +148,21 @@ class SpamChecker(object):
return True
return self.spam_checker.user_may_publish_room(userid, room_id)
+
+ def user_may_join_room(self, userid, room_id, is_invited):
+ """Checks if a given users is allowed to join a room.
+
+ Is not called when the user creates a room.
+
+ Args:
+ userid (str)
+ room_id (str)
+ is_invited (bool): Whether the user is invited into the room
+
+ Returns:
+ bool: Whether the user may join the room
+ """
+ if self.spam_checker is None:
+ return True
+
+ return self.spam_checker.user_may_join_room(userid, room_id, is_invited)
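For module authors tracking these interface changes, a sketch of a third-party spam checker updated to the new signatures (the class name and policies are hypothetical; only the method signatures come from the wrapper methods above):

    class ExampleSpamChecker(object):
        # Hypothetical policies; signatures mirror the SpamChecker calls above.

        def check_event_for_spam(self, event):
            return False  # never classify events as spam

        def user_may_invite(
            self,
            inviter_userid,
            invitee_userid,
            third_party_invite,
            room_id,
            new_room,
            published_room,
        ):
            # e.g. forbid invites into rooms listed in the public directory
            return not published_room

        def user_may_create_room(
            self, userid, invite_list, third_party_invite_list, cloning
        ):
            # e.g. cap the number of invites sent along with room creation
            return len(invite_list) + len(third_party_invite_list) <= 10

        def user_may_join_room(self, userid, room_id, is_invited):
            # e.g. only allow joins that follow an invite
            return is_invited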
diff --git a/synapse/events/validator.py b/synapse/events/validator.py
index 272426e105..9b90c9ce04 100644
--- a/synapse/events/validator.py
+++ b/synapse/events/validator.py
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from six import string_types
+from six import integer_types, string_types
from synapse.api.constants import MAX_ALIAS_LENGTH, EventTypes, Membership
from synapse.api.errors import Codes, SynapseError
@@ -22,11 +22,12 @@ from synapse.types import EventID, RoomID, UserID
class EventValidator(object):
- def validate_new(self, event):
+ def validate_new(self, event, config):
"""Validates the event has roughly the right format
Args:
- event (FrozenEvent)
+ event (FrozenEvent): The event to validate.
+ config (Config): The homeserver's configuration.
"""
self.validate_builder(event)
@@ -67,6 +68,99 @@ class EventValidator(object):
Codes.INVALID_PARAM,
)
+ if event.type == EventTypes.Retention:
+ self._validate_retention(event, config)
+
+ def _validate_retention(self, event, config):
+ """Checks that an event that defines the retention policy for a room respects the
+ boundaries imposed by the server's administrator.
+
+ Args:
+ event (FrozenEvent): The event to validate.
+ config (Config): The homeserver's configuration.
+ """
+ min_lifetime = event.content.get("min_lifetime")
+ max_lifetime = event.content.get("max_lifetime")
+
+ if min_lifetime is not None:
+ if not isinstance(min_lifetime, integer_types):
+ raise SynapseError(
+ code=400,
+ msg="'min_lifetime' must be an integer",
+ errcode=Codes.BAD_JSON,
+ )
+
+ if (
+ config.retention_allowed_lifetime_min is not None
+ and min_lifetime < config.retention_allowed_lifetime_min
+ ):
+ raise SynapseError(
+ code=400,
+ msg=(
+ "'min_lifetime' can't be lower than the minimum allowed"
+ " value enforced by the server's administrator"
+ ),
+ errcode=Codes.BAD_JSON,
+ )
+
+ if (
+ config.retention_allowed_lifetime_max is not None
+ and min_lifetime > config.retention_allowed_lifetime_max
+ ):
+ raise SynapseError(
+ code=400,
+ msg=(
+ "'min_lifetime' can't be greater than the maximum allowed"
+ " value enforced by the server's administrator"
+ ),
+ errcode=Codes.BAD_JSON,
+ )
+
+ if max_lifetime is not None:
+ if not isinstance(max_lifetime, integer_types):
+ raise SynapseError(
+ code=400,
+ msg="'max_lifetime' must be an integer",
+ errcode=Codes.BAD_JSON,
+ )
+
+ if (
+ config.retention_allowed_lifetime_min is not None
+ and max_lifetime < config.retention_allowed_lifetime_min
+ ):
+ raise SynapseError(
+ code=400,
+ msg=(
+ "'max_lifetime' can't be lower than the minimum allowed value"
+ " enforced by the server's administrator"
+ ),
+ errcode=Codes.BAD_JSON,
+ )
+
+ if (
+ config.retention_allowed_lifetime_max is not None
+ and max_lifetime > config.retention_allowed_lifetime_max
+ ):
+ raise SynapseError(
+ code=400,
+ msg=(
+ "'max_lifetime' can't be greater than the maximum allowed"
+ " value enforced by the server's administrator"
+ ),
+ errcode=Codes.BAD_JSON,
+ )
+
+ if (
+ min_lifetime is not None
+ and max_lifetime is not None
+ and min_lifetime > max_lifetime
+ ):
+ raise SynapseError(
+ code=400,
+ msg="'min_lifetime' can't be greater than 'max_lifetime",
+ errcode=Codes.BAD_JSON,
+ )
+
def validate_builder(self, event):
"""Validates that the builder/event has roughly the right format. Only
checks values that we expect a proto event to have, rather than all the
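Before leaving the validator, two hypothetical `m.room.retention` event contents that exercise `_validate_retention` above (lifetimes are in milliseconds; the allowed min/max come from server config):

    ONE_DAY_MS = 24 * 60 * 60 * 1000

    valid_content = {"min_lifetime": ONE_DAY_MS, "max_lifetime": 365 * ONE_DAY_MS}
    # Rejected with 400/BAD_JSON by _validate_retention: min_lifetime > max_lifetime
    invalid_content = {"min_lifetime": 7 * ONE_DAY_MS, "max_lifetime": ONE_DAY_MS}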
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index bec3080895..6ee6216660 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -355,7 +355,7 @@ class FederationClient(FederationBase):
auth_chain.sort(key=lambda e: e.depth)
- return (pdus, auth_chain)
+ return pdus, auth_chain
except HttpResponseException as e:
if e.code == 400 or e.code == 404:
logger.info("Failed to use get_room_state_ids API, falling back")
@@ -404,7 +404,7 @@ class FederationClient(FederationBase):
signed_auth.sort(key=lambda e: e.depth)
- return (signed_pdus, signed_auth)
+ return signed_pdus, signed_auth
@defer.inlineCallbacks
def get_events_from_store_or_dest(self, destination, room_id, event_ids):
@@ -429,7 +429,7 @@ class FederationClient(FederationBase):
missing_events.discard(k)
if not missing_events:
- return (signed_events, failed_to_fetch)
+ return signed_events, failed_to_fetch
logger.debug(
"Fetching unknown state/auth events %s for room %s",
@@ -465,7 +465,7 @@ class FederationClient(FederationBase):
# We removed all events we successfully fetched from `batch`
failed_to_fetch.update(batch)
- return (signed_events, failed_to_fetch)
+ return signed_events, failed_to_fetch
@defer.inlineCallbacks
@log_function
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index d216c46dfe..da06ab379d 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -43,6 +43,7 @@ from synapse.federation.persistence import TransactionActions
from synapse.federation.units import Edu, Transaction
from synapse.http.endpoint import parse_server_name
from synapse.logging.context import nested_logging_context
+from synapse.logging.opentracing import log_kv, start_active_span_from_edu, trace
from synapse.logging.utils import log_function
from synapse.replication.http.federation import (
ReplicationFederationSendEduRestServlet,
@@ -99,7 +100,7 @@ class FederationServer(FederationBase):
res = self._transaction_from_pdus(pdus).get_dict()
- return (200, res)
+ return 200, res
@defer.inlineCallbacks
@log_function
@@ -162,7 +163,7 @@ class FederationServer(FederationBase):
yield self.transaction_actions.set_response(
origin, transaction, 400, response
)
- return (400, response)
+ return 400, response
received_pdus_counter.inc(len(transaction.pdus))
@@ -264,7 +265,7 @@ class FederationServer(FederationBase):
logger.debug("Returning: %s", str(response))
yield self.transaction_actions.set_response(origin, transaction, 200, response)
- return (200, response)
+ return 200, response
@defer.inlineCallbacks
def received_edu(self, origin, edu_type, content):
@@ -297,7 +298,7 @@ class FederationServer(FederationBase):
event_id,
)
- return (200, resp)
+ return 200, resp
@defer.inlineCallbacks
def on_state_ids_request(self, origin, room_id, event_id):
@@ -314,7 +315,7 @@ class FederationServer(FederationBase):
state_ids = yield self.handler.get_state_ids_for_pdu(room_id, event_id)
auth_chain_ids = yield self.store.get_auth_chain_ids(state_ids)
- return (200, {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids})
+ return 200, {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids}
@defer.inlineCallbacks
def _on_context_state_request_compute(self, room_id, event_id):
@@ -344,15 +345,15 @@ class FederationServer(FederationBase):
pdu = yield self.handler.get_persisted_pdu(origin, event_id)
if pdu:
- return (200, self._transaction_from_pdus([pdu]).get_dict())
+ return 200, self._transaction_from_pdus([pdu]).get_dict()
else:
- return (404, "")
+ return 404, ""
@defer.inlineCallbacks
def on_query_request(self, query_type, args):
received_queries_counter.labels(query_type).inc()
resp = yield self.registry.on_query(query_type, args)
- return (200, resp)
+ return 200, resp
@defer.inlineCallbacks
def on_make_join_request(self, origin, room_id, user_id, supported_versions):
@@ -434,7 +435,7 @@ class FederationServer(FederationBase):
logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures)
yield self.handler.on_send_leave_request(origin, pdu)
- return (200, {})
+ return 200, {}
@defer.inlineCallbacks
def on_event_auth(self, origin, room_id, event_id):
@@ -445,7 +446,7 @@ class FederationServer(FederationBase):
time_now = self._clock.time_msec()
auth_pdus = yield self.handler.on_event_auth(event_id)
res = {"auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus]}
- return (200, res)
+ return 200, res
@defer.inlineCallbacks
def on_query_auth_request(self, origin, content, room_id, event_id):
@@ -498,7 +499,7 @@ class FederationServer(FederationBase):
"missing": ret.get("missing", []),
}
- return (200, send_content)
+ return 200, send_content
@log_function
def on_query_client_keys(self, origin, content):
@@ -507,6 +508,7 @@ class FederationServer(FederationBase):
def on_query_user_devices(self, origin, user_id):
return self.on_query_request("user_devices", user_id)
+ @trace
@defer.inlineCallbacks
@log_function
def on_claim_client_keys(self, origin, content):
@@ -515,6 +517,7 @@ class FederationServer(FederationBase):
for device_id, algorithm in device_keys.items():
query.append((user_id, device_id, algorithm))
+ log_kv({"message": "Claiming one time keys.", "user, device pairs": query})
results = yield self.store.claim_e2e_one_time_keys(query)
json_result = {}
@@ -666,9 +669,9 @@ class FederationServer(FederationBase):
return ret
@defer.inlineCallbacks
- def on_exchange_third_party_invite_request(self, origin, room_id, event_dict):
+ def on_exchange_third_party_invite_request(self, room_id, event_dict):
ret = yield self.handler.on_exchange_third_party_invite_request(
- origin, room_id, event_dict
+ room_id, event_dict
)
return ret
@@ -808,12 +811,13 @@ class FederationHandlerRegistry(object):
if not handler:
logger.warn("No handler registered for EDU type %s", edu_type)
- try:
- yield handler(origin, content)
- except SynapseError as e:
- logger.info("Failed to handle edu %r: %r", edu_type, e)
- except Exception:
- logger.exception("Failed to handle edu %r", edu_type)
+ with start_active_span_from_edu(content, "handle_edu"):
+ try:
+ yield handler(origin, content)
+ except SynapseError as e:
+ logger.info("Failed to handle edu %r: %r", edu_type, e)
+ except Exception:
+ logger.exception("Failed to handle edu %r", edu_type)
def on_query(self, query_type, args):
handler = self.query_handlers.get(query_type)
diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py
index d46f4aaeb1..36f6d470dc 100644
--- a/synapse/federation/sender/__init__.py
+++ b/synapse/federation/sender/__init__.py
@@ -49,7 +49,7 @@ sent_pdus_destination_dist_count = Counter(
sent_pdus_destination_dist_total = Counter(
"synapse_federation_client_sent_pdu_destinations:total",
- "" "Total number of PDUs queued for sending across all destinations",
+ "Total number of PDUs queued for sending across all destinations",
)
diff --git a/synapse/federation/sender/transaction_manager.py b/synapse/federation/sender/transaction_manager.py
index 52706302f2..5b6c79c51a 100644
--- a/synapse/federation/sender/transaction_manager.py
+++ b/synapse/federation/sender/transaction_manager.py
@@ -14,11 +14,20 @@
# limitations under the License.
import logging
+from canonicaljson import json
+
from twisted.internet import defer
from synapse.api.errors import HttpResponseException
from synapse.federation.persistence import TransactionActions
from synapse.federation.units import Transaction
+from synapse.logging.opentracing import (
+ extract_text_map,
+ set_tag,
+ start_active_span_follows_from,
+ tags,
+ whitelisted_homeserver,
+)
from synapse.util.metrics import measure_func
logger = logging.getLogger(__name__)
@@ -44,93 +53,115 @@ class TransactionManager(object):
@defer.inlineCallbacks
def send_new_transaction(self, destination, pending_pdus, pending_edus):
- # Sort based on the order field
- pending_pdus.sort(key=lambda t: t[1])
- pdus = [x[0] for x in pending_pdus]
- edus = pending_edus
-
- success = True
-
- logger.debug("TX [%s] _attempt_new_transaction", destination)
-
- txn_id = str(self._next_txn_id)
-
- logger.debug(
- "TX [%s] {%s} Attempting new transaction" " (pdus: %d, edus: %d)",
- destination,
- txn_id,
- len(pdus),
- len(edus),
- )
-
- transaction = Transaction.create_new(
- origin_server_ts=int(self.clock.time_msec()),
- transaction_id=txn_id,
- origin=self._server_name,
- destination=destination,
- pdus=pdus,
- edus=edus,
- )
-
- self._next_txn_id += 1
-
- logger.info(
- "TX [%s] {%s} Sending transaction [%s]," " (PDUs: %d, EDUs: %d)",
- destination,
- txn_id,
- transaction.transaction_id,
- len(pdus),
- len(edus),
- )
-
- # Actually send the transaction
-
- # FIXME (erikj): This is a bit of a hack to make the Pdu age
- # keys work
- def json_data_cb():
- data = transaction.get_dict()
- now = int(self.clock.time_msec())
- if "pdus" in data:
- for p in data["pdus"]:
- if "age_ts" in p:
- unsigned = p.setdefault("unsigned", {})
- unsigned["age"] = now - int(p["age_ts"])
- del p["age_ts"]
- return data
-
- try:
- response = yield self._transport_layer.send_transaction(
- transaction, json_data_cb
+ # Make a transaction-sending opentracing span. This span follows on from
+ # all the edus in that transaction. This needs to be done since there is
+ # no active span here, so if the edus were not received by the remote the
+ # span would have no causality and it would be forgotten.
+ # span_contexts is the list of span contexts extracted from the EDUs
+ # in this transaction.
+
+ span_contexts = []
+ keep_destination = whitelisted_homeserver(destination)
+
+ for edu in pending_edus:
+ context = edu.get_context()
+ if context:
+ span_contexts.append(extract_text_map(json.loads(context)))
+ if keep_destination:
+ edu.strip_context()
+
+ with start_active_span_follows_from("send_transaction", span_contexts):
+
+ # Sort based on the order field
+ pending_pdus.sort(key=lambda t: t[1])
+ pdus = [x[0] for x in pending_pdus]
+ edus = pending_edus
+
+ success = True
+
+ logger.debug("TX [%s] _attempt_new_transaction", destination)
+
+ txn_id = str(self._next_txn_id)
+
+ logger.debug(
+ "TX [%s] {%s} Attempting new transaction" " (pdus: %d, edus: %d)",
+ destination,
+ txn_id,
+ len(pdus),
+ len(edus),
+ )
+
+ transaction = Transaction.create_new(
+ origin_server_ts=int(self.clock.time_msec()),
+ transaction_id=txn_id,
+ origin=self._server_name,
+ destination=destination,
+ pdus=pdus,
+ edus=edus,
)
- code = 200
- except HttpResponseException as e:
- code = e.code
- response = e.response
- if e.code in (401, 404, 429) or 500 <= e.code:
- logger.info("TX [%s] {%s} got %d response", destination, txn_id, code)
- raise e
+ self._next_txn_id += 1
- logger.info("TX [%s] {%s} got %d response", destination, txn_id, code)
+ logger.info(
+ "TX [%s] {%s} Sending transaction [%s]," " (PDUs: %d, EDUs: %d)",
+ destination,
+ txn_id,
+ transaction.transaction_id,
+ len(pdus),
+ len(edus),
+ )
- if code == 200:
- for e_id, r in response.get("pdus", {}).items():
- if "error" in r:
+ # Actually send the transaction
+
+ # FIXME (erikj): This is a bit of a hack to make the Pdu age
+ # keys work
+ def json_data_cb():
+ data = transaction.get_dict()
+ now = int(self.clock.time_msec())
+ if "pdus" in data:
+ for p in data["pdus"]:
+ if "age_ts" in p:
+ unsigned = p.setdefault("unsigned", {})
+ unsigned["age"] = now - int(p["age_ts"])
+ del p["age_ts"]
+ return data
+
+ try:
+ response = yield self._transport_layer.send_transaction(
+ transaction, json_data_cb
+ )
+ code = 200
+ except HttpResponseException as e:
+ code = e.code
+ response = e.response
+
+ if e.code in (401, 404, 429) or 500 <= e.code:
+ logger.info(
+ "TX [%s] {%s} got %d response", destination, txn_id, code
+ )
+ raise e
+
+ logger.info("TX [%s] {%s} got %d response", destination, txn_id, code)
+
+ if code == 200:
+ for e_id, r in response.get("pdus", {}).items():
+ if "error" in r:
+ logger.warn(
+ "TX [%s] {%s} Remote returned error for %s: %s",
+ destination,
+ txn_id,
+ e_id,
+ r,
+ )
+ else:
+ for p in pdus:
logger.warn(
- "TX [%s] {%s} Remote returned error for %s: %s",
+ "TX [%s] {%s} Failed to send event %s",
destination,
txn_id,
- e_id,
- r,
+ p.event_id,
)
- else:
- for p in pdus:
- logger.warn(
- "TX [%s] {%s} Failed to send event %s",
- destination,
- txn_id,
- p.event_id,
- )
- success = False
+ success = False
- return success
+ set_tag(tags.ERROR, not success)
+ return success
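The control flow above, reduced to its tracing skeleton (a paraphrase of the code, not additional behaviour): the span contexts embedded in the pending EDUs are gathered first, and the transaction span is then started following from all of them, so per-EDU traces stay causally linked even though no span is active at send time.

    import json

    from synapse.logging.opentracing import (
        extract_text_map,
        start_active_span_follows_from,
    )

    def send_skeleton(pending_edus):
        # Gather the span contexts embedded in each EDU...
        span_contexts = []
        for edu in pending_edus:
            context = edu.get_context()  # serialized carrier, "{}" when absent
            if context:
                span_contexts.append(extract_text_map(json.loads(context)))

        # ...then start the transaction span following from all of them.
        with start_active_span_follows_from("send_transaction", span_contexts):
            pass  # build the Transaction and send it, as above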
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index 0cea0d2a10..482a101c09 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -327,21 +327,37 @@ class TransportLayerClient(object):
include_all_networks=False,
third_party_instance_id=None,
):
- path = _create_v1_path("/publicRooms")
-
- args = {"include_all_networks": "true" if include_all_networks else "false"}
- if third_party_instance_id:
- args["third_party_instance_id"] = (third_party_instance_id,)
- if limit:
- args["limit"] = [str(limit)]
- if since_token:
- args["since"] = [since_token]
-
- # TODO(erikj): Actually send the search_filter across federation.
-
- response = yield self.client.get_json(
- destination=remote_server, path=path, args=args, ignore_backoff=True
- )
+ if search_filter:
+ # this uses MSC2197 (Search Filtering over Federation)
+ path = _create_v1_path("/publicRooms")
+
+ data = {"include_all_networks": "true" if include_all_networks else "false"}
+ if third_party_instance_id:
+ data["third_party_instance_id"] = third_party_instance_id
+ if limit:
+ data["limit"] = str(limit)
+ if since_token:
+ data["since"] = since_token
+
+ data["filter"] = search_filter
+
+ response = yield self.client.post_json(
+ destination=remote_server, path=path, data=data, ignore_backoff=True
+ )
+ else:
+ path = _create_v1_path("/publicRooms")
+
+ args = {"include_all_networks": "true" if include_all_networks else "false"}
+ if third_party_instance_id:
+ args["third_party_instance_id"] = (third_party_instance_id,)
+ if limit:
+ args["limit"] = [str(limit)]
+ if since_token:
+ args["since"] = [since_token]
+
+ response = yield self.client.get_json(
+ destination=remote_server, path=path, args=args, ignore_backoff=True
+ )
return response
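For reference, the body sent down the new MSC2197 POST path looks roughly like this (the `generic_search_term` filter key follows the client-server publicRooms filter; all values are hypothetical):

    data = {
        "include_all_networks": "false",
        "limit": "20",                                 # str(limit), as above
        "since": "t204-1234_0_0",                      # hypothetical pagination token
        "filter": {"generic_search_term": "synapse"},  # forwarded search filter
    }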
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index 9a86bd0263..7dc696c7ae 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -22,7 +22,6 @@ import re
from twisted.internet.defer import maybeDeferred
import synapse
-import synapse.logging.opentracing as opentracing
from synapse.api.errors import Codes, FederationDeniedError, SynapseError
from synapse.api.room_versions import RoomVersions
from synapse.api.urls import (
@@ -39,6 +38,12 @@ from synapse.http.servlet import (
parse_string_from_args,
)
from synapse.logging.context import run_in_background
+from synapse.logging.opentracing import (
+ start_active_span,
+ start_active_span_from_request,
+ tags,
+ whitelisted_homeserver,
+)
from synapse.types import ThirdPartyInstanceID, get_domain_from_id
from synapse.util.ratelimitutils import FederationRateLimiter
from synapse.util.versionstring import get_version_string
@@ -288,19 +293,28 @@ class BaseFederationServlet(object):
logger.warn("authenticate_request failed: %s", e)
raise
- # Start an opentracing span
- with opentracing.start_active_span_from_context(
- request.requestHeaders,
- "incoming-federation-request",
- tags={
- "request_id": request.get_request_id(),
- opentracing.tags.SPAN_KIND: opentracing.tags.SPAN_KIND_RPC_SERVER,
- opentracing.tags.HTTP_METHOD: request.get_method(),
- opentracing.tags.HTTP_URL: request.get_redacted_uri(),
- opentracing.tags.PEER_HOST_IPV6: request.getClientIP(),
- "authenticated_entity": origin,
- },
- ):
+ request_tags = {
+ "request_id": request.get_request_id(),
+ tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER,
+ tags.HTTP_METHOD: request.get_method(),
+ tags.HTTP_URL: request.get_redacted_uri(),
+ tags.PEER_HOST_IPV6: request.getClientIP(),
+ "authenticated_entity": origin,
+ "servlet_name": request.request_metrics.name,
+ }
+
+ # Only accept the span context if the origin is authenticated
+ # and whitelisted
+ if origin and whitelisted_homeserver(origin):
+ scope = start_active_span_from_request(
+ request, "incoming-federation-request", tags=request_tags
+ )
+ else:
+ scope = start_active_span(
+ "incoming-federation-request", tags=request_tags
+ )
+
+ with scope:
if origin:
with ratelimiter.ratelimit(origin) as d:
await d
@@ -328,7 +342,11 @@ class BaseFederationServlet(object):
continue
server.register_paths(
- method, (pattern,), self._wrap(code), self.__class__.__name__
+ method,
+ (pattern,),
+ self._wrap(code),
+ self.__class__.__name__,
+ trace=False,
)
@@ -557,7 +575,7 @@ class FederationThirdPartyInviteExchangeServlet(BaseFederationServlet):
async def on_PUT(self, origin, content, query, room_id):
content = await self.handler.on_exchange_third_party_invite_request(
- origin, room_id, content
+ room_id, content
)
return 200, content
@@ -756,6 +774,42 @@ class PublicRoomList(BaseFederationServlet):
)
return 200, data
+ async def on_POST(self, origin, content, query):
+ # This implements MSC2197 (Search Filtering over Federation)
+ if not self.allow_access:
+ raise FederationDeniedError(origin)
+
+ limit = int(content.get("limit", 100))
+ since_token = content.get("since", None)
+ search_filter = content.get("filter", None)
+
+ include_all_networks = content.get("include_all_networks", False)
+ third_party_instance_id = content.get("third_party_instance_id", None)
+
+ if include_all_networks:
+ network_tuple = None
+ if third_party_instance_id is not None:
+ raise SynapseError(
+ 400, "Can't use include_all_networks with an explicit network"
+ )
+ elif third_party_instance_id is None:
+ network_tuple = ThirdPartyInstanceID(None, None)
+ else:
+ network_tuple = ThirdPartyInstanceID.from_string(third_party_instance_id)
+
+ if search_filter is None:
+ logger.warning("Nonefilter")
+
+ data = await self.handler.get_local_public_room_list(
+ limit=limit,
+ since_token=since_token,
+ search_filter=search_filter,
+ network_tuple=network_tuple,
+ from_federation=True,
+ )
+
+ return 200, data
+
class FederationVersionServlet(BaseFederationServlet):
PATH = "/version"
diff --git a/synapse/federation/units.py b/synapse/federation/units.py
index 14aad8f09d..b4d743cde7 100644
--- a/synapse/federation/units.py
+++ b/synapse/federation/units.py
@@ -38,6 +38,12 @@ class Edu(JsonEncodedObject):
internal_keys = ["origin", "destination"]
+ def get_context(self):
+ return getattr(self, "content", {}).get("org.matrix.opentracing_context", "{}")
+
+ def strip_context(self):
+ getattr(self, "content", {})["org.matrix.opentracing_context"] = "{}"
+
class Transaction(JsonEncodedObject):
""" A transaction is a list of Pdus and Edus to be sent to a remote home
diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py
index c29c78bd65..d15c6282fb 100644
--- a/synapse/handlers/_base.py
+++ b/synapse/handlers/_base.py
@@ -45,6 +45,7 @@ class BaseHandler(object):
self.state_handler = hs.get_state_handler()
self.distributor = hs.get_distributor()
self.ratelimiter = hs.get_ratelimiter()
+ self.admin_redaction_ratelimiter = hs.get_admin_redaction_ratelimiter()
self.clock = hs.get_clock()
self.hs = hs
@@ -53,7 +54,7 @@ class BaseHandler(object):
self.event_builder_factory = hs.get_event_builder_factory()
@defer.inlineCallbacks
- def ratelimit(self, requester, update=True):
+ def ratelimit(self, requester, update=True, is_admin_redaction=False):
"""Ratelimits requests.
Args:
@@ -62,6 +63,9 @@ class BaseHandler(object):
Set to False when doing multiple checks for one request (e.g.
to check up front if we would reject the request), and set to
True for the last call for a given request.
+ is_admin_redaction (bool): Whether this is a room admin/moderator
+ redacting an event. If so, we may apply different
+ ratelimits depending on config.
Raises:
LimitExceededError if the request should be ratelimited
@@ -90,16 +94,33 @@ class BaseHandler(object):
messages_per_second = override.messages_per_second
burst_count = override.burst_count
else:
- messages_per_second = self.hs.config.rc_message.per_second
- burst_count = self.hs.config.rc_message.burst_count
-
- allowed, time_allowed = self.ratelimiter.can_do_action(
- user_id,
- time_now,
- rate_hz=messages_per_second,
- burst_count=burst_count,
- update=update,
- )
+ # We default to different values if this is an admin redaction and
+ # the config is set
+ if is_admin_redaction and self.hs.config.rc_admin_redaction:
+ messages_per_second = self.hs.config.rc_admin_redaction.per_second
+ burst_count = self.hs.config.rc_admin_redaction.burst_count
+ else:
+ messages_per_second = self.hs.config.rc_message.per_second
+ burst_count = self.hs.config.rc_message.burst_count
+
+ if is_admin_redaction and self.hs.config.rc_admin_redaction:
+ # If we have separate config for admin redactions we use a separate
+ # ratelimiter
+ allowed, time_allowed = self.admin_redaction_ratelimiter.can_do_action(
+ user_id,
+ time_now,
+ rate_hz=messages_per_second,
+ burst_count=burst_count,
+ update=update,
+ )
+ else:
+ allowed, time_allowed = self.ratelimiter.can_do_action(
+ user_id,
+ time_now,
+ rate_hz=messages_per_second,
+ burst_count=burst_count,
+ update=update,
+ )
if not allowed:
raise LimitExceededError(
retry_after_ms=int(1000 * (time_allowed - time_now))
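The separate limiter is driven by an `rc_admin_redaction` config block; its shape is inferred here from the `per_second`/`burst_count` attributes read above (values illustrative; parsing shown with PyYAML, assumed available):

    import yaml

    RC_ADMIN_REDACTION_YAML = """
    rc_admin_redaction:
      per_second: 1     # sustained redactions per second for room admins/mods
      burst_count: 50   # burst allowance before ratelimiting kicks in
    """
    limits = yaml.safe_load(RC_ADMIN_REDACTION_YAML)["rc_admin_redaction"]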
diff --git a/synapse/handlers/account_data.py b/synapse/handlers/account_data.py
index 8acd9f9a83..38bc67191c 100644
--- a/synapse/handlers/account_data.py
+++ b/synapse/handlers/account_data.py
@@ -51,8 +51,8 @@ class AccountDataEventSource(object):
{"type": account_data_type, "content": content, "room_id": room_id}
)
- return (results, current_stream_id)
+ return results, current_stream_id
@defer.inlineCallbacks
def get_pagination_rows(self, user, config, key):
- return ([], config.to_id)
+ return [], config.to_id
diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py
index 34574f1a12..f2ae7190c8 100644
--- a/synapse/handlers/account_validity.py
+++ b/synapse/handlers/account_validity.py
@@ -38,11 +38,14 @@ logger = logging.getLogger(__name__)
class AccountValidityHandler(object):
def __init__(self, hs):
self.hs = hs
+ self.config = hs.config
self.store = self.hs.get_datastore()
self.sendmail = self.hs.get_sendmail()
self.clock = self.hs.get_clock()
self._account_validity = self.hs.config.account_validity
+ self._show_users_in_user_directory = self.hs.config.show_users_in_user_directory
+ self.profile_handler = self.hs.get_profile_handler()
if self._account_validity.renew_by_email_enabled and load_jinja2_templates:
# Don't do email-specific configuration if renewal by email is disabled.
@@ -62,9 +65,14 @@ class AccountValidityHandler(object):
self._raw_from = email.utils.parseaddr(self._from_string)[1]
self._template_html, self._template_text = load_jinja2_templates(
- config=self.hs.config,
- template_html_name=self.hs.config.email_expiry_template_html,
- template_text_name=self.hs.config.email_expiry_template_text,
+ self.config.email_template_dir,
+ [
+ self.config.email_expiry_template_html,
+ self.config.email_expiry_template_text,
+ ],
+ apply_format_ts_filter=True,
+ apply_mxc_to_http_filter=True,
+ public_baseurl=self.config.public_baseurl,
)
# Check the renewal emails to send and send them every 30min.
@@ -77,6 +85,9 @@ class AccountValidityHandler(object):
self.clock.looping_call(send_emails, 30 * 60 * 1000)
+ # Check every hour to remove expired users from the user directory
+ self.clock.looping_call(self._mark_expired_users_as_inactive, 60 * 60 * 1000)
+
@defer.inlineCallbacks
def send_renewal_emails(self):
"""Gets the list of users whose account is expiring in the amount of time
@@ -262,4 +273,27 @@ class AccountValidityHandler(object):
user_id=user_id, expiration_ts=expiration_ts, email_sent=email_sent
)
+ # Check if renewed users should be reintroduced to the user directory
+ if self._show_users_in_user_directory:
+ # Show the user in the directory again by setting them to active
+ yield self.profile_handler.set_active(
+ UserID.from_string(user_id), True, True
+ )
+
return expiration_ts
+
+ @defer.inlineCallbacks
+ def _mark_expired_users_as_inactive(self):
+ """Iterate over expired users. Mark them as inactive in order to hide them from the
+ user directory.
+
+ Returns:
+ Deferred
+ """
+ # Get expired users
+ expired_user_ids = yield self.store.get_expired_users()
+ expired_users = [UserID.from_string(user_id) for user_id in expired_user_ids]
+
+ # Mark each one as non-active
+ for user in expired_users:
+ yield self.profile_handler.set_active(user, False, True)
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py
index 2f22f56ca4..1a87b58838 100644
--- a/synapse/handlers/admin.py
+++ b/synapse/handlers/admin.py
@@ -94,6 +94,25 @@ class AdminHandler(BaseHandler):
return ret
+ def get_user_server_admin(self, user):
+ """
+ Get the admin bit on a user.
+
+ Args:
+ user (UserID): the (necessarily local) user to manipulate
+ """
+ return self.store.is_server_admin(user)
+
+ def set_user_server_admin(self, user, admin):
+ """
+ Set the admin bit on a user.
+
+ Args:
+ user (UserID): the (necessarily local) user to manipulate
+ admin (bool): whether or not the user should be an admin of this server
+ """
+ return self.store.set_server_admin(user, admin)
+
@defer.inlineCallbacks
def export_user_data(self, user_id, writer):
"""Write all data we have on the user to the given writer.
diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py
index d1a51df6f9..3e9b298154 100644
--- a/synapse/handlers/appservice.py
+++ b/synapse/handlers/appservice.py
@@ -294,12 +294,10 @@ class ApplicationServicesHandler(object):
# we don't know if they are unknown or not since it isn't one of our
# users. We can't poke ASes.
return False
- return
user_info = yield self.store.get_user_by_id(user_id)
if user_info:
return False
- return
# user not found; could be the AS though, so check.
services = self.store.get_app_services()
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index 0f3ebf7ef8..387a0d9684 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -38,6 +38,7 @@ from synapse.api.errors import (
UserDeactivatedError,
)
from synapse.api.ratelimiting import Ratelimiter
+from synapse.config.emailconfig import ThreepidBehaviour
from synapse.logging.context import defer_to_thread
from synapse.module_api import ModuleApi
from synapse.types import UserID
@@ -158,7 +159,7 @@ class AuthHandler(BaseHandler):
return params
@defer.inlineCallbacks
- def check_auth(self, flows, clientdict, clientip, password_servlet=False):
+ def check_auth(self, flows, clientdict, clientip):
"""
Takes a dictionary sent by the client in the login / registration
protocol and handles the User-Interactive Auth flow.
@@ -182,16 +183,6 @@ class AuthHandler(BaseHandler):
clientip (str): The IP address of the client.
- password_servlet (bool): Whether the request originated from
- PasswordRestServlet.
- XXX: This is a temporary hack to distinguish between checking
- for threepid validations locally (in the case of password
- resets) and using the identity server (in the case of binding
- a 3PID during registration). Once we start using the
- homeserver for both tasks, this distinction will no longer be
- necessary.
-
-
Returns:
defer.Deferred[dict, dict, str]: a deferred tuple of
(creds, params, session_id).
@@ -247,9 +238,7 @@ class AuthHandler(BaseHandler):
if "type" in authdict:
login_type = authdict["type"]
try:
- result = yield self._check_auth_dict(
- authdict, clientip, password_servlet=password_servlet
- )
+ result = yield self._check_auth_dict(authdict, clientip)
if result:
creds[login_type] = result
self._save_session(session)
@@ -280,7 +269,7 @@ class AuthHandler(BaseHandler):
creds,
list(clientdict),
)
- return (creds, clientdict, session["id"])
+ return creds, clientdict, session["id"]
ret = self._auth_dict_for_flows(flows, session)
ret["completed"] = list(creds)
@@ -356,7 +345,7 @@ class AuthHandler(BaseHandler):
return sess.setdefault("serverdict", {}).get(key, default)
@defer.inlineCallbacks
- def _check_auth_dict(self, authdict, clientip, password_servlet=False):
+ def _check_auth_dict(self, authdict, clientip):
"""Attempt to validate the auth dict provided by a client
Args:
@@ -374,11 +363,7 @@ class AuthHandler(BaseHandler):
login_type = authdict["type"]
checker = self.checkers.get(login_type)
if checker is not None:
- # XXX: Temporary workaround for having Synapse handle password resets
- # See AuthHandler.check_auth for further details
- res = yield checker(
- authdict, clientip=clientip, password_servlet=password_servlet
- )
+ res = yield checker(authdict, clientip=clientip)
return res
# build a v1-login-style dict out of the authdict and fall back to the
@@ -409,7 +394,7 @@ class AuthHandler(BaseHandler):
# TODO: get this from the homeserver rather than creating a new one for
# each request
try:
- client = self.hs.get_simple_http_client()
+ client = self.hs.get_proxied_http_client()
resp_body = yield client.post_urlencoded_get_json(
self.hs.config.recaptcha_siteverify_api,
args={
@@ -449,7 +434,7 @@ class AuthHandler(BaseHandler):
return defer.succeed(True)
@defer.inlineCallbacks
- def _check_threepid(self, medium, authdict, password_servlet=False, **kwargs):
+ def _check_threepid(self, medium, authdict, **kwargs):
if "threepid_creds" not in authdict:
raise LoginError(400, "Missing threepid_creds", Codes.MISSING_PARAM)
@@ -458,12 +443,18 @@ class AuthHandler(BaseHandler):
identity_handler = self.hs.get_handlers().identity_handler
logger.info("Getting validated threepid. threepidcreds: %r", (threepid_creds,))
- if (
- not password_servlet
- or self.hs.config.email_password_reset_behaviour == "remote"
- ):
- threepid = yield identity_handler.threepid_from_creds(threepid_creds)
- elif self.hs.config.email_password_reset_behaviour == "local":
+ if self.hs.config.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
+ if medium == "email":
+ threepid = yield identity_handler.threepid_from_creds(
+ self.hs.config.account_threepid_delegate_email, threepid_creds
+ )
+ elif medium == "msisdn":
+ threepid = yield identity_handler.threepid_from_creds(
+ self.hs.config.account_threepid_delegate_msisdn, threepid_creds
+ )
+ else:
+ raise SynapseError(400, "Unrecognized threepid medium: %s" % (medium,))
+ elif self.hs.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
row = yield self.store.get_threepid_validation_session(
medium,
threepid_creds["client_secret"],
@@ -722,7 +713,7 @@ class AuthHandler(BaseHandler):
known_login_type = True
is_valid = yield provider.check_password(qualified_user_id, password)
if is_valid:
- return (qualified_user_id, None)
+ return qualified_user_id, None
if not hasattr(provider, "get_supported_login_types") or not hasattr(
provider, "check_auth"
@@ -766,7 +757,7 @@ class AuthHandler(BaseHandler):
)
if canonical_user_id:
- return (canonical_user_id, None)
+ return canonical_user_id, None
if not known_login_type:
raise SynapseError(400, "Unknown login type %s" % login_type)
@@ -816,7 +807,7 @@ class AuthHandler(BaseHandler):
result = (result, None)
return result
- return (None, None)
+ return None, None
@defer.inlineCallbacks
def _check_local_password(self, user_id, password):
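The delegate selection in `_check_threepid` above reduces to a lookup by medium; sketched standalone (attribute correspondence noted in comments; URLs hypothetical):

    delegates = {
        "email": "https://id.example.com",   # config.account_threepid_delegate_email
        "msisdn": "https://id.example.com",  # config.account_threepid_delegate_msisdn
    }

    def delegate_for(medium):
        if medium not in delegates:
            raise ValueError("Unrecognized threepid medium: %s" % (medium,))
        return delegates[medium]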
diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py
index 5f804d1f13..ad00dcecfd 100644
--- a/synapse/handlers/deactivate_account.py
+++ b/synapse/handlers/deactivate_account.py
@@ -35,6 +35,7 @@ class DeactivateAccountHandler(BaseHandler):
self._device_handler = hs.get_device_handler()
self._room_member_handler = hs.get_room_member_handler()
self._identity_handler = hs.get_handlers().identity_handler
+ self._profile_handler = hs.get_profile_handler()
self.user_directory_handler = hs.get_user_directory_handler()
# Flag that indicates whether the process to part users from rooms is running
@@ -102,6 +103,9 @@ class DeactivateAccountHandler(BaseHandler):
yield self.store.user_set_password_hash(user_id, None)
+ user = UserID.from_string(user_id)
+ yield self._profile_handler.set_active(user, False, False)
+
# Add the user to a table of users pending deactivation (ie.
# removal from all the rooms they're a member of)
yield self.store.add_user_pending_deactivation(user_id)
@@ -118,6 +122,10 @@ class DeactivateAccountHandler(BaseHandler):
# parts users from rooms (if it isn't already running)
self._start_user_parting()
+ # Reject all pending invites for the user, so that the user doesn't show up in the
+ # "invited" section of rooms' members list.
+ yield self._reject_pending_invites_for_user(user_id)
+
# Remove all information on the user from the account_validity table.
if self._account_validity_enabled:
yield self.store.delete_account_validity_for_user(user_id)
@@ -127,6 +135,39 @@ class DeactivateAccountHandler(BaseHandler):
return identity_server_supports_unbinding
+ @defer.inlineCallbacks
+ def _reject_pending_invites_for_user(self, user_id):
+ """Reject pending invites addressed to a given user ID.
+
+ Args:
+ user_id (str): The user ID to reject pending invites for.
+ """
+ user = UserID.from_string(user_id)
+ pending_invites = yield self.store.get_invited_rooms_for_user(user_id)
+
+ for room in pending_invites:
+ try:
+ yield self._room_member_handler.update_membership(
+ create_requester(user),
+ user,
+ room.room_id,
+ "leave",
+ ratelimit=False,
+ require_consent=False,
+ )
+ logger.info(
+ "Rejected invite for deactivated user %r in room %r",
+ user_id,
+ room.room_id,
+ )
+ except Exception:
+ logger.exception(
+ "Failed to reject invite for user %r in room %r:"
+ " ignoring and continuing",
+ user_id,
+ room.room_id,
+ )
+
def _start_user_parting(self):
"""
Start the process that goes through the table of users
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index 5c1cf83c9d..71a8f33da3 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -25,6 +25,7 @@ from synapse.api.errors import (
HttpResponseException,
RequestSendFailed,
)
+from synapse.logging.opentracing import log_kv, set_tag, trace
from synapse.types import RoomStreamToken, get_domain_from_id
from synapse.util import stringutils
from synapse.util.async_helpers import Linearizer
@@ -45,6 +46,7 @@ class DeviceWorkerHandler(BaseHandler):
self.state = hs.get_state_handler()
self._auth_handler = hs.get_auth_handler()
+ @trace
@defer.inlineCallbacks
def get_devices_by_user(self, user_id):
"""
@@ -56,6 +58,7 @@ class DeviceWorkerHandler(BaseHandler):
defer.Deferred: list[dict[str, X]]: info on each device
"""
+ set_tag("user_id", user_id)
device_map = yield self.store.get_devices_by_user(user_id)
ips = yield self.store.get_last_client_ip_by_device(user_id, device_id=None)
@@ -64,8 +67,10 @@ class DeviceWorkerHandler(BaseHandler):
for device in devices:
_update_device_from_client_ips(device, ips)
+ log_kv(device_map)
return devices
+ @trace
@defer.inlineCallbacks
def get_device(self, user_id, device_id):
""" Retrieve the given device
@@ -85,9 +90,14 @@ class DeviceWorkerHandler(BaseHandler):
raise errors.NotFoundError
ips = yield self.store.get_last_client_ip_by_device(user_id, device_id)
_update_device_from_client_ips(device, ips)
+
+ set_tag("device", device)
+ set_tag("ips", ips)
+
return device
@measure_func("device.get_user_ids_changed")
+ @trace
@defer.inlineCallbacks
def get_user_ids_changed(self, user_id, from_token):
"""Get list of users that have had the devices updated, or have newly
@@ -97,6 +107,9 @@ class DeviceWorkerHandler(BaseHandler):
user_id (str)
from_token (StreamToken)
"""
+
+ set_tag("user_id", user_id)
+ set_tag("from_token", from_token)
now_room_key = yield self.store.get_room_events_max_id()
room_ids = yield self.store.get_rooms_for_user(user_id)
@@ -148,6 +161,9 @@ class DeviceWorkerHandler(BaseHandler):
# special-case for an empty prev state: include all members
# in the changed list
if not event_ids:
+ log_kv(
+ {"event": "encountered empty previous state", "room_id": room_id}
+ )
for key, event_id in iteritems(current_state_ids):
etype, state_key = key
if etype != EventTypes.Member:
@@ -200,7 +216,11 @@ class DeviceWorkerHandler(BaseHandler):
possibly_joined = []
possibly_left = []
- return {"changed": list(possibly_joined), "left": list(possibly_left)}
+ result = {"changed": list(possibly_joined), "left": list(possibly_left)}
+
+ log_kv(result)
+
+ return result
class DeviceHandler(DeviceWorkerHandler):
@@ -267,6 +287,7 @@ class DeviceHandler(DeviceWorkerHandler):
raise errors.StoreError(500, "Couldn't generate a device ID.")
+ @trace
@defer.inlineCallbacks
def delete_device(self, user_id, device_id):
""" Delete the given device
@@ -284,6 +305,10 @@ class DeviceHandler(DeviceWorkerHandler):
except errors.StoreError as e:
if e.code == 404:
# no match
+ set_tag("error", True)
+ log_kv(
+ {"reason": "User doesn't have device id.", "device_id": device_id}
+ )
pass
else:
raise
@@ -296,6 +321,7 @@ class DeviceHandler(DeviceWorkerHandler):
yield self.notify_device_update(user_id, [device_id])
+ @trace
@defer.inlineCallbacks
def delete_all_devices_for_user(self, user_id, except_device_id=None):
"""Delete all of the user's devices
@@ -331,6 +357,8 @@ class DeviceHandler(DeviceWorkerHandler):
except errors.StoreError as e:
if e.code == 404:
# no match
+ set_tag("error", True)
+ set_tag("reason", "User doesn't have that device id.")
pass
else:
raise
@@ -371,6 +399,7 @@ class DeviceHandler(DeviceWorkerHandler):
else:
raise
+ @trace
@measure_func("notify_device_update")
@defer.inlineCallbacks
def notify_device_update(self, user_id, device_ids):
@@ -386,6 +415,8 @@ class DeviceHandler(DeviceWorkerHandler):
hosts.update(get_domain_from_id(u) for u in users_who_share_room)
hosts.discard(self.server_name)
+ set_tag("target_hosts", hosts)
+
position = yield self.store.add_device_change_to_streams(
user_id, device_ids, list(hosts)
)
@@ -405,6 +436,7 @@ class DeviceHandler(DeviceWorkerHandler):
)
for host in hosts:
self.federation_sender.send_device_messages(host)
+ log_kv({"message": "sent device update to host", "host": host})
@defer.inlineCallbacks
def on_federation_query_user_devices(self, user_id):
@@ -451,12 +483,15 @@ class DeviceListUpdater(object):
iterable=True,
)
+ @trace
@defer.inlineCallbacks
def incoming_device_list_update(self, origin, edu_content):
"""Called on incoming device list update from federation. Responsible
for parsing the EDU and adding to pending updates list.
"""
+ set_tag("origin", origin)
+ set_tag("edu_content", edu_content)
user_id = edu_content.pop("user_id")
device_id = edu_content.pop("device_id")
stream_id = str(edu_content.pop("stream_id")) # They may come as ints
@@ -471,12 +506,30 @@ class DeviceListUpdater(object):
device_id,
origin,
)
+
+ set_tag("error", True)
+ log_kv(
+ {
+ "message": "Got a device list update edu from a user and "
+ "device which does not match the origin of the request.",
+ "user_id": user_id,
+ "device_id": device_id,
+ }
+ )
return
room_ids = yield self.store.get_rooms_for_user(user_id)
if not room_ids:
# We don't share any rooms with this user. Ignore update, as we
# probably won't get any further updates.
+ set_tag("error", True)
+ log_kv(
+ {
+ "message": "Got an update from a user for which "
+ "we don't share any rooms",
+ "other user_id": user_id,
+ }
+ )
logger.warning(
"Got device list update edu for %r/%r, but don't share a room",
user_id,
@@ -578,6 +631,7 @@ class DeviceListUpdater(object):
request:
https://matrix.org/docs/spec/server_server/r0.1.2#get-matrix-federation-v1-user-devices-userid
"""
+ log_kv({"message": "Doing resync to update device list."})
# Fetch all devices for the user.
origin = get_domain_from_id(user_id)
try:
@@ -594,13 +648,20 @@ class DeviceListUpdater(object):
# eventually become consistent.
return
except FederationDeniedError as e:
+ set_tag("error", True)
+ log_kv({"reason": "FederationDeniedError"})
logger.info(e)
return
- except Exception:
+ except Exception as e:
# TODO: Remember that we are now out of sync and try again
# later
+ set_tag("error", True)
+ log_kv(
+ {"message": "Exception raised by federation request", "exception": e}
+ )
logger.exception("Failed to handle device list update for %s", user_id)
return
+ log_kv({"result": result})
stream_id = result["stream_id"]
devices = result["devices"]
diff --git a/synapse/handlers/devicemessage.py b/synapse/handlers/devicemessage.py
index e1ebb6346c..0043cbea17 100644
--- a/synapse/handlers/devicemessage.py
+++ b/synapse/handlers/devicemessage.py
@@ -15,9 +15,17 @@
import logging
+from canonicaljson import json
+
from twisted.internet import defer
from synapse.api.errors import SynapseError
+from synapse.logging.opentracing import (
+ get_active_span_text_map,
+ log_kv,
+ set_tag,
+ start_active_span,
+)
from synapse.types import UserID, get_domain_from_id
from synapse.util.stringutils import random_string
@@ -78,7 +86,8 @@ class DeviceMessageHandler(object):
@defer.inlineCallbacks
def send_device_message(self, sender_user_id, message_type, messages):
-
+ set_tag("number_of_messages", len(messages))
+ set_tag("sender", sender_user_id)
local_messages = {}
remote_messages = {}
for user_id, by_device in messages.items():
@@ -100,15 +109,21 @@ class DeviceMessageHandler(object):
message_id = random_string(16)
+ context = get_active_span_text_map()
+
remote_edu_contents = {}
for destination, messages in remote_messages.items():
- remote_edu_contents[destination] = {
- "messages": messages,
- "sender": sender_user_id,
- "type": message_type,
- "message_id": message_id,
- }
+ with start_active_span("to_device_for_user"):
+ set_tag("destination", destination)
+ remote_edu_contents[destination] = {
+ "messages": messages,
+ "sender": sender_user_id,
+ "type": message_type,
+ "message_id": message_id,
+ "org.matrix.opentracing_context": json.dumps(context),
+ }
+ log_kv({"local_messages": local_messages})
stream_id = yield self.store.add_messages_to_device_inbox(
local_messages, remote_edu_contents
)
@@ -117,6 +132,7 @@ class DeviceMessageHandler(object):
"to_device_key", stream_id, users=local_messages.keys()
)
+ log_kv({"remote_messages": remote_messages})
for destination in remote_messages.keys():
# Enqueue a new federation transaction to send the new
# device messages to each remote destination.
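Putting the pieces together, a remote to-device EDU built by `send_device_message` now carries its span context inline under `org.matrix.opentracing_context`; an illustrative instance (user IDs, device ID, message body, and trace ID are all made up):

    remote_edu_content = {
        "messages": {
            "@bob:remote.example": {"BOBDEVICE": {"dummy": "payload"}},
        },
        "sender": "@alice:local.example",
        "type": "m.room_key_request",
        "message_id": "XyZAbC0123456789",
        "org.matrix.opentracing_context": '{"uber-trace-id": "abc:def:0:1"}',
    }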
diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py
index 1f90b0d278..056fb97acb 100644
--- a/synapse/handlers/e2e_keys.py
+++ b/synapse/handlers/e2e_keys.py
@@ -24,6 +24,7 @@ from twisted.internet import defer
from synapse.api.errors import CodeMessageException, SynapseError
from synapse.logging.context import make_deferred_yieldable, run_in_background
+from synapse.logging.opentracing import log_kv, set_tag, tag_args, trace
from synapse.types import UserID, get_domain_from_id
from synapse.util import unwrapFirstError
from synapse.util.retryutils import NotRetryingDestination
@@ -46,6 +47,7 @@ class E2eKeysHandler(object):
"client_keys", self.on_federation_query_client_keys
)
+ @trace
@defer.inlineCallbacks
def query_devices(self, query_body, timeout):
""" Handle a device key query from a client
@@ -81,6 +83,9 @@ class E2eKeysHandler(object):
else:
remote_queries[user_id] = device_ids
+ set_tag("local_key_query", local_query)
+ set_tag("remote_key_query", remote_queries)
+
# First get local devices.
failures = {}
results = {}
@@ -121,6 +126,7 @@ class E2eKeysHandler(object):
r[user_id] = remote_queries[user_id]
# Now fetch any devices that we don't have in our cache
+ @trace
@defer.inlineCallbacks
def do_remote_query(destination):
"""This is called when we are querying the device list of a user on
@@ -185,6 +191,8 @@ class E2eKeysHandler(object):
except Exception as e:
failure = _exception_to_failure(e)
failures[destination] = failure
+ set_tag("error", True)
+ set_tag("reason", failure)
yield make_deferred_yieldable(
defer.gatherResults(
@@ -198,6 +206,7 @@ class E2eKeysHandler(object):
return {"device_keys": results, "failures": failures}
+ @trace
@defer.inlineCallbacks
def query_local_devices(self, query):
"""Get E2E device keys for local users
@@ -210,6 +219,7 @@ class E2eKeysHandler(object):
defer.Deferred: (resolves to dict[string, dict[string, dict]]):
map from user_id -> device_id -> device details
"""
+ set_tag("local_query", query)
local_query = []
result_dict = {}
@@ -217,6 +227,14 @@ class E2eKeysHandler(object):
# we use UserID.from_string to catch invalid user ids
if not self.is_mine(UserID.from_string(user_id)):
logger.warning("Request for keys for non-local user %s", user_id)
+ log_kv(
+ {
+ "message": "Requested a local key for a user which"
+ " was not local to the homeserver",
+ "user_id": user_id,
+ }
+ )
+ set_tag("error", True)
raise SynapseError(400, "Not a user here")
if not device_ids:
@@ -241,6 +259,7 @@ class E2eKeysHandler(object):
r["unsigned"]["device_display_name"] = display_name
result_dict[user_id][device_id] = r
+ log_kv(results)
return result_dict
@defer.inlineCallbacks
@@ -251,6 +270,7 @@ class E2eKeysHandler(object):
res = yield self.query_local_devices(device_keys_query)
return {"device_keys": res}
+ @trace
@defer.inlineCallbacks
def claim_one_time_keys(self, query, timeout):
local_query = []
@@ -265,6 +285,9 @@ class E2eKeysHandler(object):
domain = get_domain_from_id(user_id)
remote_queries.setdefault(domain, {})[user_id] = device_keys
+ set_tag("local_key_query", local_query)
+ set_tag("remote_key_query", remote_queries)
+
results = yield self.store.claim_e2e_one_time_keys(local_query)
json_result = {}
@@ -276,8 +299,10 @@ class E2eKeysHandler(object):
key_id: json.loads(json_bytes)
}
+ @trace
@defer.inlineCallbacks
def claim_client_keys(destination):
+ set_tag("destination", destination)
device_keys = remote_queries[destination]
try:
remote_result = yield self.federation.claim_client_keys(
@@ -290,6 +315,8 @@ class E2eKeysHandler(object):
except Exception as e:
failure = _exception_to_failure(e)
failures[destination] = failure
+ set_tag("error", True)
+ set_tag("reason", failure)
yield make_deferred_yieldable(
defer.gatherResults(
@@ -313,9 +340,11 @@ class E2eKeysHandler(object):
),
)
+ log_kv({"one_time_keys": json_result, "failures": failures})
return {"one_time_keys": json_result, "failures": failures}
@defer.inlineCallbacks
+ @tag_args
def upload_keys_for_user(self, user_id, device_id, keys):
time_now = self.clock.time_msec()
@@ -329,6 +358,13 @@ class E2eKeysHandler(object):
user_id,
time_now,
)
+ log_kv(
+ {
+ "message": "Updating device_keys for user.",
+ "user_id": user_id,
+ "device_id": device_id,
+ }
+ )
# TODO: Sign the JSON with the server key
changed = yield self.store.set_e2e_device_keys(
user_id, device_id, time_now, device_keys
@@ -336,12 +372,24 @@ class E2eKeysHandler(object):
if changed:
# Only notify about device updates *if* the keys actually changed
yield self.device_handler.notify_device_update(user_id, [device_id])
-
+ else:
+ log_kv({"message": "Not updating device_keys for user", "user_id": user_id})
one_time_keys = keys.get("one_time_keys", None)
if one_time_keys:
+ log_kv(
+ {
+ "message": "Updating one_time_keys for device.",
+ "user_id": user_id,
+ "device_id": device_id,
+ }
+ )
yield self._upload_one_time_keys_for_user(
user_id, device_id, time_now, one_time_keys
)
+ else:
+ log_kv(
+ {"message": "Did not update one_time_keys", "reason": "no keys given"}
+ )
# the device should have been registered already, but it may have been
# deleted due to a race with a DELETE request. Or we may be using an
@@ -352,6 +400,7 @@ class E2eKeysHandler(object):
result = yield self.store.count_e2e_one_time_keys(user_id, device_id)
+ set_tag("one_time_key_counts", result)
return {"one_time_key_counts": result}
@defer.inlineCallbacks
@@ -395,6 +444,7 @@ class E2eKeysHandler(object):
(algorithm, key_id, encode_canonical_json(key).decode("ascii"))
)
+ log_kv({"message": "Inserting new one_time_keys.", "keys": new_keys})
yield self.store.add_e2e_one_time_keys(user_id, device_id, time_now, new_keys)
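The `@trace` decorator sprinkled through this handler wraps a method in a span named after the function. A rough standalone approximation of the pattern, using logging in place of a real opentracing client (the real decorator also has to cope with Twisted Deferreds, which this sketch ignores):

```python
import functools
import logging

logger = logging.getLogger(__name__)

def trace(func):
    # Record entry and exit of the wrapped function, tagged with its name,
    # approximating what a span-creating decorator does.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        logger.debug("start span: %s", func.__name__)
        try:
            return func(*args, **kwargs)
        finally:
            logger.debug("end span: %s", func.__name__)
    return wrapper

@trace
def query_local_devices(query):
    # Stand-in for the real handler: return an empty result per user.
    return {user_id: {} for user_id in query}

print(query_local_devices({"@alice:example.com": None}))
```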
diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py
index 41b871fc59..a9d80f708c 100644
--- a/synapse/handlers/e2e_room_keys.py
+++ b/synapse/handlers/e2e_room_keys.py
@@ -26,6 +26,7 @@ from synapse.api.errors import (
StoreError,
SynapseError,
)
+from synapse.logging.opentracing import log_kv, trace
from synapse.util.async_helpers import Linearizer
logger = logging.getLogger(__name__)
@@ -49,6 +50,7 @@ class E2eRoomKeysHandler(object):
# changed.
self._upload_linearizer = Linearizer("upload_room_keys_lock")
+ @trace
@defer.inlineCallbacks
def get_room_keys(self, user_id, version, room_id=None, session_id=None):
"""Bulk get the E2E room keys for a given backup, optionally filtered to a given
@@ -84,8 +86,10 @@ class E2eRoomKeysHandler(object):
user_id, version, room_id, session_id
)
+ log_kv(results)
return results
+ @trace
@defer.inlineCallbacks
def delete_room_keys(self, user_id, version, room_id=None, session_id=None):
"""Bulk delete the E2E room keys for a given backup, optionally filtered to a given
@@ -107,6 +111,7 @@ class E2eRoomKeysHandler(object):
with (yield self._upload_linearizer.queue(user_id)):
yield self.store.delete_e2e_room_keys(user_id, version, room_id, session_id)
+ @trace
@defer.inlineCallbacks
def upload_room_keys(self, user_id, version, room_keys):
"""Bulk upload a list of room keys into a given backup version, asserting
@@ -186,7 +191,14 @@ class E2eRoomKeysHandler(object):
session_id(str): the session whose room_key we're setting
room_key(dict): the room_key being set
"""
-
+ log_kv(
+ {
+ "message": "Trying to upload room key",
+ "room_id": room_id,
+ "session_id": session_id,
+ "user_id": user_id,
+ }
+ )
# get the room_key for this particular row
current_room_key = None
try:
@@ -195,14 +207,23 @@ class E2eRoomKeysHandler(object):
)
except StoreError as e:
if e.code == 404:
- pass
+ log_kv(
+ {
+ "message": "Room key not found.",
+ "room_id": room_id,
+ "user_id": user_id,
+ }
+ )
else:
raise
if self._should_replace_room_key(current_room_key, room_key):
+ log_kv({"message": "Replacing room key."})
yield self.store.set_e2e_room_key(
user_id, version, room_id, session_id, room_key
)
+ else:
+ log_kv({"message": "Not replacing room_key."})
@staticmethod
def _should_replace_room_key(current_room_key, room_key):
@@ -236,6 +257,7 @@ class E2eRoomKeysHandler(object):
return False
return True
+ @trace
@defer.inlineCallbacks
def create_version(self, user_id, version_info):
"""Create a new backup version. This automatically becomes the new
@@ -294,6 +316,7 @@ class E2eRoomKeysHandler(object):
raise
return res
+ @trace
@defer.inlineCallbacks
def delete_version(self, user_id, version=None):
"""Deletes a given version of the user's e2e_room_keys backup
@@ -314,6 +337,7 @@ class E2eRoomKeysHandler(object):
else:
raise
+ @trace
@defer.inlineCallbacks
def update_version(self, user_id, version, version_info):
"""Update the info about a given version of the user's backup
diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py
index 2f1f10a9af..5e748687e3 100644
--- a/synapse/handlers/events.py
+++ b/synapse/handlers/events.py
@@ -167,7 +167,6 @@ class EventHandler(BaseHandler):
if not event:
return None
- return
users = yield self.store.get_users_in_room(event.room_id)
is_peeking = user.to_string() not in users
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index c86903b98b..d616cbf101 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -326,8 +326,9 @@ class FederationHandler(BaseHandler):
ours = yield self.store.get_state_groups_ids(room_id, seen)
# state_maps is a list of mappings from (type, state_key) to event_id
- # type: list[dict[tuple[str, str], str]]
- state_maps = list(ours.values())
+ state_maps = list(
+ ours.values()
+ ) # type: list[dict[tuple[str, str], str]]
# we don't need this any more, let's delete it.
del ours
@@ -1348,8 +1349,15 @@ class FederationHandler(BaseHandler):
if self.hs.config.block_non_admin_invites:
raise SynapseError(403, "This server does not accept room invites")
+ is_published = yield self.store.is_room_published(event.room_id)
+
if not self.spam_checker.user_may_invite(
- event.sender, event.state_key, event.room_id
+ event.sender,
+ event.state_key,
+ None,
+ room_id=event.room_id,
+ new_room=False,
+ published_room=is_published,
):
raise SynapseError(
403, "This user is not permitted to send invites to this server/user"
@@ -1427,7 +1435,7 @@ class FederationHandler(BaseHandler):
assert event.user_id == user_id
assert event.state_key == user_id
assert event.room_id == room_id
- return (origin, event, format_ver)
+ return origin, event, format_ver
@defer.inlineCallbacks
@log_function
@@ -2506,7 +2514,7 @@ class FederationHandler(BaseHandler):
room_version, event_dict, event, context
)
- EventValidator().validate_new(event)
+ EventValidator().validate_new(event, self.config)
# We need to tell the transaction queue to send this out, even
# though the sender isn't a local user.
@@ -2529,12 +2537,17 @@ class FederationHandler(BaseHandler):
@defer.inlineCallbacks
@log_function
- def on_exchange_third_party_invite_request(self, origin, room_id, event_dict):
+ def on_exchange_third_party_invite_request(self, room_id, event_dict):
"""Handle an exchange_third_party_invite request from a remote server
The remote server will call this when it wants to turn a 3pid invite
into a normal m.room.member invite.
+ Args:
+ room_id (str): The ID of the room.
+
+ event_dict (dict[str, Any]): Dictionary containing the event body.
+
Returns:
Deferred: resolves (to None)
"""
@@ -2564,7 +2577,7 @@ class FederationHandler(BaseHandler):
)
try:
- self.auth.check_from_context(room_version, event, context)
+ yield self.auth.check_from_context(room_version, event, context)
except AuthError as e:
logger.warn("Denying third party invite %r because %s", event, e)
raise e
@@ -2593,7 +2606,12 @@ class FederationHandler(BaseHandler):
original_invite_id, allow_none=True
)
if original_invite:
- display_name = original_invite.content["display_name"]
+ # If the m.room.third_party_invite event's content is empty, it means the
+ # invite has been revoked. In this case, we don't have to raise an error here
+ # because the auth check will fail on the invite (because it's not able to
+ # fetch public keys from the m.room.third_party_invite event's content, which
+ # is empty).
+ display_name = original_invite.content.get("display_name")
event_dict["content"]["third_party_invite"]["display_name"] = display_name
else:
logger.info(
@@ -2608,7 +2626,7 @@ class FederationHandler(BaseHandler):
event, context = yield self.event_creation_handler.create_new_client_event(
builder=builder
)
- EventValidator().validate_new(event)
+ EventValidator().validate_new(event, self.config)
return (event, context)
@defer.inlineCallbacks
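The widened `user_may_invite` call above now hands the spam checker the third-party invite payload plus `room_id`, `new_room`, and `published_room` flags. A toy checker matching that call shape; the class and its quarantined-user rule are hypothetical, only the argument list comes from the call site:

```python
class ExampleSpamChecker(object):
    def user_may_invite(
        self,
        inviter_userid,
        invitee_userid,
        third_party_invite,
        room_id,
        new_room,
        published_room,
    ):
        # Example policy: block a known-bad account from inviting anyone
        # into rooms listed in the public room directory.
        if published_room and inviter_userid == "@spammer:example.com":
            return False
        return True

checker = ExampleSpamChecker()
assert not checker.user_may_invite(
    "@spammer:example.com", "@victim:example.com", None,
    room_id="!abc:example.com", new_room=False, published_room=True,
)
```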
diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py
index d199521b58..894b2e0c9d 100644
--- a/synapse/handlers/identity.py
+++ b/synapse/handlers/identity.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2017 Vector Creations Ltd
-# Copyright 2018 New Vector Ltd
+# Copyright 2018, 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,15 +20,21 @@
import logging
from canonicaljson import json
+from signedjson.key import decode_verify_key_bytes
+from signedjson.sign import verify_signed_json
+from unpaddedbase64 import decode_base64
from twisted.internet import defer
from synapse.api.errors import (
+ AuthError,
CodeMessageException,
Codes,
HttpResponseException,
+ ProxiedRequestError,
SynapseError,
)
+from synapse.util.stringutils import random_string
from ._base import BaseHandler
@@ -39,6 +45,7 @@ class IdentityHandler(BaseHandler):
def __init__(self, hs):
super(IdentityHandler, self).__init__(hs)
+ self.hs = hs
self.http_client = hs.get_simple_http_client()
self.federation_http_client = hs.get_http_client()
@@ -46,81 +53,146 @@ class IdentityHandler(BaseHandler):
self.trust_any_id_server_just_for_testing_do_not_use = (
hs.config.use_insecure_ssl_client_just_for_testing_do_not_use
)
+ self.rewrite_identity_server_urls = hs.config.rewrite_identity_server_urls
+ self._enable_lookup = hs.config.enable_3pid_lookup
- def _should_trust_id_server(self, id_server):
- if id_server not in self.trusted_id_servers:
- if self.trust_any_id_server_just_for_testing_do_not_use:
- logger.warn(
- "Trusting untrustworthy ID server %r even though it isn't"
- " in the trusted id list for testing because"
- " 'use_insecure_ssl_client_just_for_testing_do_not_use'"
- " is set in the config",
- id_server,
- )
- else:
- return False
- return True
+ def _extract_items_from_creds_dict(self, creds):
+ """
+ Retrieve entries from a "credentials" dictionary
- @defer.inlineCallbacks
- def threepid_from_creds(self, creds):
- if "id_server" in creds:
- id_server = creds["id_server"]
- elif "idServer" in creds:
- id_server = creds["idServer"]
- else:
- raise SynapseError(400, "No id_server in creds")
+ Args:
+ creds (dict[str, str]): Dictionary of credentials containing the following keys:
+ * client_secret|clientSecret: A unique secret str provided by the client
+ * id_server|idServer: the domain of the identity server to query
+ * id_access_token: The access token to authenticate to the identity
+ server with.
- if "client_secret" in creds:
- client_secret = creds["client_secret"]
- elif "clientSecret" in creds:
- client_secret = creds["clientSecret"]
- else:
- raise SynapseError(400, "No client_secret in creds")
+ Returns:
+ tuple(str, str, str|None): A tuple containing the client_secret, the id_server,
+ and the id_access_token value if available.
+ """
+ client_secret = creds.get("client_secret") or creds.get("clientSecret")
+ if not client_secret:
+ raise SynapseError(
+ 400, "No client_secret in creds", errcode=Codes.MISSING_PARAM
+ )
- if not self._should_trust_id_server(id_server):
- logger.warn(
- "%s is not a trusted ID server: rejecting 3pid " + "credentials",
- id_server,
+ id_server = creds.get("id_server") or creds.get("idServer")
+ if not id_server:
+ raise SynapseError(
+ 400, "No id_server in creds", errcode=Codes.MISSING_PARAM
)
- return None
- try:
- data = yield self.http_client.get_json(
- "https://%s%s"
- % (id_server, "/_matrix/identity/api/v1/3pid/getValidated3pid"),
- {"sid": creds["sid"], "client_secret": client_secret},
+ id_access_token = creds.get("id_access_token")
+ return client_secret, id_server, id_access_token
+
+ @defer.inlineCallbacks
+ def threepid_from_creds(self, id_server, creds):
+ """
+ Retrieve and validate a threepid identifier from a "credentials" dictionary against a
+ given identity server
+
+ Args:
+ id_server (str|None): The identity server to validate 3PIDs against. If None,
+ we will attempt to extract the id_server from the creds dict
+
+ creds (dict[str, str]): Dictionary containing the following keys:
+ * id_server|idServer: An optional domain name of an identity server
+ * client_secret|clientSecret: A unique secret str provided by the client
+ * sid: The ID of the validation session
+
+ Returns:
+ Deferred[dict[str,str|int]|None]: A dictionary consisting of response params to
+ the /getValidated3pid endpoint of the Identity Service API, or None if the
+ threepid was not found
+ """
+ client_secret = creds.get("client_secret") or creds.get("clientSecret")
+ if not client_secret:
+ raise SynapseError(
+ 400, "Missing param client_secret in creds", errcode=Codes.MISSING_PARAM
)
- except HttpResponseException as e:
- logger.info("getValidated3pid failed with Matrix error: %r", e)
- raise e.to_synapse_error()
+ session_id = creds.get("sid")
+ if not session_id:
+ raise SynapseError(
+ 400, "Missing param session_id in creds", errcode=Codes.MISSING_PARAM
+ )
+ if not id_server:
+ # Attempt to get the id_server from the creds dict
+ id_server = creds.get("id_server") or creds.get("idServer")
+ if not id_server:
+ raise SynapseError(
+ 400, "Missing param id_server in creds", errcode=Codes.MISSING_PARAM
+ )
- if "medium" in data:
- return data
- return None
+ query_params = {"sid": session_id, "client_secret": client_secret}
+
+ # if we have a rewrite rule set for the identity server,
+ # apply it now.
+ if id_server in self.rewrite_identity_server_urls:
+ id_server = self.rewrite_identity_server_urls[id_server]
+
+ url = "https://%s%s" % (
+ id_server,
+ "/_matrix/identity/api/v1/3pid/getValidated3pid",
+ )
+
+ data = yield self.http_client.get_json(url, query_params)
+ return data if "medium" in data else None
@defer.inlineCallbacks
- def bind_threepid(self, creds, mxid):
+ def bind_threepid(self, creds, mxid, use_v2=True):
+ """Bind a 3PID to an identity server
+
+ Args:
+ creds (dict[str, str]): Dictionary of credentials containing the following keys:
+ * client_secret|clientSecret: A unique secret str provided by the client
+ * id_server|idServer: the domain of the identity server to query
+ * id_access_token: The access token to authenticate to the identity
+ server with. Required if use_v2 is true
+ mxid (str): The MXID to bind the 3PID to
+ use_v2 (bool): Whether to use v2 Identity Service API endpoints
+
+ Returns:
+ Deferred[dict]: The response from the identity server
+ """
logger.debug("binding threepid %r to %s", creds, mxid)
- data = None
- if "id_server" in creds:
- id_server = creds["id_server"]
- elif "idServer" in creds:
- id_server = creds["idServer"]
- else:
- raise SynapseError(400, "No id_server in creds")
+ client_secret, id_server, id_access_token = self._extract_items_from_creds_dict(
+ creds
+ )
+
+ sid = creds.get("sid")
+ if not sid:
+ raise SynapseError(
+ 400, "No sid in three_pid_creds", errcode=Codes.MISSING_PARAM
+ )
- if "client_secret" in creds:
- client_secret = creds["client_secret"]
- elif "clientSecret" in creds:
- client_secret = creds["clientSecret"]
+ # If an id_access_token is not supplied, force usage of v1
+ if id_access_token is None:
+ use_v2 = False
+
+ # if we have a rewrite rule set for the identity server,
+ # apply it now, but only for sending the request (not
+ # storing in the database).
+ if id_server in self.rewrite_identity_server_urls:
+ id_server_host = self.rewrite_identity_server_urls[id_server]
+ else:
+ id_server_host = id_server
+
+ # Decide which API endpoint URLs to use
+ headers = {}
+ bind_data = {"sid": sid, "client_secret": client_secret, "mxid": mxid}
+ if use_v2:
+ bind_url = "https://%s/_matrix/identity/v2/3pid/bind" % (id_server_host,)
+ headers["Authorization"] = self.create_id_access_token_header(
+ id_access_token
+ )
else:
- raise SynapseError(400, "No client_secret in creds")
+ bind_url = "https://%s/_matrix/identity/api/v1/3pid/bind" % (id_server_host,)
try:
data = yield self.http_client.post_json_get_json(
- "https://%s%s" % (id_server, "/_matrix/identity/api/v1/3pid/bind"),
- {"sid": creds["sid"], "client_secret": client_secret, "mxid": mxid},
+ bind_url, bind_data, headers=headers
)
logger.debug("bound threepid %r to %s", creds, mxid)
@@ -131,13 +203,23 @@ class IdentityHandler(BaseHandler):
address=data["address"],
id_server=id_server,
)
+
+ return data
+ except HttpResponseException as e:
+ if e.code != 404 or not use_v2:
+ logger.error("3PID bind failed with Matrix error: %r", e)
+ raise e.to_synapse_error()
except CodeMessageException as e:
data = json.loads(e.msg) # XXX WAT?
- return data
+ return data
+
+ logger.info("Got 404 when POSTing JSON %s, falling back to v1 URL", bind_url)
+ return (yield self.bind_threepid(creds, mxid, use_v2=False))
@defer.inlineCallbacks
def try_unbind_threepid(self, mxid, threepid):
- """Removes a binding from an identity server
+ """Attempt to remove a 3PID from an identity server, or if one is not provided, all
+ identity servers we're aware the binding is present on
Args:
mxid (str): Matrix user ID of binding to be removed
@@ -188,6 +270,8 @@ class IdentityHandler(BaseHandler):
server doesn't support unbinding
"""
url = "https://%s/_matrix/identity/api/v1/3pid/unbind" % (id_server,)
+ url_bytes = "/_matrix/identity/api/v1/3pid/unbind".encode("ascii")
+
content = {
"mxid": mxid,
"threepid": {"medium": threepid["medium"], "address": threepid["address"]},
@@ -199,12 +283,22 @@ class IdentityHandler(BaseHandler):
auth_headers = self.federation_http_client.build_auth_headers(
destination=None,
method="POST",
- url_bytes="/_matrix/identity/api/v1/3pid/unbind".encode("ascii"),
+ url_bytes=url_bytes,
content=content,
destination_is=id_server,
)
headers = {b"Authorization": auth_headers}
+ # if we have a rewrite rule set for the identity server,
+ # apply it now.
+ #
+ # Note that destination_is has to be the real id_server, not
+ # the server we connect to.
+ if id_server in self.rewrite_identity_server_urls:
+ id_server = self.rewrite_identity_server_urls[id_server]
+
+ url = "https://%s/_matrix/identity/api/v1/3pid/unbind" % (id_server,)
+
try:
yield self.http_client.post_json_get_json(url, content, headers)
changed = True
@@ -227,27 +321,127 @@ class IdentityHandler(BaseHandler):
return changed
@defer.inlineCallbacks
+ def send_threepid_validation(
+ self,
+ email_address,
+ client_secret,
+ send_attempt,
+ send_email_func,
+ next_link=None,
+ ):
+ """Send a threepid validation email for password reset or
+ registration purposes
+
+ Args:
+ email_address (str): The user's email address
+ client_secret (str): The provided client secret
+ send_attempt (int): Which send attempt this is
+ send_email_func (func): A function that takes an email address, token,
+ client_secret and session_id, sends an email
+ and returns a Deferred.
+ next_link (str|None): The URL to redirect the user to after validation
+
+ Returns:
+ Deferred[str]: The new session_id upon success
+
+ Raises:
+ SynapseError: if an error occurred when sending the email
+ """
+ # Check that this email/client_secret/send_attempt combo is new or
+ # greater than what we've seen previously
+ session = yield self.store.get_threepid_validation_session(
+ "email", client_secret, address=email_address, validated=False
+ )
+
+ # Check to see if a session already exists and that it is not yet
+ # marked as validated
+ if session and session.get("validated_at") is None:
+ session_id = session["session_id"]
+ last_send_attempt = session["last_send_attempt"]
+
+ # Check that the send_attempt is higher than previous attempts
+ if send_attempt <= last_send_attempt:
+ # If not, just return a success without sending an email
+ return session_id
+ else:
+ # A non-validated session does not exist yet.
+ # Generate a session id
+ session_id = random_string(16)
+
+ # Generate a new validation token
+ token = random_string(32)
+
+ # Send the mail with the link containing the token, client_secret
+ # and session_id
+ try:
+ yield send_email_func(email_address, token, client_secret, session_id)
+ except Exception:
+ logger.exception(
+ "Error sending threepid validation email to %s", email_address
+ )
+ raise SynapseError(500, "An error was encountered when sending the email")
+
+ token_expires = (
+ self.hs.clock.time_msec() + self.hs.config.email_validation_token_lifetime
+ )
+
+ yield self.store.start_or_continue_validation_session(
+ "email",
+ email_address,
+ session_id,
+ client_secret,
+ send_attempt,
+ next_link,
+ token,
+ token_expires,
+ )
+
+ return session_id
+
+ @defer.inlineCallbacks
def requestEmailToken(
self, id_server, email, client_secret, send_attempt, next_link=None
):
- if not self._should_trust_id_server(id_server):
- raise SynapseError(
- 400, "Untrusted ID server '%s'" % id_server, Codes.SERVER_NOT_TRUSTED
- )
+ """
+ Request that an external server send an email on our behalf for the purposes
+ of threepid validation.
+ Args:
+ id_server (str): The identity server to proxy to
+ email (str): The email to send the message to
+ client_secret (str): The unique client_secret sent by the user
+ send_attempt (int): Which attempt this is
+ next_link (str|None): A link to redirect the user to once they submit the token
+
+ Returns:
+ The json response body from the server
+ """
params = {
"email": email,
"client_secret": client_secret,
"send_attempt": send_attempt,
}
+ # if we have a rewrite rule set for the identity server,
+ # apply it now.
+ if id_server in self.rewrite_identity_server_urls:
+ id_server = self.rewrite_identity_server_urls[id_server]
+
if next_link:
- params.update({"next_link": next_link})
+ params["next_link"] = next_link
+
+ if self.hs.config.using_identity_server_from_trusted_list:
+ # Warn that a deprecated config option is in use
+ logger.warn(
+ 'The config option "trust_identity_server_for_password_resets" '
+ 'has been replaced by "account_threepid_delegate". '
+ "Please consult the sample config at docs/sample_config.yaml for "
+ "details and update your config file."
+ )
try:
data = yield self.http_client.post_json_get_json(
- "https://%s%s"
- % (id_server, "/_matrix/identity/api/v1/validate/email/requestToken"),
+ id_server + "/_matrix/identity/api/v1/validate/email/requestToken",
params,
)
return data
@@ -257,28 +451,200 @@ class IdentityHandler(BaseHandler):
@defer.inlineCallbacks
def requestMsisdnToken(
- self, id_server, country, phone_number, client_secret, send_attempt, **kwargs
+ self,
+ id_server,
+ country,
+ phone_number,
+ client_secret,
+ send_attempt,
+ next_link=None,
):
- if not self._should_trust_id_server(id_server):
- raise SynapseError(
- 400, "Untrusted ID server '%s'" % id_server, Codes.SERVER_NOT_TRUSTED
- )
+ """
+ Request that an external server send an SMS message on our behalf for the
+ purposes of threepid validation.
+ Args:
+ id_server (str): The identity server to proxy to
+ country (str): The country code of the phone number
+ phone_number (str): The number to send the message to
+ client_secret (str): The unique client_secret sent by the user
+ send_attempt (int): Which attempt this is
+ next_link (str|None): A link to redirect the user to once they submit the token
+ Returns:
+ The json response body from the server
+ """
params = {
"country": country,
"phone_number": phone_number,
"client_secret": client_secret,
"send_attempt": send_attempt,
}
- params.update(kwargs)
+ if next_link:
+ params["next_link"] = next_link
+ if self.hs.config.using_identity_server_from_trusted_list:
+ # Warn that a deprecated config option is in use
+ logger.warn(
+ 'The config option "trust_identity_server_for_password_resets" '
+ 'has been replaced by "account_threepid_delegate". '
+ "Please consult the sample config at docs/sample_config.yaml for "
+ "details and update your config file."
+ )
+
+ # if we have a rewrite rule set for the identity server,
+ # apply it now.
+ if id_server in self.rewrite_identity_server_urls:
+ id_server = self.rewrite_identity_server_urls[id_server]
try:
data = yield self.http_client.post_json_get_json(
- "https://%s%s"
- % (id_server, "/_matrix/identity/api/v1/validate/msisdn/requestToken"),
+ id_server + "/_matrix/identity/api/v1/validate/msisdn/requestToken",
params,
)
return data
except HttpResponseException as e:
logger.info("Proxied requestToken failed: %r", e)
raise e.to_synapse_error()
+
+ # TODO: The following methods are used for proxying IS requests using
+ # the CS API. They should be consolidated with those in RoomMemberHandler
+ # https://github.com/matrix-org/synapse-dinsic/issues/25
+
+ @defer.inlineCallbacks
+ def lookup_3pid(self, id_server, medium, address):
+ """Looks up a 3pid in the passed identity server.
+
+ Args:
+ id_server (str): The server name (including port, if required)
+ of the identity server to use.
+ medium (str): The type of the third party identifier (e.g. "email").
+ address (str): The third party identifier (e.g. "foo@example.com").
+
+ Returns:
+ Deferred[dict]: The result of the lookup. See
+ https://matrix.org/docs/spec/identity_service/r0.1.0.html#association-lookup
+ for details
+ """
+ if not self._enable_lookup:
+ raise AuthError(
+ 403, "Looking up third-party identifiers is denied from this server"
+ )
+
+ target = self.rewrite_identity_server_urls.get(id_server, id_server)
+
+ try:
+ data = yield self.http_client.get_json(
+ "https://%s/_matrix/identity/api/v1/lookup" % (target,),
+ {"medium": medium, "address": address},
+ )
+
+ if "mxid" in data:
+ if "signatures" not in data:
+ raise AuthError(401, "No signatures on 3pid binding")
+ yield self._verify_any_signature(data, id_server)
+
+ except HttpResponseException as e:
+ logger.info("Proxied lookup failed: %r", e)
+ raise e.to_synapse_error()
+ except IOError as e:
+ logger.info("Failed to contact %r: %s", id_server, e)
+ raise ProxiedRequestError(503, "Failed to contact identity server")
+
+ defer.returnValue(data)
+
+ @defer.inlineCallbacks
+ def bulk_lookup_3pid(self, id_server, threepids):
+ """Looks up given 3pids in the passed identity server.
+
+ Args:
+ id_server (str): The server name (including port, if required)
+ of the identity server to use.
+ threepids ([[str, str]]): The third party identifiers to look up, as
+ a list of two-element lists of strings ([medium, address]).
+
+ Returns:
+ Deferred[dict]: The result of the lookup. See
+ https://matrix.org/docs/spec/identity_service/r0.1.0.html#association-lookup
+ for details
+ """
+ if not self._enable_lookup:
+ raise AuthError(
+ 403, "Looking up third-party identifiers is denied from this server"
+ )
+
+ target = self.rewrite_identity_server_urls.get(id_server, id_server)
+
+ try:
+ data = yield self.http_client.post_json_get_json(
+ "https://%s/_matrix/identity/api/v1/bulk_lookup" % (target,),
+ {"threepids": threepids},
+ )
+
+ except HttpResponseException as e:
+ logger.info("Proxied lookup failed: %r", e)
+ raise e.to_synapse_error()
+ except IOError as e:
+ logger.info("Failed to contact %r: %s", id_server, e)
+ raise ProxiedRequestError(503, "Failed to contact identity server")
+
+ defer.returnValue(data)
+
+ @defer.inlineCallbacks
+ def _verify_any_signature(self, data, server_hostname):
+ if server_hostname not in data["signatures"]:
+ raise AuthError(401, "No signature from server %s" % (server_hostname,))
+
+ for key_name, signature in data["signatures"][server_hostname].items():
+ target = self.rewrite_identity_server_urls.get(
+ server_hostname, server_hostname
+ )
+
+ key_data = yield self.http_client.get_json(
+ "https://%s/_matrix/identity/api/v1/pubkey/%s" % (target, key_name)
+ )
+ if "public_key" not in key_data:
+ raise AuthError(
+ 401, "No public key named %s from %s" % (key_name, server_hostname)
+ )
+ verify_signed_json(
+ data,
+ server_hostname,
+ decode_verify_key_bytes(
+ key_name, decode_base64(key_data["public_key"])
+ ),
+ )
+ return
+
+ raise AuthError(401, "No signature from server %s" % (server_hostname,))
+
+
+def create_id_access_token_header(id_access_token):
+ """Create an Authorization header for passing to SimpleHttpClient as the header value
+ of an HTTP request.
+
+ Args:
+ id_access_token (str): An identity server access token.
+
+ Returns:
+ list[bytes]: The ascii-encoded bearer token encased in a list.
+ """
+ # Prefix with Bearer
+ bearer_token = "Bearer %s" % id_access_token
+
+ # Encode the header value to standard ascii
+ bearer_token = bearer_token.encode("ascii")
+
+ # Return as a list as that's how SimpleHttpClient takes header values
+ return [bearer_token]
+
+
+class LookupAlgorithm:
+ """
+ Supported hashing algorithms when performing a 3PID lookup.
+
+ SHA256 - Hashing an (address, medium, pepper) combo with sha256, then url-safe base64
+ encoding
+ NONE - Not performing any hashing. Simply sending an (address, medium) combo in plaintext
+ """
+
+ SHA256 = "sha256"
+ NONE = "none"
diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py
index 42d6650ed9..f991efeee3 100644
--- a/synapse/handlers/initial_sync.py
+++ b/synapse/handlers/initial_sync.py
@@ -449,8 +449,7 @@ class InitialSyncHandler(BaseHandler):
# * The user is a guest user, and has joined the room
# else it will throw.
member_event = yield self.auth.check_user_was_in_room(room_id, user_id)
- return (member_event.membership, member_event.event_id)
- return
+ return member_event.membership, member_event.event_id
except AuthError:
visibility = yield self.state_handler.get_current_state(
room_id, EventTypes.RoomHistoryVisibility, ""
@@ -459,8 +458,7 @@ class InitialSyncHandler(BaseHandler):
visibility
and visibility.content["history_visibility"] == "world_readable"
):
- return (Membership.JOIN, None)
- return
+ return Membership.JOIN, None
raise AuthError(
403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN
)
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index a5e23c4caf..f158700c15 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -24,7 +24,7 @@ from twisted.internet import defer
from twisted.internet.defer import succeed
from synapse import event_auth
-from synapse.api.constants import EventTypes, Membership, RelationTypes
+from synapse.api.constants import EventTypes, Membership, RelationTypes, UserTypes
from synapse.api.errors import (
AuthError,
Codes,
@@ -135,7 +135,7 @@ class MessageHandler(object):
raise NotFoundError("Can't find event for token %s" % (at_token,))
visible_events = yield filter_events_for_client(
- self.store, user_id, last_events
+ self.store, user_id, last_events, apply_retention_policies=False
)
event = last_events[0]
@@ -398,7 +398,7 @@ class EventCreationHandler(object):
403, "You must be in the room to create an alias for it"
)
- self.validator.validate_new(event)
+ self.validator.validate_new(event, self.config)
return (event, context)
@@ -469,6 +469,9 @@ class EventCreationHandler(object):
u = yield self.store.get_user_by_id(user_id)
assert u is not None
+ if u["user_type"] in (UserTypes.SUPPORT, UserTypes.BOT):
+ # support and bot users are not required to consent
+ return
if u["appservice_id"] is not None:
# users registered by an appservice are exempt
return
@@ -612,7 +615,7 @@ class EventCreationHandler(object):
if requester:
context.app_service = requester.app_service
- self.validator.validate_new(event)
+ self.validator.validate_new(event, self.config)
# If this event is an annotation then we check that the sender
# can't annotate the same way twice (e.g. stops users from liking an
@@ -726,7 +729,27 @@ class EventCreationHandler(object):
assert not self.config.worker_app
if ratelimit:
- yield self.base_handler.ratelimit(requester)
+ # We check if this is a room admin redacting an event so that we
+ # can apply different ratelimiting. We do this by simply checking
+ # it's not a self-redaction (to avoid having to look up whether the
+ # user is actually admin or not).
+ is_admin_redaction = False
+ if event.type == EventTypes.Redaction:
+ original_event = yield self.store.get_event(
+ event.redacts,
+ check_redacted=False,
+ get_prev_content=False,
+ allow_rejected=False,
+ allow_none=True,
+ )
+
+ is_admin_redaction = (
+ original_event and event.sender != original_event.sender
+ )
+
+ yield self.base_handler.ratelimit(
+ requester, is_admin_redaction=is_admin_redaction
+ )
yield self.base_handler.maybe_kick_guest_users(event, context)
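The redaction hunk above picks a different ratelimit bucket when a room admin redacts someone else's event. The decision reduces to a small pure function, sketched here with placeholder rates (not Synapse's defaults):

```python
def pick_ratelimit(event_type, sender, original_sender, message_rate, admin_redaction_rate):
    # Redacting somebody else's event implies admin/moderator action, since
    # ordinary users may only redact their own events.
    is_admin_redaction = (
        event_type == "m.room.redaction"
        and original_sender is not None
        and sender != original_sender
    )
    return admin_redaction_rate if is_admin_redaction else message_rate

# Admin redacting another user's message gets the more generous bucket.
assert pick_ratelimit("m.room.redaction", "@mod:hs", "@spammer:hs", 0.2, 50.0) == 50.0
# Self-redactions stay on the normal message ratelimit.
assert pick_ratelimit("m.room.redaction", "@alice:hs", "@alice:hs", 0.2, 50.0) == 0.2
```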
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index d83aab3f74..d8c3feff16 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -15,12 +15,15 @@
# limitations under the License.
import logging
+from six import iteritems
+
from twisted.internet import defer
from twisted.python.failure import Failure
from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import SynapseError
from synapse.logging.context import run_in_background
+from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.state import StateFilter
from synapse.types import RoomStreamToken
from synapse.util.async_helpers import ReadWriteLock
@@ -70,6 +73,7 @@ class PaginationHandler(object):
self.auth = hs.get_auth()
self.store = hs.get_datastore()
self.clock = hs.get_clock()
+ self._server_name = hs.hostname
self.pagination_lock = ReadWriteLock()
self._purges_in_progress_by_room = set()
@@ -77,6 +81,111 @@ class PaginationHandler(object):
self._purges_by_id = {}
self._event_serializer = hs.get_event_client_serializer()
+ self._retention_default_max_lifetime = hs.config.retention_default_max_lifetime
+
+ if hs.config.retention_enabled:
+ # Run the purge jobs described in the configuration file.
+ for job in hs.config.retention_purge_jobs:
+ self.clock.looping_call(
+ run_as_background_process,
+ job["interval"],
+ "purge_history_for_rooms_in_range",
+ self.purge_history_for_rooms_in_range,
+ job["shortest_max_lifetime"],
+ job["longest_max_lifetime"],
+ )
+
+ @defer.inlineCallbacks
+ def purge_history_for_rooms_in_range(self, min_ms, max_ms):
+ """Purge outdated events from rooms within the given retention range.
+
+ If a default retention policy is defined in the server's configuration and its
+ 'max_lifetime' is within this range, also targets rooms which don't have a
+ retention policy.
+
+ Args:
+ min_ms (int|None): Duration in milliseconds that defines the lower limit of
+ the range to handle (exclusive). If None, it means that the range has no
+ lower limit.
+ max_ms (int|None): Duration in milliseconds that defines the upper limit of
+ the range to handle (inclusive). If None, it means that the range has no
+ upper limit.
+ """
+ # We want the storage layer to include rooms with no retention policy in its
+ # return value only if a default retention policy is defined in the server's
+ # configuration and that policy's 'max_lifetime' is either lower (or equal) than
+ # max_ms or higher than min_ms (or both).
+ if self._retention_default_max_lifetime is not None:
+ include_null = True
+
+ if min_ms is not None and min_ms >= self._retention_default_max_lifetime:
+ # The default max_lifetime is lower than (or equal to) min_ms.
+ include_null = False
+
+ if max_ms is not None and max_ms < self._retention_default_max_lifetime:
+ # The default max_lifetime is higher than max_ms.
+ include_null = False
+ else:
+ include_null = False
+
+ rooms = yield self.store.get_rooms_for_retention_period_in_range(
+ min_ms, max_ms, include_null
+ )
+
+ for room_id, retention_policy in iteritems(rooms):
+ if room_id in self._purges_in_progress_by_room:
+ logger.warning(
+ "[purge] not purging room %s as there's an ongoing purge running"
+ " for this room",
+ room_id,
+ )
+ continue
+
+ max_lifetime = retention_policy["max_lifetime"]
+
+ if max_lifetime is None:
+ # If max_lifetime is None, it means that include_null equals True,
+ # therefore we can safely assume that there is a default policy defined
+ # in the server's configuration.
+ max_lifetime = self._retention_default_max_lifetime
+
+ # Figure out what token we should start purging at.
+ ts = self.clock.time_msec() - max_lifetime
+
+ stream_ordering = (yield self.store.find_first_stream_ordering_after_ts(ts))
+
+ r = (
+ yield self.store.get_room_event_after_stream_ordering(
+ room_id, stream_ordering
+ )
+ )
+ if not r:
+ logger.warning(
+ "[purge] purging events not possible: No event found "
+ "(ts %i => stream_ordering %i)",
+ ts,
+ stream_ordering,
+ )
+ continue
+
+ (stream, topo, _event_id) = r
+ token = "t%d-%d" % (topo, stream)
+
+ purge_id = random_string(16)
+
+ self._purges_by_id[purge_id] = PurgeStatus()
+
+ logger.info(
+ "Starting purging events in room %s (purge_id %s)" % (room_id, purge_id)
+ )
+
+ # We want to purge everything, including local events, and to run the purge in
+ # the background so that it's not blocking any other operation apart from
+ # other purges in the same room.
+ run_as_background_process(
+ "_purge_history", self._purge_history, purge_id, room_id, token, True
+ )
+
def start_purge_history(self, room_id, token, delete_local_events=False):
"""Start off a history purge on a room.
@@ -153,6 +262,22 @@ class PaginationHandler(object):
"""
return self._purges_by_id.get(purge_id)
+ async def purge_room(self, room_id):
+ """Purge the given room from the database"""
+ with (await self.pagination_lock.write(room_id)):
+ # check we know about the room
+ await self.store.get_room_version(room_id)
+
+ # first check that we have no users in this room
+ joined = await defer.maybeDeferred(
+ self.store.is_host_joined, room_id, self._server_name
+ )
+
+ if joined:
+ raise SynapseError(400, "Users are still joined to this room")
+
+ await self.store.purge_room(room_id)
+
@defer.inlineCallbacks
def get_messages(
self,
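The `include_null` computation in `purge_history_for_rooms_in_range` is easy to get backwards, so here it is isolated as a pure function with a couple of checks (durations in milliseconds; the seven-day default is only an example):

```python
def include_rooms_without_policy(default_max_lifetime, min_ms, max_ms):
    # Rooms with no retention policy inherit the server default, so only
    # include them when that default falls inside the (min_ms, max_ms] range.
    if default_max_lifetime is None:
        return False
    if min_ms is not None and min_ms >= default_max_lifetime:
        return False
    if max_ms is not None and max_ms < default_max_lifetime:
        return False
    return True

DAY_MS = 24 * 60 * 60 * 1000
assert include_rooms_without_policy(7 * DAY_MS, None, 30 * DAY_MS)
assert not include_rooms_without_policy(7 * DAY_MS, 7 * DAY_MS, None)
assert not include_rooms_without_policy(None, None, None)
```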
diff --git a/synapse/handlers/password_policy.py b/synapse/handlers/password_policy.py
new file mode 100644
index 0000000000..d06b110269
--- /dev/null
+++ b/synapse/handlers/password_policy.py
@@ -0,0 +1,93 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import re
+
+from synapse.api.errors import Codes, PasswordRefusedError
+
+logger = logging.getLogger(__name__)
+
+
+class PasswordPolicyHandler(object):
+ def __init__(self, hs):
+ self.policy = hs.config.password_policy
+ self.enabled = hs.config.password_policy_enabled
+
+ # Regexps for the spec'd policy parameters.
+ self.regexp_digit = re.compile("[0-9]")
+ self.regexp_symbol = re.compile("[^a-zA-Z0-9]")
+ self.regexp_uppercase = re.compile("[A-Z]")
+ self.regexp_lowercase = re.compile("[a-z]")
+
+ def validate_password(self, password):
+ """Checks whether a given password complies with the server's policy.
+
+ Args:
+ password (str): The password to check against the server's policy.
+
+ Raises:
+ PasswordRefusedError: The password doesn't comply with the server's policy.
+ """
+
+ if not self.enabled:
+ return
+
+ minimum_accepted_length = self.policy.get("minimum_length", 0)
+ if len(password) < minimum_accepted_length:
+ raise PasswordRefusedError(
+ msg=(
+ "The password must be at least %d characters long"
+ % minimum_accepted_length
+ ),
+ errcode=Codes.PASSWORD_TOO_SHORT,
+ )
+
+ if (
+ self.policy.get("require_digit", False)
+ and self.regexp_digit.search(password) is None
+ ):
+ raise PasswordRefusedError(
+ msg="The password must include at least one digit",
+ errcode=Codes.PASSWORD_NO_DIGIT,
+ )
+
+ if (
+ self.policy.get("require_symbol", False)
+ and self.regexp_symbol.search(password) is None
+ ):
+ raise PasswordRefusedError(
+ msg="The password must include at least one symbol",
+ errcode=Codes.PASSWORD_NO_SYMBOL,
+ )
+
+ if (
+ self.policy.get("require_uppercase", False)
+ and self.regexp_uppercase.search(password) is None
+ ):
+ raise PasswordRefusedError(
+ msg="The password must include at least one uppercase letter",
+ errcode=Codes.PASSWORD_NO_UPPERCASE,
+ )
+
+ if (
+ self.policy.get("require_lowercase", False)
+ and self.regexp_lowercase.search(password) is None
+ ):
+ raise PasswordRefusedError(
+ msg="The password must include at least one lowercase letter",
+ errcode=Codes.PASSWORD_NO_LOWERCASE,
+ )
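A usage sketch of the policy above, restated as a standalone predicate so it can run outside a homeserver; the policy dict mirrors the config shape the handler reads, and the sample passwords are arbitrary:

```python
import re

policy = {
    "minimum_length": 10,
    "require_digit": True,
    "require_symbol": True,
    "require_uppercase": True,
    "require_lowercase": True,
}

CHECKS = [
    ("require_digit", re.compile("[0-9]")),
    ("require_symbol", re.compile("[^a-zA-Z0-9]")),
    ("require_uppercase", re.compile("[A-Z]")),
    ("require_lowercase", re.compile("[a-z]")),
]

def complies(password):
    # Same rules as PasswordPolicyHandler.validate_password, returning a
    # bool instead of raising PasswordRefusedError.
    if len(password) < policy.get("minimum_length", 0):
        return False
    return all(
        regex.search(password) is not None
        for option, regex in CHECKS
        if policy.get(option, False)
    )

assert complies("correct.Horse.8attery")
assert not complies("hunter2")  # too short, no symbol, no uppercase
```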
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index 94a9ca0357..053cf66b28 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -255,7 +255,7 @@ class PresenceHandler(object):
self.unpersisted_users_changes = set()
if unpersisted:
- logger.info("Persisting %d upersisted presence updates", len(unpersisted))
+ logger.info("Persisting %d unpersisted presence updates", len(unpersisted))
yield self.store.update_presence(
[self.user_to_current_state[user_id] for user_id in unpersisted]
)
@@ -1032,7 +1032,7 @@ class PresenceEventSource(object):
#
# Hence this guard where we just return nothing so that the sync
# doesn't return. C.f. #5503.
- return ([], max_token)
+ return [], max_token
presence = self.get_presence_handler()
stream_change_cache = self.store.presence_stream_cache
@@ -1279,7 +1279,7 @@ def get_interested_parties(store, states):
# Always notify self
users_to_states.setdefault(state.user_id, []).append(state)
- return (room_ids_to_states, users_to_states)
+ return room_ids_to_states, users_to_states
@defer.inlineCallbacks
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index 2cc237e6a5..fb31711b29 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,8 +17,11 @@
import logging
from six import raise_from
+from six.moves import range
-from twisted.internet import defer
+from signedjson.sign import sign_json
+
+from twisted.internet import defer, reactor
from synapse.api.errors import (
AuthError,
@@ -27,6 +31,7 @@ from synapse.api.errors import (
StoreError,
SynapseError,
)
+from synapse.logging.context import run_in_background
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import UserID, get_domain_from_id
@@ -34,7 +39,7 @@ from ._base import BaseHandler
logger = logging.getLogger(__name__)
-MAX_DISPLAYNAME_LEN = 100
+MAX_DISPLAYNAME_LEN = 256
MAX_AVATAR_URL_LEN = 1000
@@ -46,6 +51,8 @@ class BaseProfileHandler(BaseHandler):
subclass MasterProfileHandler
"""
+ PROFILE_REPLICATE_INTERVAL = 2 * 60 * 1000
+
def __init__(self, hs):
super(BaseProfileHandler, self).__init__(hs)
@@ -56,6 +63,87 @@ class BaseProfileHandler(BaseHandler):
self.user_directory_handler = hs.get_user_directory_handler()
+ self.http_client = hs.get_simple_http_client()
+
+ self.max_avatar_size = hs.config.max_avatar_size
+ self.allowed_avatar_mimetypes = hs.config.allowed_avatar_mimetypes
+
+ if hs.config.worker_app is None:
+ self.clock.looping_call(
+ self._start_update_remote_profile_cache, self.PROFILE_UPDATE_MS
+ )
+
+ if len(self.hs.config.replicate_user_profiles_to) > 0:
+ reactor.callWhenRunning(self._assign_profile_replication_batches)
+ reactor.callWhenRunning(self._replicate_profiles)
+ # Add a looping call to replicate_profiles: this handles retries
+ # if the replication is unsuccessful when the user updated their
+ # profile.
+ self.clock.looping_call(
+ self._replicate_profiles, self.PROFILE_REPLICATE_INTERVAL
+ )
+
+ @defer.inlineCallbacks
+ def _assign_profile_replication_batches(self):
+ """If no profile replication has been done yet, allocate replication batch
+ numbers to each profile to start the replication process.
+ """
+ logger.info("Assigning profile batch numbers...")
+ total = 0
+ while True:
+ assigned = yield self.store.assign_profile_batch()
+ total += assigned
+ if assigned == 0:
+ break
+ logger.info("Assigned %d profile batch numbers", total)
+
+ @defer.inlineCallbacks
+ def _replicate_profiles(self):
+ """If any profile data has been updated and not pushed to the replication targets,
+ replicate it.
+ """
+ host_batches = yield self.store.get_replication_hosts()
+ latest_batch = yield self.store.get_latest_profile_replication_batch_number()
+ if latest_batch is None:
+ latest_batch = -1
+ for repl_host in self.hs.config.replicate_user_profiles_to:
+ if repl_host not in host_batches:
+ host_batches[repl_host] = -1
+ try:
+ for i in range(host_batches[repl_host] + 1, latest_batch + 1):
+ yield self._replicate_host_profile_batch(repl_host, i)
+ except Exception:
+ logger.exception(
+ "Exception while replicating to %s: aborting for now", repl_host
+ )
+
+ @defer.inlineCallbacks
+ def _replicate_host_profile_batch(self, host, batchnum):
+ logger.info("Replicating profile batch %d to %s", batchnum, host)
+ batch_rows = yield self.store.get_profile_batch(batchnum)
+ batch = {
+ UserID(r["user_id"], self.hs.hostname).to_string(): (
+ {"display_name": r["displayname"], "avatar_url": r["avatar_url"]}
+ if r["active"]
+ else None
+ )
+ for r in batch_rows
+ }
+
+ url = "https://%s/_matrix/identity/api/v1/replicate_profiles" % (host,)
+ body = {"batchnum": batchnum, "batch": batch, "origin_server": self.hs.hostname}
+ signed_body = sign_json(body, self.hs.hostname, self.hs.config.signing_key[0])
+ try:
+ yield self.http_client.post_json_get_json(url, signed_body)
+ yield self.store.update_replication_batch_for_host(host, batchnum)
+ logger.info("Sucessfully replicated profile batch %d to %s", batchnum, host)
+ except Exception:
+ # This will get retried when the looping call next comes around
+ logger.exception(
+ "Failed to replicate profile batch %d to %s", batchnum, host
+ )
+ raise
+
@defer.inlineCallbacks
def get_profile(self, user_id):
target_user = UserID.from_string(user_id)
@@ -154,9 +242,16 @@ class BaseProfileHandler(BaseHandler):
if not self.hs.is_mine(target_user):
raise SynapseError(400, "User is not hosted on this Home Server")
- if not by_admin and target_user != requester.user:
+ if not by_admin and requester and target_user != requester.user:
raise AuthError(400, "Cannot set another user's displayname")
+ if not by_admin and self.hs.config.disable_set_displayname:
+ profile = yield self.store.get_profileinfo(target_user.localpart)
+ if profile.display_name:
+ raise SynapseError(
+ 400, "Changing displayname is disabled on this server"
+ )
+
if len(new_displayname) > MAX_DISPLAYNAME_LEN:
raise SynapseError(
400, "Displayname is too long (max %i)" % (MAX_DISPLAYNAME_LEN,)
@@ -165,7 +260,17 @@ class BaseProfileHandler(BaseHandler):
if new_displayname == "":
new_displayname = None
- yield self.store.set_profile_displayname(target_user.localpart, new_displayname)
+ if len(self.hs.config.replicate_user_profiles_to) > 0:
+ cur_batchnum = (
+ yield self.store.get_latest_profile_replication_batch_number()
+ )
+ new_batchnum = 0 if cur_batchnum is None else cur_batchnum + 1
+ else:
+ new_batchnum = None
+
+ yield self.store.set_profile_displayname(
+ target_user.localpart, new_displayname, new_batchnum
+ )
if self.hs.config.user_directory_search_all_users:
profile = yield self.store.get_profileinfo(target_user.localpart)
@@ -173,7 +278,39 @@ class BaseProfileHandler(BaseHandler):
target_user.to_string(), profile
)
- yield self._update_join_states(requester, target_user)
+ if requester:
+ yield self._update_join_states(requester, target_user)
+
+ # start a profile replication push
+ run_in_background(self._replicate_profiles)
+
+ @defer.inlineCallbacks
+ def set_active(self, target_user, active, hide):
+ """
+ Sets the 'active' flag on a user profile. If set to false, the user
+ account is considered deactivated or hidden.
+
+ If 'hide' is true, then we interpret active=False as a request to try to
+ hide the user rather than deactivating it. This means withholding the
+ profile from replication (and marking it as inactive) rather than clearing
+ the profile from the HS DB. Note that unlike set_displayname and
+ set_avatar_url, this does *not* perform authorization checks! This is
+ because the only place it's used currently is in account deactivation
+ where we've already done these checks anyway.
+ """
+ if len(self.hs.config.replicate_user_profiles_to) > 0:
+ cur_batchnum = (
+ yield self.store.get_latest_profile_replication_batch_number()
+ )
+ new_batchnum = 0 if cur_batchnum is None else cur_batchnum + 1
+ else:
+ new_batchnum = None
+ yield self.store.set_profile_active(
+ target_user.localpart, active, hide, new_batchnum
+ )
+
+ # start a profile replication push
+ run_in_background(self._replicate_profiles)
@defer.inlineCallbacks
def get_avatar_url(self, target_user):
@@ -212,12 +349,59 @@ class BaseProfileHandler(BaseHandler):
if not by_admin and target_user != requester.user:
raise AuthError(400, "Cannot set another user's avatar_url")
+ if not by_admin and self.hs.config.disable_set_avatar_url:
+ profile = yield self.store.get_profileinfo(target_user.localpart)
+ if profile.avatar_url:
+ raise SynapseError(
+ 400, "Changing avatar url is disabled on this server"
+ )
+
+ if len(self.hs.config.replicate_user_profiles_to) > 0:
+ cur_batchnum = (
+ yield self.store.get_latest_profile_replication_batch_number()
+ )
+ new_batchnum = 0 if cur_batchnum is None else cur_batchnum + 1
+ else:
+ new_batchnum = None
+
if len(new_avatar_url) > MAX_AVATAR_URL_LEN:
raise SynapseError(
400, "Avatar URL is too long (max %i)" % (MAX_AVATAR_URL_LEN,)
)
- yield self.store.set_profile_avatar_url(target_user.localpart, new_avatar_url)
+ # Enforce a max avatar size if one is defined
+ if self.max_avatar_size or self.allowed_avatar_mimetypes:
+ media_id = self._validate_and_parse_media_id_from_avatar_url(new_avatar_url)
+
+ # Check that this media exists locally
+ media_info = yield self.store.get_local_media(media_id)
+ if not media_info:
+ raise SynapseError(
+ 400, "Unknown media id supplied", errcode=Codes.NOT_FOUND
+ )
+
+ # Ensure avatar does not exceed max allowed avatar size
+ media_size = media_info["media_length"]
+ if self.max_avatar_size and media_size > self.max_avatar_size:
+ raise SynapseError(
+ 400,
+ "Avatars must be less than %s bytes in size"
+ % (self.max_avatar_size,),
+ errcode=Codes.TOO_LARGE,
+ )
+
+ # Ensure the avatar's file type is allowed
+ if (
+ self.allowed_avatar_mimetypes
+ and media_info["media_type"] not in self.allowed_avatar_mimetypes
+ ):
+ raise SynapseError(
+ 400, "Avatar file type '%s' not allowed" % media_info["media_type"]
+ )
+
+ yield self.store.set_profile_avatar_url(
+ target_user.localpart, new_avatar_url, new_batchnum
+ )
if self.hs.config.user_directory_search_all_users:
profile = yield self.store.get_profileinfo(target_user.localpart)
@@ -227,6 +411,23 @@ class BaseProfileHandler(BaseHandler):
yield self._update_join_states(requester, target_user)
+ # start a profile replication push
+ run_in_background(self._replicate_profiles)
+
+ def _validate_and_parse_media_id_from_avatar_url(self, mxc):
+ """Validate and parse a provided avatar url and return the local media id
+
+ Args:
+ mxc (str): A mxc URL
+
+ Returns:
+ str: The ID of the media
+ """
+ avatar_pieces = mxc.split("/")
+ if len(avatar_pieces) != 4 or avatar_pieces[0] != "mxc:":
+ raise SynapseError(400, "Invalid avatar URL '%s' supplied" % mxc)
+ return avatar_pieces[-1]
+
@defer.inlineCallbacks
def on_profile_query(self, args):
user = UserID.from_string(args["user_id"])
@@ -282,7 +483,7 @@ class BaseProfileHandler(BaseHandler):
@defer.inlineCallbacks
def check_profile_query_allowed(self, target_user, requester=None):
"""Checks whether a profile query is allowed. If the
- 'require_auth_for_profile_requests' config flag is set to True and a
+ 'limit_profile_requests_to_known_users' config flag is set to True and a
'requester' is provided, the query is only allowed if the two users
share a room.
@@ -300,7 +501,11 @@ class BaseProfileHandler(BaseHandler):
# be None when this function is called outside of a profile query, e.g.
# when building a membership event. In this case, we must allow the
# lookup.
- if not self.hs.config.require_auth_for_profile_requests or not requester:
+ if not self.hs.config.limit_profile_requests_to_known_users or not requester:
+ return
+
+ # Always allow the user to query their own profile.
+ if target_user.to_string() == requester.to_string():
return
# Always allow the user to query their own profile.
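`_validate_and_parse_media_id_from_avatar_url` above leans on the shape of `mxc://` URLs: `mxc://<server-name>/<media-id>` splits on `/` into exactly four pieces. The same parse as a standalone sketch:

```python
def media_id_from_mxc(mxc):
    # "mxc://example.com/abc".split("/") == ["mxc:", "", "example.com", "abc"],
    # so a well-formed mxc URL always splits into exactly four pieces.
    pieces = mxc.split("/")
    if len(pieces) != 4 or pieces[0] != "mxc:":
        raise ValueError("Invalid avatar URL %r supplied" % (mxc,))
    return pieces[-1]

assert media_id_from_mxc("mxc://example.com/abcdef123") == "abcdef123"
```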
diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py
index 73973502a4..6854c751a6 100644
--- a/synapse/handlers/receipts.py
+++ b/synapse/handlers/receipts.py
@@ -148,7 +148,7 @@ class ReceiptEventSource(object):
to_key = yield self.get_current_key()
if from_key == to_key:
- return ([], to_key)
+ return [], to_key
events = yield self.store.get_linearized_receipts_for_rooms(
room_ids, from_key=from_key, to_key=to_key
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index 4631fab94e..983c8f1bff 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -24,13 +24,11 @@ from synapse.api.errors import (
AuthError,
Codes,
ConsentNotGivenError,
- InvalidCaptchaError,
LimitExceededError,
RegistrationError,
SynapseError,
)
from synapse.config.server import is_threepid_reserved
-from synapse.http.client import CaptchaServerHttpClient
from synapse.http.servlet import assert_params_in_dict
from synapse.replication.http.login import RegisterDeviceReplicationServlet
from synapse.replication.http.register import (
@@ -39,7 +37,6 @@ from synapse.replication.http.register import (
)
from synapse.types import RoomAlias, RoomID, UserID, create_requester
from synapse.util.async_helpers import Linearizer
-from synapse.util.threepids import check_3pid_allowed
from ._base import BaseHandler
@@ -59,7 +56,7 @@ class RegistrationHandler(BaseHandler):
self._auth_handler = hs.get_auth_handler()
self.profile_handler = hs.get_profile_handler()
self.user_directory_handler = hs.get_user_directory_handler()
- self.captcha_client = CaptchaServerHttpClient(hs)
+ self.http_client = hs.get_simple_http_client()
self.identity_handler = self.hs.get_handlers().identity_handler
self.ratelimiter = hs.get_registration_ratelimiter()
@@ -72,6 +69,8 @@ class RegistrationHandler(BaseHandler):
)
self._server_notices_mxid = hs.config.server_notices_mxid
+ self._show_in_user_directory = self.hs.config.show_users_in_user_directory
+
if hs.config.worker_app:
self._register_client = ReplicationRegisterServlet.make_client(hs)
self._register_device_client = RegisterDeviceReplicationServlet.make_client(
@@ -213,6 +212,11 @@ class RegistrationHandler(BaseHandler):
address=address,
)
+ if default_display_name:
+ yield self.profile_handler.set_displayname(
+ user, None, default_display_name, by_admin=True
+ )
+
if self.hs.config.user_directory_search_all_users:
profile = yield self.store.get_profileinfo(localpart)
yield self.user_directory_handler.handle_local_profile_change(
@@ -238,6 +242,11 @@ class RegistrationHandler(BaseHandler):
create_profile_with_displayname=default_display_name,
address=address,
)
+
+ yield self.profile_handler.set_displayname(
+ user, None, default_display_name, by_admin=True
+ )
+
except SynapseError:
# if user id is taken, just generate another
user = None
@@ -265,6 +274,14 @@ class RegistrationHandler(BaseHandler):
# Bind email to new account
yield self._register_email_threepid(user_id, threepid_dict, None, False)
+ # Prevent the new user from showing up in the user directory if the server
+ # mandates it.
+ if not self._show_in_user_directory:
+ yield self.store.add_account_data_for_user(
+ user_id, "im.vector.hide_profile", {"hide_profile": True}
+ )
+ yield self.profile_handler.set_active(user, False, True)
+
return user_id
@defer.inlineCallbacks
@@ -279,16 +296,12 @@ class RegistrationHandler(BaseHandler):
fake_requester = create_requester(user_id)
# try to create the room if we're the first real user on the server. Note
- # that an auto-generated support user is not a real user and will never be
+ # that an auto-generated support or bot user is not a real user and will never be
# the user to create the room
should_auto_create_rooms = False
- is_support = yield self.store.is_support_user(user_id)
- # There is an edge case where the first user is the support user, then
- # the room is never created, though this seems unlikely and
- # recoverable from given the support user being involved in the first
- # place.
- if self.hs.config.autocreate_auto_join_rooms and not is_support:
- count = yield self.store.count_all_users()
+ is_real_user = yield self.store.is_real_user(user_id)
+ if self.hs.config.autocreate_auto_join_rooms and is_real_user:
+ count = yield self.store.count_real_users()
should_auto_create_rooms = count == 1
for r in self.hs.config.auto_join_rooms:
logger.info("Auto-joining %s to %s", user_id, r)
@@ -335,7 +348,9 @@ class RegistrationHandler(BaseHandler):
yield self._auto_join_rooms(user_id)
@defer.inlineCallbacks
- def appservice_register(self, user_localpart, as_token):
+ def appservice_register(self, user_localpart, as_token, password, display_name):
+ # FIXME: this should be factored out and merged with normal register()
+
user = UserID(user_localpart, self.hs.hostname)
user_id = user.to_string()
service = self.store.get_app_service_by_token(as_token)
@@ -354,77 +369,30 @@ class RegistrationHandler(BaseHandler):
user_id, allowed_appservice=service
)
+ password_hash = ""
+ if password:
+ password_hash = yield self._auth_handler.hash(password)
+
+ display_name = display_name or user.localpart
+
yield self.register_with_store(
user_id=user_id,
- password_hash="",
+ password_hash=password_hash,
appservice_id=service_id,
- create_profile_with_displayname=user.localpart,
+ create_profile_with_displayname=display_name,
)
- return user_id
-
- @defer.inlineCallbacks
- def check_recaptcha(self, ip, private_key, challenge, response):
- """
- Checks a recaptcha is correct.
-
- Used only by c/s api v1
- """
- captcha_response = yield self._validate_captcha(
- ip, private_key, challenge, response
+ yield self.profile_handler.set_displayname(
+ user, None, display_name, by_admin=True
)
- if not captcha_response["valid"]:
- logger.info(
- "Invalid captcha entered from %s. Error: %s",
- ip,
- captcha_response["error_url"],
- )
- raise InvalidCaptchaError(error_url=captcha_response["error_url"])
- else:
- logger.info("Valid captcha entered from %s", ip)
-
- @defer.inlineCallbacks
- def register_email(self, threepidCreds):
- """
- Registers emails with an identity server.
-
- Used only by c/s api v1
- """
- for c in threepidCreds:
- logger.info(
- "validating threepidcred sid %s on id server %s",
- c["sid"],
- c["idServer"],
- )
- try:
- threepid = yield self.identity_handler.threepid_from_creds(c)
- except Exception:
- logger.exception("Couldn't validate 3pid")
- raise RegistrationError(400, "Couldn't validate 3pid")
-
- if not threepid:
- raise RegistrationError(400, "Couldn't validate 3pid")
- logger.info(
- "got threepid with medium '%s' and address '%s'",
- threepid["medium"],
- threepid["address"],
+ if self.hs.config.user_directory_search_all_users:
+ profile = yield self.store.get_profileinfo(user_localpart)
+ yield self.user_directory_handler.handle_local_profile_change(
+ user_id, profile
)
- if not check_3pid_allowed(self.hs, threepid["medium"], threepid["address"]):
- raise RegistrationError(403, "Third party identifier is not allowed")
-
- @defer.inlineCallbacks
- def bind_emails(self, user_id, threepidCreds):
- """Links emails with a user ID and informs an identity server.
-
- Used only by c/s api v1
- """
-
- # Now we have a matrix ID, bind it to the threepids we were given
- for c in threepidCreds:
- # XXX: This should be a deferred list, shouldn't it?
- yield self.identity_handler.bind_threepid(c, user_id)
+ return user_id
def check_user_id_not_appservice_exclusive(self, user_id, allowed_appservice=None):
# don't allow people to register the server notices mxid
@@ -451,6 +419,39 @@ class RegistrationHandler(BaseHandler):
)
@defer.inlineCallbacks
+ def shadow_register(self, localpart, display_name, auth_result, params):
+ """Invokes the current registration on another server, using
+ shared secret registration, passing in any auth_results from
+ other registration UI auth flows (e.g. validated 3pids).
+ Useful for setting up shadow/backup accounts on a parallel deployment.
+ """
+
+ # TODO: retries
+ shadow_hs_url = self.hs.config.shadow_server.get("hs_url")
+ as_token = self.hs.config.shadow_server.get("as_token")
+
+ yield self.http_client.post_json_get_json(
+ "%s/_matrix/client/r0/register?access_token=%s" % (shadow_hs_url, as_token),
+ {
+ # XXX: auth_result is an unspecified extension for shadow registration
+ "auth_result": auth_result,
+ # XXX: another unspecified extension for shadow registration to ensure
+ # that the displayname is correctly set by the master server
+ "display_name": display_name,
+ "username": localpart,
+ "password": params.get("password"),
+ "bind_email": params.get("bind_email"),
+ "bind_msisdn": params.get("bind_msisdn"),
+ "device_id": params.get("device_id"),
+ "initial_device_display_name": params.get(
+ "initial_device_display_name"
+ ),
+ "inhibit_login": False,
+ "access_token": as_token,
+ },
+ )
+
+ @defer.inlineCallbacks
def _generate_user_id(self, reseed=False):
if reseed or self._next_generated_user_id is None:
with (yield self._generate_user_id_linearizer.queue(())):
@@ -464,44 +465,7 @@ class RegistrationHandler(BaseHandler):
return str(id)
@defer.inlineCallbacks
- def _validate_captcha(self, ip_addr, private_key, challenge, response):
- """Validates the captcha provided.
-
- Used only by c/s api v1
-
- Returns:
- dict: Containing 'valid'(bool) and 'error_url'(str) if invalid.
-
- """
- response = yield self._submit_captcha(ip_addr, private_key, challenge, response)
- # parse Google's response. Lovely format..
- lines = response.split("\n")
- json = {
- "valid": lines[0] == "true",
- "error_url": "http://www.recaptcha.net/recaptcha/api/challenge?"
- + "error=%s" % lines[1],
- }
- return json
-
- @defer.inlineCallbacks
- def _submit_captcha(self, ip_addr, private_key, challenge, response):
- """
- Used only by c/s api v1
- """
- data = yield self.captcha_client.post_urlencoded_get_raw(
- "http://www.recaptcha.net:80/recaptcha/api/verify",
- args={
- "privatekey": private_key,
- "remoteip": ip_addr,
- "challenge": challenge,
- "response": response,
- },
- )
- return data
-
- @defer.inlineCallbacks
def _join_user_to_room(self, requester, room_identifier):
- room_id = None
room_member_handler = self.hs.get_room_member_handler()
if RoomID.is_valid(room_identifier):
room_id = room_identifier
@@ -622,7 +586,7 @@ class RegistrationHandler(BaseHandler):
initial_display_name=initial_display_name,
is_guest=is_guest,
)
- return (r["device_id"], r["access_token"])
+ return r["device_id"], r["access_token"]
valid_until_ms = None
if self.session_lifetime is not None:
@@ -648,9 +612,7 @@ class RegistrationHandler(BaseHandler):
return (device_id, access_token)
@defer.inlineCallbacks
- def post_registration_actions(
- self, user_id, auth_result, access_token, bind_email, bind_msisdn
- ):
+ def post_registration_actions(self, user_id, auth_result, access_token):
"""A user has completed registration
Args:
@@ -659,18 +621,10 @@ class RegistrationHandler(BaseHandler):
registered user.
access_token (str|None): The access token of the newly logged in
device, or None if `inhibit_login` enabled.
- bind_email (bool): Whether to bind the email with the identity
- server.
- bind_msisdn (bool): Whether to bind the msisdn with the identity
- server.
"""
if self.hs.config.worker_app:
yield self._post_registration_client(
- user_id=user_id,
- auth_result=auth_result,
- access_token=access_token,
- bind_email=bind_email,
- bind_msisdn=bind_msisdn,
+ user_id=user_id, auth_result=auth_result, access_token=access_token
)
return
@@ -683,13 +637,11 @@ class RegistrationHandler(BaseHandler):
):
yield self.store.upsert_monthly_active_user(user_id)
- yield self._register_email_threepid(
- user_id, threepid, access_token, bind_email
- )
+ yield self._register_email_threepid(user_id, threepid, access_token)
if auth_result and LoginType.MSISDN in auth_result:
threepid = auth_result[LoginType.MSISDN]
- yield self._register_msisdn_threepid(user_id, threepid, bind_msisdn)
+ yield self._register_msisdn_threepid(user_id, threepid)
if auth_result and LoginType.TERMS in auth_result:
yield self._on_user_consented(user_id, self.hs.config.user_consent_version)
@@ -708,14 +660,12 @@ class RegistrationHandler(BaseHandler):
yield self.post_consent_actions(user_id)
@defer.inlineCallbacks
- def _register_email_threepid(self, user_id, threepid, token, bind_email):
+ def _register_email_threepid(self, user_id, threepid, token):
"""Add an email address as a 3pid identifier
Also adds an email pusher for the email address, if configured in the
HS config
- Also optionally binds emails to the given user_id on the identity server
-
Must be called on master.
Args:
@@ -723,8 +673,6 @@ class RegistrationHandler(BaseHandler):
threepid (object): m.login.email.identity auth response
token (str|None): access_token for the user, or None if not logged
in.
- bind_email (bool): true if the client requested the email to be
- bound at the identity server
Returns:
defer.Deferred:
"""
@@ -766,29 +714,15 @@ class RegistrationHandler(BaseHandler):
data={},
)
- if bind_email:
- logger.info("bind_email specified: binding")
- logger.debug("Binding emails %s to %s" % (threepid, user_id))
- yield self.identity_handler.bind_threepid(
- threepid["threepid_creds"], user_id
- )
- else:
- logger.info("bind_email not specified: not binding email")
-
@defer.inlineCallbacks
- def _register_msisdn_threepid(self, user_id, threepid, bind_msisdn):
+ def _register_msisdn_threepid(self, user_id, threepid):
"""Add a phone number as a 3pid identifier
- Also optionally binds msisdn to the given user_id on the identity server
-
Must be called on master.
Args:
user_id (str): id of user
threepid (object): m.login.msisdn auth response
- token (str): access_token for the user
- bind_email (bool): true if the client requested the email to be
- bound at the identity server
Returns:
defer.Deferred:
"""
@@ -804,12 +738,3 @@ class RegistrationHandler(BaseHandler):
yield self._auth_handler.add_threepid(
user_id, threepid["medium"], threepid["address"], threepid["validated_at"]
)
-
- if bind_msisdn:
- logger.info("bind_msisdn specified: binding")
- logger.debug("Binding msisdn %s to %s", threepid, user_id)
- yield self.identity_handler.bind_threepid(
- threepid["threepid_creds"], user_id
- )
- else:
- logger.info("bind_msisdn not specified: not binding msisdn")
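The shadow_register call added above amounts to replaying the registration against a second homeserver via an application-service token. A rough equivalent using the requests package; every value here is an assumption for illustration, not real configuration:

    import requests

    shadow_hs_url = "https://shadow.example.com"  # assumed shadow_server["hs_url"]
    as_token = "SECRET_AS_TOKEN"                  # assumed shadow_server["as_token"]

    requests.post(
        "%s/_matrix/client/r0/register?access_token=%s" % (shadow_hs_url, as_token),
        json={
            "username": "alice",
            "password": "correct-horse",
            "display_name": "Alice",
            "inhibit_login": False,
            "access_token": as_token,
        },
    )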
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 5caa90c3b7..3e815d7d46 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -52,12 +52,14 @@ class RoomCreationHandler(BaseHandler):
"history_visibility": "shared",
"original_invitees_have_ops": False,
"guest_can_join": True,
+ "encryption_alg": "m.megolm.v1.aes-sha2",
},
RoomCreationPreset.TRUSTED_PRIVATE_CHAT: {
"join_rules": JoinRules.INVITE,
"history_visibility": "shared",
"original_invitees_have_ops": True,
"guest_can_join": True,
+ "encryption_alg": "m.megolm.v1.aes-sha2",
},
RoomCreationPreset.PUBLIC_CHAT: {
"join_rules": JoinRules.PUBLIC,
@@ -294,7 +296,19 @@ class RoomCreationHandler(BaseHandler):
"""
user_id = requester.user.to_string()
- if not self.spam_checker.user_may_create_room(user_id):
+ if (
+ self._server_notices_mxid is not None
+ and requester.user.to_string() == self._server_notices_mxid
+ ):
+ # allow the server notices mxid to create rooms
+ is_requester_admin = True
+
+ else:
+ is_requester_admin = yield self.auth.is_server_admin(requester.user)
+
+ if not is_requester_admin and not self.spam_checker.user_may_create_room(
+ user_id, invite_list=[], third_party_invite_list=[], cloning=True
+ ):
raise SynapseError(403, "You are not permitted to create rooms")
creation_content = {
@@ -516,8 +530,14 @@ class RoomCreationHandler(BaseHandler):
requester, config, is_requester_admin=is_requester_admin
)
+ invite_list = config.get("invite", [])
+ invite_3pid_list = config.get("invite_3pid", [])
+
if not is_requester_admin and not self.spam_checker.user_may_create_room(
- user_id
+ user_id,
+ invite_list=invite_list,
+ third_party_invite_list=invite_3pid_list,
+ cloning=False,
):
raise SynapseError(403, "You are not permitted to create rooms")
@@ -551,7 +571,6 @@ class RoomCreationHandler(BaseHandler):
else:
room_alias = None
- invite_list = config.get("invite", [])
for i in invite_list:
try:
UserID.from_string(i)
@@ -560,15 +579,25 @@ class RoomCreationHandler(BaseHandler):
yield self.event_creation_handler.assert_accepted_privacy_policy(requester)
- invite_3pid_list = config.get("invite_3pid", [])
+ power_level_content_override = config.get("power_level_content_override")
+ if (
+ power_level_content_override
+ and "users" in power_level_content_override
+ and user_id not in power_level_content_override["users"]
+ ):
+ raise SynapseError(
+ 400,
+ "Not a valid power_level_content_override: 'users' did not contain %s"
+ % (user_id,),
+ )
visibility = config.get("visibility", None)
is_public = visibility == "public"
room_id = yield self._generate_room_id(creator_id=user_id, is_public=is_public)
+ directory_handler = self.hs.get_handlers().directory_handler
if room_alias:
- directory_handler = self.hs.get_handlers().directory_handler
yield directory_handler.create_association(
requester=requester,
room_id=room_id,
@@ -604,7 +633,7 @@ class RoomCreationHandler(BaseHandler):
initial_state=initial_state,
creation_content=creation_content,
room_alias=room_alias,
- power_level_content_override=config.get("power_level_content_override"),
+ power_level_content_override=power_level_content_override,
creator_join_profile=creator_join_profile,
)
@@ -649,10 +678,12 @@ class RoomCreationHandler(BaseHandler):
"invite",
ratelimit=False,
content=content,
+ new_room=True,
)
for invite_3pid in invite_3pid_list:
id_server = invite_3pid["id_server"]
+ id_access_token = invite_3pid.get("id_access_token") # optional
address = invite_3pid["address"]
medium = invite_3pid["medium"]
yield self.hs.get_room_member_handler().do_3pid_invite(
@@ -663,6 +694,8 @@ class RoomCreationHandler(BaseHandler):
id_server,
requester,
txn_id=None,
+ new_room=True,
+ id_access_token=id_access_token,
)
result = {"room_id": room_id}
@@ -719,6 +752,7 @@ class RoomCreationHandler(BaseHandler):
"join",
ratelimit=False,
content=creator_join_profile,
+ new_room=True,
)
# We treat the power levels override specially as this needs to be one
@@ -780,6 +814,13 @@ class RoomCreationHandler(BaseHandler):
for (etype, state_key), content in initial_state.items():
yield send(etype=etype, state_key=state_key, content=content)
+ if "encryption_alg" in config:
+ yield send(
+ etype=EventTypes.Encryption,
+ state_key="",
+ content={"algorithm": config["encryption_alg"]},
+ )
+
@defer.inlineCallbacks
def _generate_room_id(self, creator_id, is_public):
# autogen room IDs and try to create it. We may clash, so just
@@ -840,7 +881,6 @@ class RoomContextHandler(object):
)
if not event:
return None
- return
filtered = yield (filter_evts([event]))
if not filtered:
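The new "encryption_alg" preset key means the private-chat presets now finish room creation by sending an encryption state event. Illustratively, the event produced by the send(...) call added above looks like this (a plain dict sketch; the real code goes through the event creation handler):

    encryption_state_event = {
        "type": "m.room.encryption",  # EventTypes.Encryption
        "state_key": "",
        "content": {"algorithm": "m.megolm.v1.aes-sha2"},
    }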
diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py
index e9094ad02b..a7e55f00e5 100644
--- a/synapse/handlers/room_list.py
+++ b/synapse/handlers/room_list.py
@@ -25,6 +25,7 @@ from unpaddedbase64 import decode_base64, encode_base64
from twisted.internet import defer
from synapse.api.constants import EventTypes, JoinRules
+from synapse.api.errors import Codes, HttpResponseException
from synapse.types import ThirdPartyInstanceID
from synapse.util.async_helpers import concurrently_execute
from synapse.util.caches.descriptors import cachedInlineCallbacks
@@ -485,7 +486,33 @@ class RoomListHandler(BaseHandler):
return {"chunk": [], "total_room_count_estimate": 0}
if search_filter:
- # We currently don't support searching across federation, so we have
+ # Searching across federation is defined in MSC2197.
+ # However, the remote homeserver may or may not actually support it.
+ # So we first try an MSC2197 remote-filtered search, then fall back
+ # to a locally-filtered search if we must.
+
+ try:
+ res = yield self._get_remote_list_cached(
+ server_name,
+ limit=limit,
+ since_token=since_token,
+ include_all_networks=include_all_networks,
+ third_party_instance_id=third_party_instance_id,
+ search_filter=search_filter,
+ )
+ return res
+ except HttpResponseException as hre:
+ syn_err = hre.to_synapse_error()
+ if hre.code in (404, 405) or syn_err.errcode in (
+ Codes.UNRECOGNIZED,
+ Codes.NOT_FOUND,
+ ):
+ logger.debug("Falling back to locally-filtered /publicRooms")
+ else:
+ raise # Not an error that should trigger a fallback.
+
+ # If we reach this point, the remote server doesn't support remote-filtered
+ # search (MSC2197), so we fall back to the old behaviour: we have
# to do it manually without pagination
limit = None
since_token = None
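The fallback added above reduces to: attempt the MSC2197 remote-filtered search, and fall back to local filtering only on errors that signal a missing endpoint. A self-contained sketch of that control flow (toy exception class, not Synapse's):

    class HttpError(Exception):
        def __init__(self, code):
            self.code = code

    def search_public_rooms(remote_search, local_search, search_filter):
        try:
            # MSC2197: let the remote server apply the filter.
            return remote_search(search_filter)
        except HttpError as e:
            if e.code in (404, 405):
                # Endpoint unknown: fetch unfiltered and filter locally.
                return local_search(search_filter)
            raise  # a genuine error, not a capability gap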
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 249a6d9c5d..fa894907f7 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -28,12 +28,18 @@ from twisted.internet import defer
from synapse import types
from synapse.api.constants import EventTypes, Membership
-from synapse.api.errors import AuthError, Codes, HttpResponseException, SynapseError
+from synapse.api.ratelimiting import Ratelimiter
+from synapse.api.errors import (
+ AuthError,
+ Codes,
+ HttpResponseException,
+ SynapseError,
+)
+from synapse.handlers.identity import LookupAlgorithm, create_id_access_token_header
from synapse.types import RoomID, UserID
from synapse.util.async_helpers import Linearizer
from synapse.util.distributor import user_joined_room, user_left_room
-
-from ._base import BaseHandler
+from synapse.util.hash import sha256_and_url_safe_base64
logger = logging.getLogger(__name__)
@@ -66,6 +72,7 @@ class RoomMemberHandler(object):
self.registration_handler = hs.get_registration_handler()
self.profile_handler = hs.get_profile_handler()
self.event_creation_handler = hs.get_event_creation_handler()
+ self.identity_handler = hs.get_handlers().identity_handler
self.member_linearizer = Linearizer(name="member")
@@ -73,13 +80,10 @@ class RoomMemberHandler(object):
self.spam_checker = hs.get_spam_checker()
self.third_party_event_rules = hs.get_third_party_event_rules()
self._server_notices_mxid = self.config.server_notices_mxid
+ self.rewrite_identity_server_urls = self.config.rewrite_identity_server_urls
self._enable_lookup = hs.config.enable_3pid_lookup
self.allow_per_room_profiles = self.config.allow_per_room_profiles
-
- # This is only used to get at ratelimit function, and
- # maybe_kick_guest_users. It's fine there are multiple of these as
- # it doesn't store state.
- self.base_handler = BaseHandler(hs)
+ self.ratelimiter = Ratelimiter()
@abc.abstractmethod
def _remote_join(self, requester, remote_room_hosts, room_id, user, content):
@@ -100,7 +104,7 @@ class RoomMemberHandler(object):
raise NotImplementedError()
@abc.abstractmethod
- def _remote_reject_invite(self, remote_room_hosts, room_id, target):
+ def _remote_reject_invite(self, requester, remote_room_hosts, room_id, target):
"""Attempt to reject an invite for a room this server is not in. If we
fail to do so we locally mark the invite as rejected.
@@ -284,8 +288,31 @@ class RoomMemberHandler(object):
third_party_signed=None,
ratelimit=True,
content=None,
+ new_room=False,
require_consent=True,
):
+ """Update a user's membership in a room
+
+ Args:
+ requester (Requester)
+ target (UserID)
+ room_id (str)
+ action (str): The "action" the requester is performing against the
+ target. One of join/leave/kick/ban/invite/unban.
+ txn_id (str|None): The transaction ID associated with the request,
+ or None if not provided.
+ remote_room_hosts (list[str]|None): List of remote servers to try
+ and join via if this server isn't already in the room.
+ third_party_signed (dict|None): The signed object for third party
+ invites.
+ ratelimit (bool): Whether to apply ratelimiting to this request.
+ content (dict|None): Fields to include in the new event's content.
+ new_room (bool): Whether these membership changes are happening
+ as part of a room creation (e.g. initial joins and invites)
+
+ Returns:
+ Deferred[FrozenEvent]
+ """
key = (room_id,)
with (yield self.member_linearizer.queue(key)):
@@ -299,6 +326,7 @@ class RoomMemberHandler(object):
third_party_signed=third_party_signed,
ratelimit=ratelimit,
content=content,
+ new_room=new_room,
require_consent=require_consent,
)
@@ -316,6 +344,7 @@ class RoomMemberHandler(object):
third_party_signed=None,
ratelimit=True,
content=None,
+ new_room=False,
require_consent=True,
):
content_specified = bool(content)
@@ -380,8 +409,15 @@ class RoomMemberHandler(object):
)
block_invite = True
+ is_published = yield self.store.is_room_published(room_id)
+
if not self.spam_checker.user_may_invite(
- requester.user.to_string(), target.to_string(), room_id
+ requester.user.to_string(),
+ target.to_string(),
+ third_party_invite=None,
+ room_id=room_id,
+ new_room=new_room,
+ published_room=is_published,
):
logger.info("Blocking invite due to spam checker")
block_invite = True
@@ -454,8 +490,26 @@ class RoomMemberHandler(object):
# so don't really fit into the general auth process.
raise AuthError(403, "Guest access not allowed")
+ if (
+ self._server_notices_mxid is not None
+ and requester.user.to_string() == self._server_notices_mxid
+ ):
+ # allow the server notices mxid to join rooms
+ is_requester_admin = True
+
+ else:
+ is_requester_admin = yield self.auth.is_server_admin(requester.user)
+
+ inviter = yield self._get_inviter(target.to_string(), room_id)
+ if not is_requester_admin:
+ # We assume that if the spam checker allowed the user to create
+ # a room then they're allowed to join it.
+ if not new_room and not self.spam_checker.user_may_join_room(
+ target.to_string(), room_id, is_invited=inviter is not None
+ ):
+ raise SynapseError(403, "Not allowed to join this room")
+
if not is_host_in_room:
- inviter = yield self._get_inviter(target.to_string(), room_id)
if inviter and not self.hs.is_mine(inviter):
remote_room_hosts.append(inviter.domain)
@@ -510,9 +564,7 @@ class RoomMemberHandler(object):
return res
@defer.inlineCallbacks
- def send_membership_event(
- self, requester, event, context, remote_room_hosts=None, ratelimit=True
- ):
+ def send_membership_event(self, requester, event, context, ratelimit=True):
"""
Change the membership status of a user in a room.
@@ -522,16 +574,10 @@ class RoomMemberHandler(object):
act as the sender, will be skipped.
event (SynapseEvent): The membership event.
context: The context of the event.
- is_guest (bool): Whether the sender is a guest.
- room_hosts ([str]): Homeservers which are likely to already be in
- the room, and could be danced with in order to join this
- homeserver for the first time.
ratelimit (bool): Whether to rate limit this request.
Raises:
SynapseError if there was a problem changing the membership.
"""
- remote_room_hosts = remote_room_hosts or []
-
target_user = UserID.from_string(event.state_key)
room_id = event.room_id
@@ -634,7 +680,7 @@ class RoomMemberHandler(object):
servers.remove(room_alias.domain)
servers.insert(0, room_alias.domain)
- return (RoomID.from_string(room_id), servers)
+ return RoomID.from_string(room_id), servers
@defer.inlineCallbacks
def _get_inviter(self, user_id, room_id):
@@ -646,7 +692,16 @@ class RoomMemberHandler(object):
@defer.inlineCallbacks
def do_3pid_invite(
- self, room_id, inviter, medium, address, id_server, requester, txn_id
+ self,
+ room_id,
+ inviter,
+ medium,
+ address,
+ id_server,
+ requester,
+ txn_id,
+ new_room=False,
+ id_access_token=None,
):
if self.config.block_non_admin_invites:
is_requester_admin = yield self.auth.is_server_admin(requester.user)
@@ -657,7 +712,23 @@ class RoomMemberHandler(object):
# We need to rate limit *before* we send out any 3PID invites, so we
# can't just rely on the standard ratelimiting of events.
- yield self.base_handler.ratelimit(requester)
+ self.ratelimiter.ratelimit(
+ requester.user.to_string(),
+ time_now_s=self.hs.clock.time(),
+ rate_hz=self.hs.config.rc_third_party_invite.per_second,
+ burst_count=self.hs.config.rc_third_party_invite.burst_count,
+ update=True,
+ )
+
can_invite = yield self.third_party_event_rules.check_threepid_can_be_invited(
medium, address, room_id
@@ -669,7 +740,25 @@ class RoomMemberHandler(object):
Codes.FORBIDDEN,
)
- invitee = yield self._lookup_3pid(id_server, medium, address)
+ if not self._enable_lookup:
+ raise SynapseError(
+ 403, "Looking up third-party identifiers is denied from this server"
+ )
+
+ invitee = yield self._lookup_3pid(id_server, medium, address, id_access_token)
+
+ is_published = yield self.store.is_room_published(room_id)
+
+ if not self.spam_checker.user_may_invite(
+ requester.user.to_string(),
+ invitee,
+ third_party_invite={"medium": medium, "address": address},
+ room_id=room_id,
+ new_room=new_room,
+ published_room=is_published,
+ ):
+ logger.info("Blocking invite due to spam checker")
+ raise SynapseError(403, "Invites have been disabled on this server")
if invitee:
yield self.update_membership(
@@ -680,8 +769,22 @@ class RoomMemberHandler(object):
requester, id_server, medium, address, room_id, inviter, txn_id=txn_id
)
+ def _get_id_server_target(self, id_server):
+ """Looks up an id_server's actual http endpoint
+
+ Args:
+ id_server (str): the server name to lookup.
+
+ Returns:
+ str: the http endpoint to connect to.
+ """
+ if id_server in self.rewrite_identity_server_urls:
+ return self.rewrite_identity_server_urls[id_server]
+
+ return id_server
+
@defer.inlineCallbacks
- def _lookup_3pid(self, id_server, medium, address):
+ def _lookup_3pid(self, id_server, medium, address, id_access_token=None):
"""Looks up a 3pid in the passed identity server.
Args:
@@ -689,14 +792,51 @@ class RoomMemberHandler(object):
of the identity server to use.
medium (str): The type of the third party identifier (e.g. "email").
address (str): The third party identifier (e.g. "foo@example.com").
+ id_access_token (str|None): The access token to authenticate to the identity
+ server with
+
+ Returns:
+ str|None: the matrix ID of the 3pid, or None if it is not recognized.
+ """
+ # Rewrite id_server URL if necessary
+ id_server = self._get_id_server_target(id_server)
+
+ if id_access_token is not None:
+ try:
+ results = yield self._lookup_3pid_v2(
+ id_server, id_access_token, medium, address
+ )
+ return results
+
+ except Exception as e:
+ # Catch HttpResponseException for a non-200 response code
+ # Check if this identity server does not know about v2 lookups
+ if isinstance(e, HttpResponseException) and e.code == 404:
+ # This is an old identity server that does not yet support v2 lookups
+ logger.warning(
+ "Attempted v2 lookup on v1 identity server %s. Falling "
+ "back to v1",
+ id_server,
+ )
+ else:
+ logger.warning("Error when looking up hashing details: %s", e)
+ return None
+
+ return (yield self._lookup_3pid_v1(id_server, medium, address))
+
+ @defer.inlineCallbacks
+ def _lookup_3pid_v1(self, id_server, medium, address):
+ """Looks up a 3pid in the passed identity server using v1 lookup.
+
+ Args:
+ id_server (str): The server name (including port, if required)
+ of the identity server to use.
+ medium (str): The type of the third party identifier (e.g. "email").
+ address (str): The third party identifier (e.g. "foo@example.com").
Returns:
str: the matrix ID of the 3pid, or None if it is not recognized.
"""
- if not self._enable_lookup:
- raise SynapseError(
- 403, "Looking up third-party identifiers is denied from this server"
- )
try:
data = yield self.simple_http_client.get_json(
"%s%s/_matrix/identity/api/v1/lookup" % (id_server_scheme, id_server),
@@ -710,9 +850,116 @@ class RoomMemberHandler(object):
return data["mxid"]
except IOError as e:
- logger.warn("Error from identity server lookup: %s" % (e,))
+ logger.warning("Error from v1 identity server lookup: %s", e)
+
+ return None
+
+ @defer.inlineCallbacks
+ def _lookup_3pid_v2(self, id_server, id_access_token, medium, address):
+ """Looks up a 3pid in the passed identity server using v2 lookup.
+
+ Args:
+ id_server (str): The server name (including port, if required)
+ of the identity server to use.
+ id_access_token (str): The access token to authenticate to the identity server with
+ medium (str): The type of the third party identifier (e.g. "email").
+ address (str): The third party identifier (e.g. "foo@example.com").
+
+ Returns:
+ Deferred[str|None]: the matrix ID of the 3pid, or None if it is not recognised.
+ """
+ # Check what hashing details are supported by this identity server
+ hash_details = yield self.simple_http_client.get_json(
+ "%s%s/_matrix/identity/v2/hash_details" % (id_server_scheme, id_server),
+ {"access_token": id_access_token},
+ )
+
+ if not isinstance(hash_details, dict):
+ logger.warning(
+ "Got non-dict object when checking hash details of %s%s: %s",
+ id_server_scheme,
+ id_server,
+ hash_details,
+ )
+ raise SynapseError(
+ 400,
+ "Non-dict object from %s%s during v2 hash_details request: %s"
+ % (id_server_scheme, id_server, hash_details),
+ )
+
+ # Extract information from hash_details
+ supported_lookup_algorithms = hash_details.get("algorithms")
+ lookup_pepper = hash_details.get("lookup_pepper")
+ if (
+ not supported_lookup_algorithms
+ or not isinstance(supported_lookup_algorithms, list)
+ or not lookup_pepper
+ or not isinstance(lookup_pepper, str)
+ ):
+ raise SynapseError(
+ 400,
+ "Invalid hash details received from identity server %s%s: %s"
+ % (id_server_scheme, id_server, hash_details),
+ )
+
+ # Check if any of the supported lookup algorithms are present
+ if LookupAlgorithm.SHA256 in supported_lookup_algorithms:
+ # Perform a hashed lookup
+ lookup_algorithm = LookupAlgorithm.SHA256
+
+ # Hash address, medium and the pepper with sha256
+ to_hash = "%s %s %s" % (address, medium, lookup_pepper)
+ lookup_value = sha256_and_url_safe_base64(to_hash)
+
+ elif LookupAlgorithm.NONE in supported_lookup_algorithms:
+ # Perform a non-hashed lookup
+ lookup_algorithm = LookupAlgorithm.NONE
+
+ # Combine together plaintext address and medium
+ lookup_value = "%s %s" % (address, medium)
+
+ else:
+ logger.warning(
+ "None of the provided lookup algorithms of %s are supported: %s",
+ id_server,
+ supported_lookup_algorithms,
+ )
+ raise SynapseError(
+ 400,
+ "Provided identity server does not support any v2 lookup "
+ "algorithms that this homeserver supports.",
+ )
+
+ # Authenticate with identity server given the access token from the client
+ headers = {"Authorization": create_id_access_token_header(id_access_token)}
+
+ try:
+ lookup_results = yield self.simple_http_client.post_json_get_json(
+ "%s%s/_matrix/identity/v2/lookup" % (id_server_scheme, id_server),
+ {
+ "addresses": [lookup_value],
+ "algorithm": lookup_algorithm,
+ "pepper": lookup_pepper,
+ },
+ headers=headers,
+ )
+ except Exception as e:
+ logger.warning("Error when performing a v2 3pid lookup: %s", e)
+ raise SynapseError(
+ 500, "Unknown error occurred during identity server lookup"
+ )
+
+ # Check for a mapping from what we looked up to an MXID
+ if "mappings" not in lookup_results or not isinstance(
+ lookup_results["mappings"], dict
+ ):
+ logger.warning("No results from 3pid lookup")
return None
+ # Return the MXID if it's available, or None otherwise
+ mxid = lookup_results["mappings"].get(lookup_value)
+ return mxid
+
@defer.inlineCallbacks
def _verify_any_signature(self, data, server_hostname):
if server_hostname not in data["signatures"]:
@@ -853,9 +1100,10 @@ class RoomMemberHandler(object):
user.
"""
+ target = self._get_id_server_target(id_server)
is_url = "%s%s/_matrix/identity/api/v1/store-invite" % (
id_server_scheme,
- id_server,
+ target,
)
invite_config = {
@@ -870,7 +1118,6 @@ class RoomMemberHandler(object):
"sender_display_name": inviter_display_name,
"sender_avatar_url": inviter_avatar_url,
}
-
try:
data = yield self.simple_http_client.post_json_get_json(
is_url, invite_config
@@ -895,7 +1142,7 @@ class RoomMemberHandler(object):
fallback_public_key = {
"public_key": data["public_key"],
"key_validity_url": "%s%s/_matrix/identity/api/v1/pubkey/isvalid"
- % (id_server_scheme, id_server),
+ % (id_server_scheme, target),
}
else:
fallback_public_key = public_keys[0]
@@ -903,7 +1150,7 @@ class RoomMemberHandler(object):
if not public_keys:
public_keys.append(fallback_public_key)
display_name = data["display_name"]
- return (token, public_keys, fallback_public_key, display_name)
+ return token, public_keys, fallback_public_key, display_name
@defer.inlineCallbacks
def _is_host_in_room(self, current_state_ids):
@@ -962,9 +1209,7 @@ class RoomMemberMasterHandler(RoomMemberHandler):
)
if complexity:
- if complexity["v1"] > max_complexity:
- return True
- return False
+ return complexity["v1"] > max_complexity
return None
@defer.inlineCallbacks
@@ -980,10 +1225,7 @@ class RoomMemberMasterHandler(RoomMemberHandler):
max_complexity = self.hs.config.limit_remote_rooms.complexity
complexity = yield self.store.get_room_complexity(room_id)
- if complexity["v1"] > max_complexity:
- return True
-
- return False
+ return complexity["v1"] > max_complexity
@defer.inlineCallbacks
def _remote_join(self, requester, remote_room_hosts, room_id, user, content):
@@ -1062,7 +1304,7 @@ class RoomMemberMasterHandler(RoomMemberHandler):
# The 'except' clause is very broad, but we need to
# capture everything from DNS failures upwards
#
- logger.warn("Failed to reject invite: %s", e)
+ logger.warning("Failed to reject invite: %s", e)
yield self.store.locally_reject_invite(target.to_string(), room_id)
return {}
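The v2 lookup above hashes "<address> <medium> <pepper>" with SHA-256 and then unpadded URL-safe base64 before querying the identity server. A small sketch of that computation, assuming it mirrors synapse.util.hash.sha256_and_url_safe_base64:

    import base64
    import hashlib

    def sha256_and_url_safe_base64(value):
        digest = hashlib.sha256(value.encode("utf8")).digest()
        # Unpadded, URL-safe base64 of the digest.
        return base64.urlsafe_b64encode(digest).decode("ascii").rstrip("=")

    # As built in _lookup_3pid_v2 (pepper value is illustrative):
    lookup_value = sha256_and_url_safe_base64("foo@example.com email matrixrocks")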
diff --git a/synapse/handlers/set_password.py b/synapse/handlers/set_password.py
index d90c9e0108..3f50d6de47 100644
--- a/synapse/handlers/set_password.py
+++ b/synapse/handlers/set_password.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
-# Copyright 2017 New Vector Ltd
+# Copyright 2017-2018 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -30,12 +31,15 @@ class SetPasswordHandler(BaseHandler):
super(SetPasswordHandler, self).__init__(hs)
self._auth_handler = hs.get_auth_handler()
self._device_handler = hs.get_device_handler()
+ self._password_policy_handler = hs.get_password_policy_handler()
@defer.inlineCallbacks
def set_password(self, user_id, newpassword, requester=None):
if not self.hs.config.password_localdb_enabled:
raise SynapseError(403, "Password change disabled", errcode=Codes.FORBIDDEN)
+ self._password_policy_handler.validate_password(newpassword)
+
password_hash = yield self._auth_handler.hash(newpassword)
except_device_id = requester.device_id if requester else None
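The validate_password call added above raises if the new password fails the configured policy. A hypothetical sketch of the kind of rule such a hook might enforce; the real handler's rules are configuration-driven and not visible in this diff:

    def validate_password(password, minimum_length=8, require_digit=True):
        # Raise on failure so callers can simply invoke this before hashing.
        if len(password) < minimum_length:
            raise ValueError("Password is too short")
        if require_digit and not any(c.isdigit() for c in password):
            raise ValueError("Password must contain a digit")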
diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py
index 4449da6669..3c265f3718 100644
--- a/synapse/handlers/stats.py
+++ b/synapse/handlers/stats.py
@@ -14,15 +14,14 @@
# limitations under the License.
import logging
+from collections import Counter
from twisted.internet import defer
-from synapse.api.constants import EventTypes, JoinRules, Membership
+from synapse.api.constants import EventTypes, Membership
from synapse.handlers.state_deltas import StateDeltasHandler
from synapse.metrics import event_processing_positions
from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.types import UserID
-from synapse.util.metrics import Measure
logger = logging.getLogger(__name__)
@@ -62,11 +61,10 @@ class StatsHandler(StateDeltasHandler):
def notify_new_event(self):
"""Called when there may be more deltas to process
"""
- if not self.hs.config.stats_enabled:
+ if not self.hs.config.stats_enabled or self._is_processing:
return
- if self._is_processing:
- return
+ self._is_processing = True
@defer.inlineCallbacks
def process():
@@ -75,39 +73,72 @@ class StatsHandler(StateDeltasHandler):
finally:
self._is_processing = False
- self._is_processing = True
run_as_background_process("stats.notify_new_event", process)
@defer.inlineCallbacks
def _unsafe_process(self):
# If self.pos is None then means we haven't fetched it from DB
if self.pos is None:
- self.pos = yield self.store.get_stats_stream_pos()
-
- # If still None then the initial background update hasn't happened yet
- if self.pos is None:
- return None
+ self.pos = yield self.store.get_stats_positions()
# Loop round handling deltas until we're up to date
+
while True:
- with Measure(self.clock, "stats_delta"):
- deltas = yield self.store.get_current_state_deltas(self.pos)
- if not deltas:
- return
+ deltas = yield self.store.get_current_state_deltas(self.pos)
+
+ if deltas:
+ logger.debug("Handling %d state deltas", len(deltas))
+ room_deltas, user_deltas = yield self._handle_deltas(deltas)
+
+ max_pos = deltas[-1]["stream_id"]
+ else:
+ room_deltas = {}
+ user_deltas = {}
+ max_pos = yield self.store.get_room_max_stream_ordering()
- logger.info("Handling %d state deltas", len(deltas))
- yield self._handle_deltas(deltas)
+ # Then count deltas for total_events and total_event_bytes.
+ room_count, user_count = yield self.store.get_changes_room_total_events_and_bytes(
+ self.pos, max_pos
+ )
+
+ for room_id, fields in room_count.items():
+ room_deltas.setdefault(room_id, {}).update(fields)
+
+ for user_id, fields in user_count.items():
+ user_deltas.setdefault(user_id, {}).update(fields)
+
+ logger.debug("room_deltas: %s", room_deltas)
+ logger.debug("user_deltas: %s", user_deltas)
- self.pos = deltas[-1]["stream_id"]
- yield self.store.update_stats_stream_pos(self.pos)
+ # Always call this so that we update the stats position.
+ yield self.store.bulk_update_stats_delta(
+ self.clock.time_msec(),
+ updates={"room": room_deltas, "user": user_deltas},
+ stream_id=max_pos,
+ )
+
+ event_processing_positions.labels("stats").set(max_pos)
- event_processing_positions.labels("stats").set(self.pos)
+ if self.pos == max_pos:
+ break
+
+ self.pos = max_pos
@defer.inlineCallbacks
def _handle_deltas(self, deltas):
+ """Called with the state deltas to process
+
+ Returns:
+ Deferred[tuple[dict[str, Counter], dict[str, Counter]]]
+ Resolves to two dicts, the room deltas and the user deltas,
+ mapping from room/user ID to changes in the various fields.
"""
- Called with the state deltas to process
- """
+
+ room_to_stats_deltas = {}
+ user_to_stats_deltas = {}
+
+ room_to_state_updates = {}
+
for delta in deltas:
typ = delta["type"]
state_key = delta["state_key"]
@@ -115,11 +146,10 @@ class StatsHandler(StateDeltasHandler):
event_id = delta["event_id"]
stream_id = delta["stream_id"]
prev_event_id = delta["prev_event_id"]
- stream_pos = delta["stream_id"]
- logger.debug("Handling: %r %r, %s", typ, state_key, event_id)
+ logger.debug("Handling: %r, %r %r, %s", room_id, typ, state_key, event_id)
- token = yield self.store.get_earliest_token_for_room_stats(room_id)
+ token = yield self.store.get_earliest_token_for_stats("room", room_id)
# If the earliest token to begin from is larger than our current
# stream ID, skip processing this delta.
@@ -131,203 +161,132 @@ class StatsHandler(StateDeltasHandler):
continue
if event_id is None and prev_event_id is None:
- # Errr...
+ logger.error(
+ "event ID is None and so is the previous event ID. stream_id: %s",
+ stream_id,
+ )
continue
event_content = {}
+ sender = None
if event_id is not None:
event = yield self.store.get_event(event_id, allow_none=True)
if event:
event_content = event.content or {}
+ sender = event.sender
+
+ # All the values in this dict are deltas (RELATIVE changes)
+ room_stats_delta = room_to_stats_deltas.setdefault(room_id, Counter())
- # We use stream_pos here rather than fetch by event_id as event_id
- # may be None
- now = yield self.store.get_received_ts_by_stream_pos(stream_pos)
+ room_state = room_to_state_updates.setdefault(room_id, {})
- # quantise time to the nearest bucket
- now = (now // 1000 // self.stats_bucket_size) * self.stats_bucket_size
+ if prev_event_id is None:
+ # this state event doesn't overwrite another,
+ # so it is a new effective/current state event
+ room_stats_delta["current_state_events"] += 1
if typ == EventTypes.Member:
# we could use _get_key_change here but it's a bit inefficient
# given we're not testing for a specific result; might as well
# just grab the prev_membership and membership strings and
# compare them.
- prev_event_content = {}
+ # We take None, rather than Membership.LEAVE, as the previous membership
+ # in the absence of a previous event because we do not want to
+ # reduce the leave count when a new-to-the-room user joins.
+ prev_membership = None
if prev_event_id is not None:
prev_event = yield self.store.get_event(
prev_event_id, allow_none=True
)
if prev_event:
prev_event_content = prev_event.content
+ prev_membership = prev_event_content.get(
+ "membership", Membership.LEAVE
+ )
membership = event_content.get("membership", Membership.LEAVE)
- prev_membership = prev_event_content.get("membership", Membership.LEAVE)
-
- if prev_membership == membership:
- continue
- if prev_membership == Membership.JOIN:
- yield self.store.update_stats_delta(
- now, "room", room_id, "joined_members", -1
- )
+ if prev_membership is None:
+ logger.debug("No previous membership for this user.")
+ elif membership == prev_membership:
+ pass # noop
+ elif prev_membership == Membership.JOIN:
+ room_stats_delta["joined_members"] -= 1
elif prev_membership == Membership.INVITE:
- yield self.store.update_stats_delta(
- now, "room", room_id, "invited_members", -1
- )
+ room_stats_delta["invited_members"] -= 1
elif prev_membership == Membership.LEAVE:
- yield self.store.update_stats_delta(
- now, "room", room_id, "left_members", -1
- )
+ room_stats_delta["left_members"] -= 1
elif prev_membership == Membership.BAN:
- yield self.store.update_stats_delta(
- now, "room", room_id, "banned_members", -1
- )
+ room_stats_delta["banned_members"] -= 1
else:
- err = "%s is not a valid prev_membership" % (repr(prev_membership),)
- logger.error(err)
- raise ValueError(err)
+ raise ValueError(
+ "%r is not a valid prev_membership" % (prev_membership,)
+ )
+ if membership == prev_membership:
+ pass # noop
- if membership == Membership.JOIN:
+ elif membership == Membership.JOIN:
- yield self.store.update_stats_delta(
- now, "room", room_id, "joined_members", +1
- )
+ room_stats_delta["joined_members"] += 1
elif membership == Membership.INVITE:
- yield self.store.update_stats_delta(
- now, "room", room_id, "invited_members", +1
- )
+ room_stats_delta["invited_members"] += 1
+
+ if sender and self.is_mine_id(sender):
+ user_to_stats_deltas.setdefault(sender, Counter())[
+ "invites_sent"
+ ] += 1
+
elif membership == Membership.LEAVE:
- yield self.store.update_stats_delta(
- now, "room", room_id, "left_members", +1
- )
+ room_stats_delta["left_members"] += 1
elif membership == Membership.BAN:
- yield self.store.update_stats_delta(
- now, "room", room_id, "banned_members", +1
- )
+ room_stats_delta["banned_members"] += 1
else:
- err = "%s is not a valid membership" % (repr(membership),)
- logger.error(err)
- raise ValueError(err)
+ raise ValueError("%r is not a valid membership" % (membership,))
user_id = state_key
if self.is_mine_id(user_id):
- # update user_stats as it's one of our users
- public = yield self._is_public_room(room_id)
-
- if membership == Membership.LEAVE:
- yield self.store.update_stats_delta(
- now,
- "user",
- user_id,
- "public_rooms" if public else "private_rooms",
- -1,
- )
- elif membership == Membership.JOIN:
- yield self.store.update_stats_delta(
- now,
- "user",
- user_id,
- "public_rooms" if public else "private_rooms",
- +1,
- )
+ # this accounts for transitions like leave → ban and so on.
+ has_changed_joinedness = (prev_membership == Membership.JOIN) != (
+ membership == Membership.JOIN
+ )
- elif typ == EventTypes.Create:
- # Newly created room. Add it with all blank portions.
- yield self.store.update_room_state(
- room_id,
- {
- "join_rules": None,
- "history_visibility": None,
- "encryption": None,
- "name": None,
- "topic": None,
- "avatar": None,
- "canonical_alias": None,
- },
- )
+ if has_changed_joinedness:
+ delta = +1 if membership == Membership.JOIN else -1
- elif typ == EventTypes.JoinRules:
- yield self.store.update_room_state(
- room_id, {"join_rules": event_content.get("join_rule")}
- )
+ user_to_stats_deltas.setdefault(user_id, Counter())[
+ "joined_rooms"
+ ] += delta
- is_public = yield self._get_key_change(
- prev_event_id, event_id, "join_rule", JoinRules.PUBLIC
- )
- if is_public is not None:
- yield self.update_public_room_stats(now, room_id, is_public)
+ room_stats_delta["local_users_in_room"] += delta
- elif typ == EventTypes.RoomHistoryVisibility:
- yield self.store.update_room_state(
- room_id,
- {"history_visibility": event_content.get("history_visibility")},
+ elif typ == EventTypes.Create:
+ room_state["is_federatable"] = (
+ event_content.get("m.federate", True) is True
)
-
- is_public = yield self._get_key_change(
- prev_event_id, event_id, "history_visibility", "world_readable"
+ if sender and self.is_mine_id(sender):
+ user_to_stats_deltas.setdefault(sender, Counter())[
+ "rooms_created"
+ ] += 1
+ elif typ == EventTypes.JoinRules:
+ room_state["join_rules"] = event_content.get("join_rule")
+ elif typ == EventTypes.RoomHistoryVisibility:
+ room_state["history_visibility"] = event_content.get(
+ "history_visibility"
)
- if is_public is not None:
- yield self.update_public_room_stats(now, room_id, is_public)
-
elif typ == EventTypes.Encryption:
- yield self.store.update_room_state(
- room_id, {"encryption": event_content.get("algorithm")}
- )
+ room_state["encryption"] = event_content.get("algorithm")
elif typ == EventTypes.Name:
- yield self.store.update_room_state(
- room_id, {"name": event_content.get("name")}
- )
+ room_state["name"] = event_content.get("name")
elif typ == EventTypes.Topic:
- yield self.store.update_room_state(
- room_id, {"topic": event_content.get("topic")}
- )
+ room_state["topic"] = event_content.get("topic")
elif typ == EventTypes.RoomAvatar:
- yield self.store.update_room_state(
- room_id, {"avatar": event_content.get("url")}
- )
+ room_state["avatar"] = event_content.get("url")
elif typ == EventTypes.CanonicalAlias:
- yield self.store.update_room_state(
- room_id, {"canonical_alias": event_content.get("alias")}
- )
+ room_state["canonical_alias"] = event_content.get("alias")
+ elif typ == EventTypes.GuestAccess:
+ room_state["guest_access"] = event_content.get("guest_access")
- @defer.inlineCallbacks
- def update_public_room_stats(self, ts, room_id, is_public):
- """
- Increment/decrement a user's number of public rooms when a room they are
- in changes to/from public visibility.
+ for room_id, state in room_to_state_updates.items():
+ yield self.store.update_room_state(room_id, state)
- Args:
- ts (int): Timestamp in seconds
- room_id (str)
- is_public (bool)
- """
- # For now, blindly iterate over all local users in the room so that
- # we can handle the whole problem of copying buckets over as needed
- user_ids = yield self.store.get_users_in_room(room_id)
-
- for user_id in user_ids:
- if self.hs.is_mine(UserID.from_string(user_id)):
- yield self.store.update_stats_delta(
- ts, "user", user_id, "public_rooms", +1 if is_public else -1
- )
- yield self.store.update_stats_delta(
- ts, "user", user_id, "private_rooms", -1 if is_public else +1
- )
-
- @defer.inlineCallbacks
- def _is_public_room(self, room_id):
- join_rules = yield self.state.get_current_state(room_id, EventTypes.JoinRules)
- history_visibility = yield self.state.get_current_state(
- room_id, EventTypes.RoomHistoryVisibility
- )
-
- if (join_rules and join_rules.content.get("join_rule") == JoinRules.PUBLIC) or (
- (
- history_visibility
- and history_visibility.content.get("history_visibility")
- == "world_readable"
- )
- ):
- return True
- else:
- return False
+ return room_to_stats_deltas, user_to_stats_deltas
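The rewritten handler accumulates stats as relative deltas in Counters and flushes once per batch, rather than issuing one database update per change. A condensed sketch of the accumulation pattern used above:

    from collections import Counter

    room_to_stats_deltas = {}

    def bump(room_id, field, delta):
        # setdefault + Counter lets each transition adjust a field
        # without initialising every key up front.
        room_to_stats_deltas.setdefault(room_id, Counter())[field] += delta

    bump("!a:example.com", "joined_members", +1)
    bump("!a:example.com", "left_members", -1)
    # Then one bulk write per batch, e.g.
    # store.bulk_update_stats_delta(now, updates={"room": room_to_stats_deltas, ...}, stream_id=max_pos)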
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 98da2318a0..19bca6717f 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -378,7 +378,7 @@ class SyncHandler(object):
event_copy = {k: v for (k, v) in iteritems(event) if k != "room_id"}
ephemeral_by_room.setdefault(room_id, []).append(event_copy)
- return (now_token, ephemeral_by_room)
+ return now_token, ephemeral_by_room
@defer.inlineCallbacks
def _load_filtered_recents(
@@ -578,7 +578,6 @@ class SyncHandler(object):
if not last_events:
return None
- return
last_event = last_events[-1]
state_ids = yield self.store.get_state_ids_for_event(
@@ -786,9 +785,8 @@ class SyncHandler(object):
batch.events[0].event_id, state_filter=state_filter
)
else:
- # Its not clear how we get here, but empirically we do
- # (#5407). Logging has been added elsewhere to try and
- # figure out where this state comes from.
+ # We can get here if the user has ignored the senders of all
+ # the recent events.
state_at_timeline_start = yield self.get_state_at(
room_id, stream_position=now_token, state_filter=state_filter
)
@@ -1333,7 +1331,7 @@ class SyncHandler(object):
)
if not tags_by_room:
logger.debug("no-oping sync")
- return ([], [], [], [])
+ return [], [], [], []
ignored_account_data = yield self.store.get_global_account_data_by_type_for_user(
"m.ignored_user_list", user_id=user_id
@@ -1643,7 +1641,7 @@ class SyncHandler(object):
)
room_entries.append(entry)
- return (room_entries, invited, newly_joined_rooms, newly_left_rooms)
+ return room_entries, invited, newly_joined_rooms, newly_left_rooms
@defer.inlineCallbacks
def _get_all_rooms(self, sync_result_builder, ignored_users):
@@ -1717,7 +1715,7 @@ class SyncHandler(object):
)
)
- return (room_entries, invited, [])
+ return room_entries, invited, []
@defer.inlineCallbacks
def _generate_room_entry(
@@ -1771,20 +1769,9 @@ class SyncHandler(object):
newly_joined_room=newly_joined,
)
- if not batch and batch.limited:
- # This resulted in #5407, which is weird, so lets log! We do it
- # here as we have the maximum amount of information.
- user_id = sync_result_builder.sync_config.user.to_string()
- logger.info(
- "Issue #5407: Found limited batch with no events. user %s, room %s,"
- " sync_config %s, newly_joined %s, events %s, batch %s.",
- user_id,
- room_id,
- sync_config,
- newly_joined,
- events,
- batch,
- )
+ # Note: `batch` can be both empty and limited here in the case where
+ # `_load_filtered_recents` can't find any events the user should see
+ # (e.g. due to having ignored the sender of the last 50 events).
if newly_joined:
# debug for https://github.com/matrix-org/synapse/issues/4422
diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py
index f882330293..ca8ae9fb5b 100644
--- a/synapse/handlers/typing.py
+++ b/synapse/handlers/typing.py
@@ -319,4 +319,4 @@ class TypingNotificationEventSource(object):
return self.get_typing_handler()._latest_room_serial
def get_pagination_rows(self, user, pagination_config, key):
- return ([], pagination_config.from_key)
+ return [], pagination_config.from_key
diff --git a/synapse/http/client.py b/synapse/http/client.py
index 0ac20ebefc..4098855c51 100644
--- a/synapse/http/client.py
+++ b/synapse/http/client.py
@@ -35,7 +35,7 @@ from twisted.internet.interfaces import (
)
from twisted.python.failure import Failure
from twisted.web._newclient import ResponseDone
-from twisted.web.client import Agent, HTTPConnectionPool, PartialDownloadError, readBody
+from twisted.web.client import Agent, HTTPConnectionPool, readBody
from twisted.web.http import PotentialDataLoss
from twisted.web.http_headers import Headers
@@ -45,7 +45,9 @@ from synapse.http import (
cancelled_to_request_timed_out_error,
redact_uri,
)
+from synapse.http.proxyagent import ProxyAgent
from synapse.logging.context import make_deferred_yieldable
+from synapse.logging.opentracing import set_tag, start_active_span, tags
from synapse.util.async_helpers import timeout_deferred
from synapse.util.caches import CACHE_SIZE_FACTOR
@@ -182,7 +184,15 @@ class SimpleHttpClient(object):
using HTTP in Matrix
"""
- def __init__(self, hs, treq_args={}, ip_whitelist=None, ip_blacklist=None):
+ def __init__(
+ self,
+ hs,
+ treq_args={},
+ ip_whitelist=None,
+ ip_blacklist=None,
+ http_proxy=None,
+ https_proxy=None,
+ ):
"""
Args:
hs (synapse.server.HomeServer)
@@ -191,6 +201,8 @@ class SimpleHttpClient(object):
we may not request.
ip_whitelist (netaddr.IPSet): The whitelisted IP addresses, that we can
request if it were otherwise caught in a blacklist.
+ http_proxy (bytes): proxy server to use for http connections. host[:port]
+ https_proxy (bytes): proxy server to use for https connections. host[:port]
"""
self.hs = hs
@@ -235,11 +247,13 @@ class SimpleHttpClient(object):
# The default context factory in Twisted 14.0.0 (which we require) is
# BrowserLikePolicyForHTTPS which will do regular cert validation
# 'like a browser'
- self.agent = Agent(
+ self.agent = ProxyAgent(
self.reactor,
connectTimeout=15,
contextFactory=self.hs.get_http_client_context_factory(),
pool=pool,
+ http_proxy=http_proxy,
+ https_proxy=https_proxy,
)
if self._ip_blacklist:
@@ -269,42 +283,56 @@ class SimpleHttpClient(object):
# log request but strip `access_token` (AS requests for example include this)
logger.info("Sending request %s %s", method, redact_uri(uri))
- try:
- body_producer = None
- if data is not None:
- body_producer = QuieterFileBodyProducer(BytesIO(data))
-
- request_deferred = treq.request(
- method,
- uri,
- agent=self.agent,
- data=body_producer,
- headers=headers,
- **self._extra_treq_args
- )
- request_deferred = timeout_deferred(
- request_deferred,
- 60,
- self.hs.get_reactor(),
- cancelled_to_request_timed_out_error,
- )
- response = yield make_deferred_yieldable(request_deferred)
-
- incoming_responses_counter.labels(method, response.code).inc()
- logger.info(
- "Received response to %s %s: %s", method, redact_uri(uri), response.code
- )
- return response
- except Exception as e:
- incoming_responses_counter.labels(method, "ERR").inc()
- logger.info(
- "Error sending request to %s %s: %s %s",
- method,
- redact_uri(uri),
- type(e).__name__,
- e.args[0],
- )
- raise
+ with start_active_span(
+ "outgoing-client-request",
+ tags={
+ tags.SPAN_KIND: tags.SPAN_KIND_RPC_CLIENT,
+ tags.HTTP_METHOD: method,
+ tags.HTTP_URL: uri,
+ },
+ finish_on_close=True,
+ ):
+ try:
+ body_producer = None
+ if data is not None:
+ body_producer = QuieterFileBodyProducer(BytesIO(data))
+
+ request_deferred = treq.request(
+ method,
+ uri,
+ agent=self.agent,
+ data=body_producer,
+ headers=headers,
+ **self._extra_treq_args
+ )
+ request_deferred = timeout_deferred(
+ request_deferred,
+ 60,
+ self.hs.get_reactor(),
+ cancelled_to_request_timed_out_error,
+ )
+ response = yield make_deferred_yieldable(request_deferred)
+
+ incoming_responses_counter.labels(method, response.code).inc()
+ logger.info(
+ "Received response to %s %s: %s",
+ method,
+ redact_uri(uri),
+ response.code,
+ )
+ return response
+ except Exception as e:
+ incoming_responses_counter.labels(method, "ERR").inc()
+ logger.info(
+ "Error sending request to %s %s: %s %s",
+ method,
+ redact_uri(uri),
+ type(e).__name__,
+ e.args[0],
+ )
+ set_tag(tags.ERROR, True)
+ set_tag("error_reason", e.args[0])
+ raise
@defer.inlineCallbacks
def post_urlencoded_get_json(self, uri, args={}, headers=None):
@@ -599,38 +627,6 @@ def _readBodyToFile(response, stream, max_size):
return d
-class CaptchaServerHttpClient(SimpleHttpClient):
- """
- Separate HTTP client for talking to google's captcha servers
- Only slightly special because accepts partial download responses
-
- used only by c/s api v1
- """
-
- @defer.inlineCallbacks
- def post_urlencoded_get_raw(self, url, args={}):
- query_bytes = urllib.parse.urlencode(encode_urlencode_args(args), True)
-
- response = yield self.request(
- "POST",
- url,
- data=query_bytes,
- headers=Headers(
- {
- b"Content-Type": [b"application/x-www-form-urlencoded"],
- b"User-Agent": [self.user_agent],
- }
- ),
- )
-
- try:
- body = yield make_deferred_yieldable(readBody(response))
- return body
- except PartialDownloadError as e:
- # twisted dislikes google's response, no content length.
- return e.response
-
-
def encode_urlencode_args(args):
return {k: encode_urlencode_arg(v) for k, v in args.items()}
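The new `http_proxy`/`https_proxy` arguments take plain `host[:port]` byte strings. A minimal sketch of how a caller might wire them up from the conventional environment variables (the `os.environb` lookup is illustrative, not part of this patch, and POSIX-only; `hs` is a placeholder homeserver):

import os

from synapse.http.client import SimpleHttpClient

# Hypothetical wiring: the new arguments expect host[:port] as bytes, so we
# read the conventional lowercase proxy variables via os.environb.
http_proxy = os.environb.get(b"http_proxy")
https_proxy = os.environb.get(b"https_proxy")

client = SimpleHttpClient(
    hs,  # an existing synapse.server.HomeServer instance (placeholder)
    http_proxy=http_proxy,
    https_proxy=https_proxy,
)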
diff --git a/synapse/http/connectproxyclient.py b/synapse/http/connectproxyclient.py
new file mode 100644
index 0000000000..be7b2ceb8e
--- /dev/null
+++ b/synapse/http/connectproxyclient.py
@@ -0,0 +1,195 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from zope.interface import implementer
+
+from twisted.internet import defer, protocol
+from twisted.internet.error import ConnectError
+from twisted.internet.interfaces import IStreamClientEndpoint
+from twisted.internet.protocol import connectionDone
+from twisted.web import http
+
+logger = logging.getLogger(__name__)
+
+
+class ProxyConnectError(ConnectError):
+ pass
+
+
+@implementer(IStreamClientEndpoint)
+class HTTPConnectProxyEndpoint(object):
+ """An Endpoint implementation which will send a CONNECT request to an http proxy
+
+ Wraps an existing HostnameEndpoint for the proxy.
+
+ When we get the connect() request from the connection pool (via the TLS wrapper),
+ we'll first connect to the proxy endpoint with a ProtocolFactory which will make the
+ CONNECT request. Once that completes, we invoke the protocolFactory which was passed
+ in.
+
+ Args:
+ reactor: the Twisted reactor to use for the connection
+ proxy_endpoint (IStreamClientEndpoint): the endpoint to use to connect to the
+ proxy
+ host (bytes): hostname that we want to CONNECT to
+ port (int): port that we want to connect to
+ """
+
+ def __init__(self, reactor, proxy_endpoint, host, port):
+ self._reactor = reactor
+ self._proxy_endpoint = proxy_endpoint
+ self._host = host
+ self._port = port
+
+ def __repr__(self):
+ return "<HTTPConnectProxyEndpoint %s>" % (self._proxy_endpoint,)
+
+ def connect(self, protocolFactory):
+ f = HTTPProxiedClientFactory(self._host, self._port, protocolFactory)
+ d = self._proxy_endpoint.connect(f)
+ # once the tcp socket connects successfully, we need to wait for the
+ # CONNECT to complete.
+ d.addCallback(lambda conn: f.on_connection)
+ return d
+
+
+class HTTPProxiedClientFactory(protocol.ClientFactory):
+ """ClientFactory wrapper that triggers an HTTP proxy CONNECT on connect.
+
+ Once the CONNECT completes, invokes the original ClientFactory to build the
+ HTTP Protocol object and run the rest of the connection.
+
+ Args:
+ dst_host (bytes): hostname that we want to CONNECT to
+ dst_port (int): port that we want to connect to
+ wrapped_factory (protocol.ClientFactory): The original Factory
+ """
+
+ def __init__(self, dst_host, dst_port, wrapped_factory):
+ self.dst_host = dst_host
+ self.dst_port = dst_port
+ self.wrapped_factory = wrapped_factory
+ self.on_connection = defer.Deferred()
+
+ def startedConnecting(self, connector):
+ return self.wrapped_factory.startedConnecting(connector)
+
+ def buildProtocol(self, addr):
+ wrapped_protocol = self.wrapped_factory.buildProtocol(addr)
+
+ return HTTPConnectProtocol(
+ self.dst_host, self.dst_port, wrapped_protocol, self.on_connection
+ )
+
+ def clientConnectionFailed(self, connector, reason):
+ logger.debug("Connection to proxy failed: %s", reason)
+ if not self.on_connection.called:
+ self.on_connection.errback(reason)
+ return self.wrapped_factory.clientConnectionFailed(connector, reason)
+
+ def clientConnectionLost(self, connector, reason):
+ logger.debug("Connection to proxy lost: %s", reason)
+ if not self.on_connection.called:
+ self.on_connection.errback(reason)
+ return self.wrapped_factory.clientConnectionLost(connector, reason)
+
+
+class HTTPConnectProtocol(protocol.Protocol):
+ """Protocol that wraps an existing Protocol to do a CONNECT handshake at connect
+
+ Args:
+ host (bytes): The original HTTP(s) hostname or IPv4 or IPv6 address literal
+ to put in the CONNECT request
+
+ port (int): The original HTTP(s) port to put in the CONNECT request
+
+ wrapped_protocol (interfaces.IProtocol): the original protocol (probably
+ HTTPChannel or TLSMemoryBIOProtocol, but could be anything really)
+
+ connected_deferred (Deferred): a Deferred which will be callbacked with
+ wrapped_protocol when the CONNECT completes
+ """
+
+ def __init__(self, host, port, wrapped_protocol, connected_deferred):
+ self.host = host
+ self.port = port
+ self.wrapped_protocol = wrapped_protocol
+ self.connected_deferred = connected_deferred
+ self.http_setup_client = HTTPConnectSetupClient(self.host, self.port)
+ self.http_setup_client.on_connected.addCallback(self.proxyConnected)
+
+ def connectionMade(self):
+ self.http_setup_client.makeConnection(self.transport)
+
+ def connectionLost(self, reason=connectionDone):
+ if self.wrapped_protocol.connected:
+ self.wrapped_protocol.connectionLost(reason)
+
+ self.http_setup_client.connectionLost(reason)
+
+ if not self.connected_deferred.called:
+ self.connected_deferred.errback(reason)
+
+ def proxyConnected(self, _):
+ self.wrapped_protocol.makeConnection(self.transport)
+
+ self.connected_deferred.callback(self.wrapped_protocol)
+
+ # Get any pending data from the http buf and forward it to the original protocol
+ buf = self.http_setup_client.clearLineBuffer()
+ if buf:
+ self.wrapped_protocol.dataReceived(buf)
+
+ def dataReceived(self, data):
+ # if we've set up the HTTP protocol, we can send the data there
+ if self.wrapped_protocol.connected:
+ return self.wrapped_protocol.dataReceived(data)
+
+ # otherwise, we must still be setting up the connection: send the data to the
+ # setup client
+ return self.http_setup_client.dataReceived(data)
+
+
+class HTTPConnectSetupClient(http.HTTPClient):
+ """HTTPClient protocol to send a CONNECT message for proxies and read the response.
+
+ Args:
+ host (bytes): The hostname to send in the CONNECT message
+ port (int): The port to send in the CONNECT message
+ """
+
+ def __init__(self, host, port):
+ self.host = host
+ self.port = port
+ self.on_connected = defer.Deferred()
+
+ def connectionMade(self):
+ logger.debug("Connected to proxy, sending CONNECT")
+ self.sendCommand(b"CONNECT", b"%s:%d" % (self.host, self.port))
+ self.endHeaders()
+
+ def handleStatus(self, version, status, message):
+ logger.debug("Got Status: %s %s %s", status, message, version)
+ if status != b"200":
+ raise ProxyConnectError("Unexpected status on CONNECT: %s" % status)
+
+ def handleEndHeaders(self):
+ logger.debug("End Headers")
+ self.on_connected.callback(None)
+
+ def handleResponse(self, body):
+ pass
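A minimal sketch of how HTTPConnectProxyEndpoint composes with a plain HostnameEndpoint (the proxy address here is a placeholder):

from twisted.internet import reactor
from twisted.internet.endpoints import HostnameEndpoint

from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint

# Endpoint for the raw TCP connection to the proxy itself.
proxy_endpoint = HostnameEndpoint(reactor, b"proxy.example.com", 8888)

# Wrapping it means connect() first issues "CONNECT matrix.org:443" to the
# proxy, and only hands the tunnelled transport to the wrapped protocol
# factory once the proxy replies 200.
endpoint = HTTPConnectProxyEndpoint(reactor, proxy_endpoint, b"matrix.org", 443)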
diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py
index 71a15f434d..feae7de5be 100644
--- a/synapse/http/federation/matrix_federation_agent.py
+++ b/synapse/http/federation/matrix_federation_agent.py
@@ -14,21 +14,21 @@
# limitations under the License.
import logging
+import urllib
-import attr
-from netaddr import IPAddress
+from netaddr import AddrFormatError, IPAddress
from zope.interface import implementer
from twisted.internet import defer
from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
from twisted.internet.interfaces import IStreamClientEndpoint
-from twisted.web.client import URI, Agent, HTTPConnectionPool
+from twisted.web.client import Agent, HTTPConnectionPool
from twisted.web.http_headers import Headers
-from twisted.web.iweb import IAgent
+from twisted.web.iweb import IAgent, IAgentEndpointFactory
-from synapse.http.federation.srv_resolver import SrvResolver, pick_server_from_list
+from synapse.http.federation.srv_resolver import Server, SrvResolver
from synapse.http.federation.well_known_resolver import WellKnownResolver
-from synapse.logging.context import make_deferred_yieldable
+from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.util import Clock
logger = logging.getLogger(__name__)
@@ -36,8 +36,9 @@ logger = logging.getLogger(__name__)
@implementer(IAgent)
class MatrixFederationAgent(object):
- """An Agent-like thing which provides a `request` method which will look up a matrix
- server and send an HTTP request to it.
+ """An Agent-like thing which provides a `request` method which correctly
+ handles resolving matrix server names when using matrix://. Handles standard
+ https URIs as normal.
Doesn't implement any retries. (Those are done in MatrixFederationHttpClient.)
@@ -51,9 +52,9 @@ class MatrixFederationAgent(object):
SRVResolver impl to use for looking up SRV records. None to use a default
implementation.
- _well_known_cache (TTLCache|None):
- TTLCache impl for storing cached well-known lookups. None to use a default
- implementation.
+ _well_known_resolver (WellKnownResolver|None):
+ WellKnownResolver to use to perform well-known lookups. None to use a
+ default implementation.
"""
def __init__(
@@ -61,49 +62,49 @@ class MatrixFederationAgent(object):
reactor,
tls_client_options_factory,
_srv_resolver=None,
- _well_known_cache=None,
+ _well_known_resolver=None,
):
self._reactor = reactor
self._clock = Clock(reactor)
-
- self._tls_client_options_factory = tls_client_options_factory
- if _srv_resolver is None:
- _srv_resolver = SrvResolver()
- self._srv_resolver = _srv_resolver
-
self._pool = HTTPConnectionPool(reactor)
self._pool.retryAutomatically = False
self._pool.maxPersistentPerHost = 5
self._pool.cachedConnectionTimeout = 2 * 60
- self._well_known_resolver = WellKnownResolver(
+ self._agent = Agent.usingEndpointFactory(
self._reactor,
- agent=Agent(
- self._reactor,
- pool=self._pool,
- contextFactory=tls_client_options_factory,
+ MatrixHostnameEndpointFactory(
+ reactor, tls_client_options_factory, _srv_resolver
),
- well_known_cache=_well_known_cache,
+ pool=self._pool,
)
+ if _well_known_resolver is None:
+ _well_known_resolver = WellKnownResolver(
+ self._reactor,
+ agent=Agent(
+ self._reactor,
+ pool=self._pool,
+ contextFactory=tls_client_options_factory,
+ ),
+ )
+
+ self._well_known_resolver = _well_known_resolver
+
@defer.inlineCallbacks
def request(self, method, uri, headers=None, bodyProducer=None):
"""
Args:
method (bytes): HTTP method: GET/POST/etc
-
uri (bytes): Absolute URI to be retrieved
-
headers (twisted.web.http_headers.Headers|None):
HTTP headers to send with the request, or None to
send no extra headers.
-
bodyProducer (twisted.web.iweb.IBodyProducer|None):
An object which can generate bytes to make up the
body of this request (for example, the properly encoded contents of
a file for a file upload). Or None if the request is to have
no body.
-
Returns:
Deferred[twisted.web.iweb.IResponse]:
fires when the header of the response has been received (regardless of the
@@ -111,210 +112,207 @@ class MatrixFederationAgent(object):
response from being received (including problems that prevent the request
from being sent).
"""
- parsed_uri = URI.fromBytes(uri, defaultPort=-1)
- res = yield self._route_matrix_uri(parsed_uri)
+ # We use urlparse as that will set `port` to None if there is no
+ # explicit port.
+ parsed_uri = urllib.parse.urlparse(uri)
- # set up the TLS connection params
+ # If this is a matrix:// URI check if the server has delegated matrix
+ # traffic using well-known delegation.
#
- # XXX disabling TLS is really only supported here for the benefit of the
- # unit tests. We should make the UTs cope with TLS rather than having to make
- # the code support the unit tests.
- if self._tls_client_options_factory is None:
- tls_options = None
- else:
- tls_options = self._tls_client_options_factory.get_options(
- res.tls_server_name.decode("ascii")
+ # We have to do this here and not in the endpoint as we need to rewrite
+ # the host header with the delegated server name.
+ delegated_server = None
+ if (
+ parsed_uri.scheme == b"matrix"
+ and not _is_ip_literal(parsed_uri.hostname)
+ and not parsed_uri.port
+ ):
+ well_known_result = yield self._well_known_resolver.get_well_known(
+ parsed_uri.hostname
+ )
+ delegated_server = well_known_result.delegated_server
+
+ if delegated_server:
+ # Ok, the server has delegated matrix traffic to somewhere else, so
+ # lets rewrite the URL to replace the server with the delegated
+ # server name.
+ uri = urllib.parse.urlunparse(
+ (
+ parsed_uri.scheme,
+ delegated_server,
+ parsed_uri.path,
+ parsed_uri.params,
+ parsed_uri.query,
+ parsed_uri.fragment,
+ )
)
+ parsed_uri = urllib.parse.urlparse(uri)
- # make sure that the Host header is set correctly
+ # We need to make sure the host header is set to the netloc of the
+ # server.
if headers is None:
headers = Headers()
else:
headers = headers.copy()
if not headers.hasHeader(b"host"):
- headers.addRawHeader(b"host", res.host_header)
+ headers.addRawHeader(b"host", parsed_uri.netloc)
- class EndpointFactory(object):
- @staticmethod
- def endpointForURI(_uri):
- ep = LoggingHostnameEndpoint(
- self._reactor, res.target_host, res.target_port
- )
- if tls_options is not None:
- ep = wrapClientTLS(tls_options, ep)
- return ep
-
- agent = Agent.usingEndpointFactory(self._reactor, EndpointFactory(), self._pool)
res = yield make_deferred_yieldable(
- agent.request(method, uri, headers, bodyProducer)
+ self._agent.request(method, uri, headers, bodyProducer)
)
+
return res
- @defer.inlineCallbacks
- def _route_matrix_uri(self, parsed_uri, lookup_well_known=True):
- """Helper for `request`: determine the routing for a Matrix URI
- Args:
- parsed_uri (twisted.web.client.URI): uri to route. Note that it should be
- parsed with URI.fromBytes(uri, defaultPort=-1) to set the `port` to -1
- if there is no explicit port given.
+@implementer(IAgentEndpointFactory)
+class MatrixHostnameEndpointFactory(object):
+ """Factory for MatrixHostnameEndpoint for parsing to an Agent.
+ """
- lookup_well_known (bool): True if we should look up the .well-known file if
- there is no SRV record.
+ def __init__(self, reactor, tls_client_options_factory, srv_resolver):
+ self._reactor = reactor
+ self._tls_client_options_factory = tls_client_options_factory
- Returns:
- Deferred[_RoutingResult]
- """
- # check for an IP literal
- try:
- ip_address = IPAddress(parsed_uri.host.decode("ascii"))
- except Exception:
- # not an IP address
- ip_address = None
-
- if ip_address:
- port = parsed_uri.port
- if port == -1:
- port = 8448
- return _RoutingResult(
- host_header=parsed_uri.netloc,
- tls_server_name=parsed_uri.host,
- target_host=parsed_uri.host,
- target_port=port,
- )
+ if srv_resolver is None:
+ srv_resolver = SrvResolver()
- if parsed_uri.port != -1:
- # there is an explicit port
- return _RoutingResult(
- host_header=parsed_uri.netloc,
- tls_server_name=parsed_uri.host,
- target_host=parsed_uri.host,
- target_port=parsed_uri.port,
- )
+ self._srv_resolver = srv_resolver
- if lookup_well_known:
- # try a .well-known lookup
- well_known_result = yield self._well_known_resolver.get_well_known(
- parsed_uri.host
- )
- well_known_server = well_known_result.delegated_server
-
- if well_known_server:
- # if we found a .well-known, start again, but don't do another
- # .well-known lookup.
-
- # parse the server name in the .well-known response into host/port.
- # (This code is lifted from twisted.web.client.URI.fromBytes).
- if b":" in well_known_server:
- well_known_host, well_known_port = well_known_server.rsplit(b":", 1)
- try:
- well_known_port = int(well_known_port)
- except ValueError:
- # the part after the colon could not be parsed as an int
- # - we assume it is an IPv6 literal with no port (the closing
- # ']' stops it being parsed as an int)
- well_known_host, well_known_port = well_known_server, -1
- else:
- well_known_host, well_known_port = well_known_server, -1
-
- new_uri = URI(
- scheme=parsed_uri.scheme,
- netloc=well_known_server,
- host=well_known_host,
- port=well_known_port,
- path=parsed_uri.path,
- params=parsed_uri.params,
- query=parsed_uri.query,
- fragment=parsed_uri.fragment,
- )
+ def endpointForURI(self, parsed_uri):
+ return MatrixHostnameEndpoint(
+ self._reactor,
+ self._tls_client_options_factory,
+ self._srv_resolver,
+ parsed_uri,
+ )
- res = yield self._route_matrix_uri(new_uri, lookup_well_known=False)
- return res
-
- # try a SRV lookup
- service_name = b"_matrix._tcp.%s" % (parsed_uri.host,)
- server_list = yield self._srv_resolver.resolve_service(service_name)
-
- if not server_list:
- target_host = parsed_uri.host
- port = 8448
- logger.debug(
- "No SRV record for %s, using %s:%i",
- parsed_uri.host.decode("ascii"),
- target_host.decode("ascii"),
- port,
- )
+
+@implementer(IStreamClientEndpoint)
+class MatrixHostnameEndpoint(object):
+ """An endpoint that resolves matrix:// URLs using Matrix server name
+ resolution (i.e. via SRV). Does not check for well-known delegation.
+
+ Args:
+ reactor (IReactor)
+ tls_client_options_factory (ClientTLSOptionsFactory|None):
+ factory to use for fetching client tls options, or none to disable TLS.
+ srv_resolver (SrvResolver): The SRV resolver to use
+ parsed_uri (twisted.web.client.URI): The parsed URI that we're wanting
+ to connect to.
+ """
+
+ def __init__(self, reactor, tls_client_options_factory, srv_resolver, parsed_uri):
+ self._reactor = reactor
+
+ self._parsed_uri = parsed_uri
+
+ # set up the TLS connection params
+ #
+ # XXX disabling TLS is really only supported here for the benefit of the
+ # unit tests. We should make the UTs cope with TLS rather than having to make
+ # the code support the unit tests.
+
+ if tls_client_options_factory is None:
+ self._tls_options = None
else:
- target_host, port = pick_server_from_list(server_list)
- logger.debug(
- "Picked %s:%i from SRV records for %s",
- target_host.decode("ascii"),
- port,
- parsed_uri.host.decode("ascii"),
+ self._tls_options = tls_client_options_factory.get_options(
+ self._parsed_uri.host.decode("ascii")
)
- return _RoutingResult(
- host_header=parsed_uri.netloc,
- tls_server_name=parsed_uri.host,
- target_host=target_host,
- target_port=port,
- )
+ self._srv_resolver = srv_resolver
+ def connect(self, protocol_factory):
+ """Implements IStreamClientEndpoint interface
+ """
-@implementer(IStreamClientEndpoint)
-class LoggingHostnameEndpoint(object):
- """A wrapper for HostnameEndpint which logs when it connects"""
+ return run_in_background(self._do_connect, protocol_factory)
- def __init__(self, reactor, host, port, *args, **kwargs):
- self.host = host
- self.port = port
- self.ep = HostnameEndpoint(reactor, host, port, *args, **kwargs)
+ @defer.inlineCallbacks
+ def _do_connect(self, protocol_factory):
+ first_exception = None
+
+ server_list = yield self._resolve_server()
+
+ for server in server_list:
+ host = server.host
+ port = server.port
+
+ try:
+ logger.info("Connecting to %s:%i", host.decode("ascii"), port)
+ endpoint = HostnameEndpoint(self._reactor, host, port)
+ if self._tls_options:
+ endpoint = wrapClientTLS(self._tls_options, endpoint)
+ result = yield make_deferred_yieldable(
+ endpoint.connect(protocol_factory)
+ )
- def connect(self, protocol_factory):
- logger.info("Connecting to %s:%i", self.host.decode("ascii"), self.port)
- return self.ep.connect(protocol_factory)
+ return result
+ except Exception as e:
+ logger.info(
+ "Failed to connect to %s:%i: %s", host.decode("ascii"), port, e
+ )
+ if not first_exception:
+ first_exception = e
+ # We return the first failure because that's probably the most interesting.
+ if first_exception:
+ raise first_exception
-@attr.s
-class _RoutingResult(object):
- """The result returned by `_route_matrix_uri`.
+ # This shouldn't happen as we should always have at least one host/port
+        # to try, and if that doesn't work then we'll have an exception.
+ raise Exception("Failed to resolve server %r" % (self._parsed_uri.netloc,))
- Contains the parameters needed to direct a federation connection to a particular
- server.
+ @defer.inlineCallbacks
+ def _resolve_server(self):
+ """Resolves the server name to a list of hosts and ports to attempt to
+ connect to.
- Where a SRV record points to several servers, this object contains a single server
- chosen from the list.
- """
+ Returns:
+ Deferred[list[Server]]
+ """
- host_header = attr.ib()
- """
- The value we should assign to the Host header (host:port from the matrix
- URI, or .well-known).
+ if self._parsed_uri.scheme != b"matrix":
+ return [Server(host=self._parsed_uri.host, port=self._parsed_uri.port)]
- :type: bytes
- """
+ # Note: We don't do well-known lookup as that needs to have happened
+ # before now, due to needing to rewrite the Host header of the HTTP
+ # request.
- tls_server_name = attr.ib()
- """
- The server name we should set in the SNI (typically host, without port, from the
- matrix URI or .well-known)
+        # We reparse the URI with urllib so that `port` comes back as None
+        # when no explicit port was given, rather than a filled-in default.
+ parsed_uri = urllib.parse.urlparse(self._parsed_uri.toBytes())
- :type: bytes
- """
+ host = parsed_uri.hostname
+ port = parsed_uri.port
- target_host = attr.ib()
- """
- The hostname (or IP literal) we should route the TCP connection to (the target of the
- SRV record, or the hostname from the URL/.well-known)
+ # If there is an explicit port or the host is an IP address we bypass
+ # SRV lookups and just use the given host/port.
+ if port or _is_ip_literal(host):
+ return [Server(host, port or 8448)]
- :type: bytes
- """
+ server_list = yield self._srv_resolver.resolve_service(b"_matrix._tcp." + host)
+
+ if server_list:
+ return server_list
+
+        # No SRV records, so we fall back to the hostname and port 8448
+ return [Server(host, 8448)]
- target_port = attr.ib()
- """
- The port we should route the TCP connection to (the target of the SRV record, or
- the port from the URL/.well-known, or 8448)
- :type: int
+def _is_ip_literal(host):
+ """Test if the given host name is either an IPv4 or IPv6 literal.
+
+ Args:
+ host (bytes)
+
+ Returns:
+ bool
"""
+
+ host = host.decode("ascii")
+
+ try:
+ IPAddress(host)
+ return True
+ except AddrFormatError:
+ return False
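For reference, _is_ip_literal is what lets the agent skip well-known and SRV lookups for literal targets; a few illustrative cases (assuming the module-private helper is imported directly):

from synapse.http.federation.matrix_federation_agent import _is_ip_literal

assert _is_ip_literal(b"10.0.0.1")        # IPv4 literal
assert _is_ip_literal(b"::1")             # IPv6 literal
assert not _is_ip_literal(b"matrix.org")  # hostname: SRV/well-known apply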
diff --git a/synapse/http/federation/srv_resolver.py b/synapse/http/federation/srv_resolver.py
index b32188766d..3fe4ffb9e5 100644
--- a/synapse/http/federation/srv_resolver.py
+++ b/synapse/http/federation/srv_resolver.py
@@ -32,7 +32,7 @@ logger = logging.getLogger(__name__)
SERVER_CACHE = {}
-@attr.s
+@attr.s(slots=True, frozen=True)
class Server(object):
"""
Our record of an individual server which can be tried to reach a destination.
@@ -53,34 +53,47 @@ class Server(object):
expires = attr.ib(default=0)
-def pick_server_from_list(server_list):
- """Randomly choose a server from the server list
+def _sort_server_list(server_list):
+ """Given a list of SRV records sort them into priority order and shuffle
+ each priority with the given weight.
+ """
+ priority_map = {}
- Args:
- server_list (list[Server]): list of candidate servers
+ for server in server_list:
+ priority_map.setdefault(server.priority, []).append(server)
- Returns:
- Tuple[bytes, int]: (host, port) pair for the chosen server
- """
- if not server_list:
- raise RuntimeError("pick_server_from_list called with empty list")
+ results = []
+ for priority in sorted(priority_map):
+ servers = priority_map[priority]
+
+            # This algorithm roughly follows the one described in RFC 2782,
+ # changed to remove an off-by-one error.
+ #
+ # N.B. Weights can be zero, which means that they should be picked
+ # rarely.
+
+ total_weight = sum(s.weight for s in servers)
+
+ # Total weight can become zero if there are only zero weight servers
+ # left, which we handle by just shuffling and appending to the results.
+ while servers and total_weight:
+ target_weight = random.randint(1, total_weight)
- # TODO: currently we only use the lowest-priority servers. We should maintain a
- # cache of servers known to be "down" and filter them out
+ for s in servers:
+ target_weight -= s.weight
- min_priority = min(s.priority for s in server_list)
- eligible_servers = list(s for s in server_list if s.priority == min_priority)
- total_weight = sum(s.weight for s in eligible_servers)
- target_weight = random.randint(0, total_weight)
+ if target_weight <= 0:
+ break
- for s in eligible_servers:
- target_weight -= s.weight
+ results.append(s)
+ servers.remove(s)
+ total_weight -= s.weight
- if target_weight <= 0:
- return s.host, s.port
+ if servers:
+ random.shuffle(servers)
+ results.extend(servers)
- # this should be impossible.
- raise RuntimeError("pick_server_from_list got to end of eligible server list.")
+ return results
class SrvResolver(object):
@@ -120,7 +133,7 @@ class SrvResolver(object):
if cache_entry:
if all(s.expires > now for s in cache_entry):
servers = list(cache_entry)
- return servers
+ return _sort_server_list(servers)
try:
answers, _, _ = yield make_deferred_yieldable(
@@ -169,4 +182,4 @@ class SrvResolver(object):
)
self._cache[service_name] = list(servers)
- return servers
+ return _sort_server_list(servers)
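A self-contained sketch of the RFC 2782-style weighted pick that _sort_server_list performs within each priority band, using bare (weight, name) tuples instead of Server records:

import random

def weighted_order(servers):
    """Order (weight, name) pairs so higher weights tend to come first."""
    servers = list(servers)
    results = []
    total_weight = sum(weight for weight, _ in servers)
    while servers and total_weight:
        # Pick a target in [1, total]; walking the list while subtracting
        # weights makes each entry's chance proportional to its weight.
        target_weight = random.randint(1, total_weight)
        for entry in servers:
            target_weight -= entry[0]
            if target_weight <= 0:
                break
        results.append(entry)
        servers.remove(entry)
        total_weight -= entry[0]
    # Anything left has zero weight: shuffle, so zero-weight entries are
    # still reachable but rarely come first.
    random.shuffle(servers)
    results.extend(servers)
    return results

print(weighted_order([(10, "a"), (5, "b"), (0, "c")]))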
diff --git a/synapse/http/federation/well_known_resolver.py b/synapse/http/federation/well_known_resolver.py
index d2866ff67d..7ddfad286d 100644
--- a/synapse/http/federation/well_known_resolver.py
+++ b/synapse/http/federation/well_known_resolver.py
@@ -32,22 +32,40 @@ from synapse.util.metrics import Measure
# period to cache .well-known results for by default
WELL_KNOWN_DEFAULT_CACHE_PERIOD = 24 * 3600
-# jitter to add to the .well-known default cache ttl
-WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER = 10 * 60
+# jitter factor to add to the .well-known default cache ttls
+WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER = 0.1
# period to cache failure to fetch .well-known for
WELL_KNOWN_INVALID_CACHE_PERIOD = 1 * 3600
+# period to cache failure to fetch .well-known if there has recently been a
+# valid well-known for that domain.
+WELL_KNOWN_DOWN_CACHE_PERIOD = 2 * 60
+
+# period to remember there was a valid well-known after valid record expires
+WELL_KNOWN_REMEMBER_DOMAIN_HAD_VALID = 2 * 3600
+
# cap for .well-known cache period
WELL_KNOWN_MAX_CACHE_PERIOD = 48 * 3600
# lower bound for .well-known cache period
WELL_KNOWN_MIN_CACHE_PERIOD = 5 * 60
+# Attempt to refetch a cached well-known N% of the TTL before it expires.
+# e.g. if set to 0.2 and we have a cached entry with a TTL of 5mins, then
+# we'll start trying to refetch 1 minute before it expires.
+WELL_KNOWN_GRACE_PERIOD_FACTOR = 0.2
+
+# Number of times we retry fetching a well-known for a domain we know recently
+# had a valid entry.
+WELL_KNOWN_RETRY_ATTEMPTS = 3
+
+
logger = logging.getLogger(__name__)
_well_known_cache = TTLCache("well-known")
+_had_valid_well_known_cache = TTLCache("had-valid-well-known")
@attr.s(slots=True, frozen=True)
@@ -59,14 +77,20 @@ class WellKnownResolver(object):
"""Handles well-known lookups for matrix servers.
"""
- def __init__(self, reactor, agent, well_known_cache=None):
+ def __init__(
+ self, reactor, agent, well_known_cache=None, had_well_known_cache=None
+ ):
self._reactor = reactor
self._clock = Clock(reactor)
if well_known_cache is None:
well_known_cache = _well_known_cache
+ if had_well_known_cache is None:
+ had_well_known_cache = _had_valid_well_known_cache
+
self._well_known_cache = well_known_cache
+ self._had_valid_well_known_cache = had_well_known_cache
self._well_known_agent = RedirectAgent(agent)
@defer.inlineCallbacks
@@ -80,59 +104,86 @@ class WellKnownResolver(object):
Deferred[WellKnownLookupResult]: The result of the lookup
"""
try:
- result = self._well_known_cache[server_name]
+ prev_result, expiry, ttl = self._well_known_cache.get_with_expiry(
+ server_name
+ )
+
+ now = self._clock.time()
+ if now < expiry - WELL_KNOWN_GRACE_PERIOD_FACTOR * ttl:
+ return WellKnownLookupResult(delegated_server=prev_result)
except KeyError:
- # TODO: should we linearise so that we don't end up doing two .well-known
- # requests for the same server in parallel?
+ prev_result = None
+
+ # TODO: should we linearise so that we don't end up doing two .well-known
+ # requests for the same server in parallel?
+ try:
with Measure(self._clock, "get_well_known"):
- result, cache_period = yield self._do_get_well_known(server_name)
+ result, cache_period = yield self._fetch_well_known(server_name)
+
+ except _FetchWellKnownFailure as e:
+ if prev_result and e.temporary:
+ # This is a temporary failure and we have a still valid cached
+ # result, so lets return that. Hopefully the next time we ask
+ # the remote will be back up again.
+ return WellKnownLookupResult(delegated_server=prev_result)
+
+ result = None
+
+ if self._had_valid_well_known_cache.get(server_name, False):
+ # We have recently seen a valid well-known record for this
+ # server, so we cache the lack of well-known for a shorter time.
+ cache_period = WELL_KNOWN_DOWN_CACHE_PERIOD
+ else:
+ cache_period = WELL_KNOWN_INVALID_CACHE_PERIOD
+
+ # add some randomness to the TTL to avoid a stampeding herd
+ cache_period *= random.uniform(
+ 1 - WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER,
+ 1 + WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER,
+ )
- if cache_period > 0:
- self._well_known_cache.set(server_name, result, cache_period)
+ if cache_period > 0:
+ self._well_known_cache.set(server_name, result, cache_period)
return WellKnownLookupResult(delegated_server=result)
@defer.inlineCallbacks
- def _do_get_well_known(self, server_name):
+ def _fetch_well_known(self, server_name):
"""Actually fetch and parse a .well-known, without checking the cache
Args:
server_name (bytes): name of the server, from the requested url
+ Raises:
+ _FetchWellKnownFailure if we fail to lookup a result
+
Returns:
- Deferred[Tuple[bytes|None|object],int]:
- result, cache period, where result is one of:
- - the new server name from the .well-known (as a `bytes`)
- - None if there was no .well-known file.
- - INVALID_WELL_KNOWN if the .well-known was invalid
+ Deferred[Tuple[bytes,int]]: The lookup result and cache period.
"""
- uri = b"https://%s/.well-known/matrix/server" % (server_name,)
- uri_str = uri.decode("ascii")
- logger.info("Fetching %s", uri_str)
+
+ had_valid_well_known = self._had_valid_well_known_cache.get(server_name, False)
+
+ # We do this in two steps to differentiate between possibly transient
+        # errors (e.g. can't connect to host, 503 response) and more permanent
+ # errors (such as getting a 404 response).
+ response, body = yield self._make_well_known_request(
+ server_name, retry=had_valid_well_known
+ )
+
try:
- response = yield make_deferred_yieldable(
- self._well_known_agent.request(b"GET", uri)
- )
- body = yield make_deferred_yieldable(readBody(response))
if response.code != 200:
raise Exception("Non-200 response %s" % (response.code,))
parsed_body = json.loads(body.decode("utf-8"))
logger.info("Response from .well-known: %s", parsed_body)
- if not isinstance(parsed_body, dict):
- raise Exception("not a dict")
- if "m.server" not in parsed_body:
- raise Exception("Missing key 'm.server'")
- except Exception as e:
- logger.info("Error fetching %s: %s", uri_str, e)
-
- # add some randomness to the TTL to avoid a stampeding herd every hour
- # after startup
- cache_period = WELL_KNOWN_INVALID_CACHE_PERIOD
- cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER)
- return (None, cache_period)
- result = parsed_body["m.server"].encode("ascii")
+ result = parsed_body["m.server"].encode("ascii")
+ except defer.CancelledError:
+ # Bail if we've been cancelled
+ raise
+ except Exception as e:
+ logger.info("Error parsing well-known for %s: %s", server_name, e)
+ raise _FetchWellKnownFailure(temporary=False)
cache_period = _cache_period_from_headers(
response.headers, time_now=self._reactor.seconds
@@ -141,12 +192,68 @@ class WellKnownResolver(object):
cache_period = WELL_KNOWN_DEFAULT_CACHE_PERIOD
# add some randomness to the TTL to avoid a stampeding herd every 24 hours
# after startup
- cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER)
+ cache_period *= random.uniform(
+ 1 - WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER,
+ 1 + WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER,
+ )
else:
cache_period = min(cache_period, WELL_KNOWN_MAX_CACHE_PERIOD)
cache_period = max(cache_period, WELL_KNOWN_MIN_CACHE_PERIOD)
- return (result, cache_period)
+ # We got a success, mark as such in the cache
+ self._had_valid_well_known_cache.set(
+ server_name,
+ bool(result),
+ cache_period + WELL_KNOWN_REMEMBER_DOMAIN_HAD_VALID,
+ )
+
+ return result, cache_period
+
+ @defer.inlineCallbacks
+ def _make_well_known_request(self, server_name, retry):
+ """Make the well known request.
+
+        This will retry the request if requested and it fails (e.g. due to a
+        connection error or a 5xx response).
+
+ Args:
+ server_name (bytes)
+ retry (bool): Whether to retry the request if it fails.
+
+ Returns:
+ Deferred[tuple[IResponse, bytes]] Returns the response object and
+ body. Response may be a non-200 response.
+ """
+ uri = b"https://%s/.well-known/matrix/server" % (server_name,)
+ uri_str = uri.decode("ascii")
+
+ i = 0
+ while True:
+ i += 1
+
+ logger.info("Fetching %s", uri_str)
+ try:
+ response = yield make_deferred_yieldable(
+ self._well_known_agent.request(b"GET", uri)
+ )
+ body = yield make_deferred_yieldable(readBody(response))
+
+ if 500 <= response.code < 600:
+ raise Exception("Non-200 response %s" % (response.code,))
+
+ return response, body
+ except defer.CancelledError:
+ # Bail if we've been cancelled
+ raise
+ except Exception as e:
+ if not retry or i >= WELL_KNOWN_RETRY_ATTEMPTS:
+ logger.info("Error fetching %s: %s", uri_str, e)
+ raise _FetchWellKnownFailure(temporary=True)
+
+ logger.info("Error fetching %s: %s. Retrying", uri_str, e)
+
+ # Sleep briefly in the hopes that they come back up
+ yield self._clock.sleep(0.5)
def _cache_period_from_headers(headers, time_now=time.time):
@@ -185,3 +292,10 @@ def _parse_cache_control(headers):
v = splits[1] if len(splits) > 1 else None
cache_controls[k] = v
return cache_controls
+
+
+@attr.s()
+class _FetchWellKnownFailure(Exception):
+    # True if the failure may be temporary: we could not connect, or we got a
+    # 5xx response (as opposed to e.g. a 404 or an unparseable body).
+ temporary = attr.ib()
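The cache arithmetic above is easier to see with concrete numbers: with WELL_KNOWN_GRACE_PERIOD_FACTOR = 0.2, a cached entry with a 300-second TTL is served straight from the cache for the first 240 seconds and becomes eligible for refetching in the final 60, while the jitter factor spreads any freshly computed cache period over +/-10%. A sketch of just that arithmetic:

import random

GRACE_PERIOD_FACTOR = 0.2  # WELL_KNOWN_GRACE_PERIOD_FACTOR
JITTER = 0.1               # WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER

ttl = 300  # seconds of validity on the cached entry
refetch_window = GRACE_PERIOD_FACTOR * ttl  # start refetching 60s early

cache_period = 3600
jittered = cache_period * random.uniform(1 - JITTER, 1 + JITTER)
assert 3240 <= jittered <= 3960  # uniformly within +/-10% of the nominal period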
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index d07d356464..3f7c93ffcb 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -36,7 +36,6 @@ from twisted.internet.task import _EPSILON, Cooperator
from twisted.web._newclient import ResponseDone
from twisted.web.http_headers import Headers
-import synapse.logging.opentracing as opentracing
import synapse.metrics
import synapse.util.retryutils
from synapse.api.errors import (
@@ -50,6 +49,12 @@ from synapse.http import QuieterFileBodyProducer
from synapse.http.client import BlacklistingAgentWrapper, IPBlacklistingResolver
from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
from synapse.logging.context import make_deferred_yieldable
+from synapse.logging.opentracing import (
+ inject_active_span_byte_dict,
+ set_tag,
+ start_active_span,
+ tags,
+)
from synapse.util.async_helpers import timeout_deferred
from synapse.util.metrics import Measure
@@ -340,21 +345,20 @@ class MatrixFederationHttpClient(object):
else:
query_bytes = b""
- # Retreive current span
- scope = opentracing.start_active_span(
+ scope = start_active_span(
"outgoing-federation-request",
tags={
- opentracing.tags.SPAN_KIND: opentracing.tags.SPAN_KIND_RPC_CLIENT,
- opentracing.tags.PEER_ADDRESS: request.destination,
- opentracing.tags.HTTP_METHOD: request.method,
- opentracing.tags.HTTP_URL: request.path,
+ tags.SPAN_KIND: tags.SPAN_KIND_RPC_CLIENT,
+ tags.PEER_ADDRESS: request.destination,
+ tags.HTTP_METHOD: request.method,
+ tags.HTTP_URL: request.path,
},
finish_on_close=True,
)
# Inject the span into the headers
headers_dict = {}
- opentracing.inject_active_span_byte_dict(headers_dict, request.destination)
+ inject_active_span_byte_dict(headers_dict, request.destination)
headers_dict[b"User-Agent"] = [self.version_string_bytes]
@@ -436,9 +440,7 @@ class MatrixFederationHttpClient(object):
response.phrase.decode("ascii", errors="replace"),
)
- opentracing.set_tag(
- opentracing.tags.HTTP_STATUS_CODE, response.code
- )
+ set_tag(tags.HTTP_STATUS_CODE, response.code)
if 200 <= response.code < 300:
pass
diff --git a/synapse/http/proxyagent.py b/synapse/http/proxyagent.py
new file mode 100644
index 0000000000..332da02a8d
--- /dev/null
+++ b/synapse/http/proxyagent.py
@@ -0,0 +1,195 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+import re
+
+from zope.interface import implementer
+
+from twisted.internet import defer
+from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
+from twisted.python.failure import Failure
+from twisted.web.client import URI, BrowserLikePolicyForHTTPS, _AgentBase
+from twisted.web.error import SchemeNotSupported
+from twisted.web.iweb import IAgent
+
+from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint
+
+logger = logging.getLogger(__name__)
+
+_VALID_URI = re.compile(br"\A[\x21-\x7e]+\Z")
+
+
+@implementer(IAgent)
+class ProxyAgent(_AgentBase):
+ """An Agent implementation which will use an HTTP proxy if one was requested
+
+ Args:
+        reactor: twisted reactor to use for outgoing connections.
+
+ contextFactory (IPolicyForHTTPS): A factory for TLS contexts, to control the
+ verification parameters of OpenSSL. The default is to use a
+ `BrowserLikePolicyForHTTPS`, so unless you have special
+ requirements you can leave this as-is.
+
+ connectTimeout (float): The amount of time that this Agent will wait
+ for the peer to accept a connection.
+
+ bindAddress (bytes): The local address for client sockets to bind to.
+
+ pool (HTTPConnectionPool|None): connection pool to be used. If None, a
+ non-persistent pool instance will be created.
+ """
+
+ def __init__(
+ self,
+ reactor,
+ contextFactory=BrowserLikePolicyForHTTPS(),
+ connectTimeout=None,
+ bindAddress=None,
+ pool=None,
+ http_proxy=None,
+ https_proxy=None,
+ ):
+ _AgentBase.__init__(self, reactor, pool)
+
+ self._endpoint_kwargs = {}
+ if connectTimeout is not None:
+ self._endpoint_kwargs["timeout"] = connectTimeout
+ if bindAddress is not None:
+ self._endpoint_kwargs["bindAddress"] = bindAddress
+
+ self.http_proxy_endpoint = _http_proxy_endpoint(
+ http_proxy, reactor, **self._endpoint_kwargs
+ )
+
+ self.https_proxy_endpoint = _http_proxy_endpoint(
+ https_proxy, reactor, **self._endpoint_kwargs
+ )
+
+ self._policy_for_https = contextFactory
+ self._reactor = reactor
+
+ def request(self, method, uri, headers=None, bodyProducer=None):
+ """
+ Issue a request to the server indicated by the given uri.
+
+ Supports `http` and `https` schemes.
+
+ An existing connection from the connection pool may be used or a new one may be
+ created.
+
+ See also: twisted.web.iweb.IAgent.request
+
+ Args:
+ method (bytes): The request method to use, such as `GET`, `POST`, etc
+
+ uri (bytes): The location of the resource to request.
+
+ headers (Headers|None): Extra headers to send with the request
+
+ bodyProducer (IBodyProducer|None): An object which can generate bytes to
+ make up the body of this request (for example, the properly encoded
+ contents of a file for a file upload). Or, None if the request is to
+ have no body.
+
+ Returns:
+ Deferred[IResponse]: completes when the header of the response has
+ been received (regardless of the response status code).
+ """
+ uri = uri.strip()
+ if not _VALID_URI.match(uri):
+ raise ValueError("Invalid URI {!r}".format(uri))
+
+ parsed_uri = URI.fromBytes(uri)
+ pool_key = (parsed_uri.scheme, parsed_uri.host, parsed_uri.port)
+ request_path = parsed_uri.originForm
+
+ if parsed_uri.scheme == b"http" and self.http_proxy_endpoint:
+ # Cache *all* connections under the same key, since we are only
+ # connecting to a single destination, the proxy:
+ pool_key = ("http-proxy", self.http_proxy_endpoint)
+ endpoint = self.http_proxy_endpoint
+ request_path = uri
+ elif parsed_uri.scheme == b"https" and self.https_proxy_endpoint:
+ endpoint = HTTPConnectProxyEndpoint(
+ self._reactor,
+ self.https_proxy_endpoint,
+ parsed_uri.host,
+ parsed_uri.port,
+ )
+ else:
+ # not using a proxy
+ endpoint = HostnameEndpoint(
+ self._reactor, parsed_uri.host, parsed_uri.port, **self._endpoint_kwargs
+ )
+
+ logger.debug("Requesting %s via %s", uri, endpoint)
+
+ if parsed_uri.scheme == b"https":
+ tls_connection_creator = self._policy_for_https.creatorForNetloc(
+ parsed_uri.host, parsed_uri.port
+ )
+ endpoint = wrapClientTLS(tls_connection_creator, endpoint)
+ elif parsed_uri.scheme == b"http":
+ pass
+ else:
+ return defer.fail(
+ Failure(
+ SchemeNotSupported("Unsupported scheme: %r" % (parsed_uri.scheme,))
+ )
+ )
+
+ return self._requestWithEndpoint(
+ pool_key, endpoint, method, parsed_uri, headers, bodyProducer, request_path
+ )
+
+
+def _http_proxy_endpoint(proxy, reactor, **kwargs):
+ """Parses an http proxy setting and returns an endpoint for the proxy
+
+ Args:
+ proxy (bytes|None): the proxy setting
+ reactor: reactor to be used to connect to the proxy
+ kwargs: other args to be passed to HostnameEndpoint
+
+ Returns:
+ interfaces.IStreamClientEndpoint|None: endpoint to use to connect to the proxy,
+ or None
+ """
+ if proxy is None:
+ return None
+
+ # currently we only support hostname:port. Some apps also support
+ # protocol://<host>[:port], which allows a way of requiring a TLS connection to the
+ # proxy.
+
+ host, port = parse_host_port(proxy, default_port=1080)
+ return HostnameEndpoint(reactor, host, port, **kwargs)
+
+
+def parse_host_port(hostport, default_port=None):
+ # could have sworn we had one of these somewhere else...
+ if b":" in hostport:
+ host, port = hostport.rsplit(b":", 1)
+ try:
+ port = int(port)
+ return host, port
+ except ValueError:
+ # the thing after the : wasn't a valid port; presumably this is an
+ # IPv6 address.
+ pass
+
+ return hostport, default_port
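parse_host_port splits on the last colon and falls back to the default port when the trailing component is not an integer; a few illustrative cases:

from synapse.http.proxyagent import parse_host_port

assert parse_host_port(b"proxy.example.com:8888") == (b"proxy.example.com", 8888)
assert parse_host_port(b"proxy.example.com", default_port=1080) == (
    b"proxy.example.com",
    1080,
)
# A non-numeric final segment is presumed to be part of an IPv6 literal,
# so the default port applies.
assert parse_host_port(b"fe80::abcd", default_port=1080) == (b"fe80::abcd", 1080)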
diff --git a/synapse/http/server.py b/synapse/http/server.py
index e6f351ba3b..cb9158fe1b 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -40,6 +40,7 @@ from synapse.api.errors import (
UnrecognizedRequestError,
)
from synapse.logging.context import preserve_fn
+from synapse.logging.opentracing import trace_servlet
from synapse.util.caches import intern_dict
logger = logging.getLogger(__name__)
@@ -257,7 +258,9 @@ class JsonResource(HttpServer, resource.Resource):
self.path_regexs = {}
self.hs = hs
- def register_paths(self, method, path_patterns, callback, servlet_classname):
+ def register_paths(
+ self, method, path_patterns, callback, servlet_classname, trace=True
+ ):
"""
Registers a request handler against a regular expression. Later request URLs are
checked against these regular expressions in order to identify an appropriate
@@ -273,8 +276,16 @@ class JsonResource(HttpServer, resource.Resource):
servlet_classname (str): The name of the handler to be used in prometheus
and opentracing logs.
+
+ trace (bool): Whether we should start a span to trace the servlet.
"""
method = method.encode("utf-8") # method is bytes on py3
+
+ if trace:
+ # We don't extract the context from the servlet because we can't
+ # trust the sender
+ callback = trace_servlet(servlet_classname)(callback)
+
for path_pattern in path_patterns:
logger.debug("Registering for %s %s", method, path_pattern.pattern)
self.path_regexs.setdefault(method, []).append(
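trace_servlet(servlet_classname) is a decorator factory, which is why register_paths applies it in two calls. A generic sketch of the same wrapping shape (the tracing internals are elided; trace here is a hypothetical stand-in, not the real trace_servlet):

import functools

def trace(name):
    # Returns a decorator, so it is applied as trace(name)(callback),
    # mirroring the register_paths change above.
    def decorator(callback):
        @functools.wraps(callback)
        def wrapped(*args, **kwargs):
            # A real implementation would run the callback inside an
            # opentracing span named after `name`.
            return callback(*args, **kwargs)
        return wrapped
    return decorator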
diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py
index fd07bf7b8e..274c1a6a87 100644
--- a/synapse/http/servlet.py
+++ b/synapse/http/servlet.py
@@ -20,7 +20,6 @@ import logging
from canonicaljson import json
from synapse.api.errors import Codes, SynapseError
-from synapse.logging.opentracing import trace_servlet
logger = logging.getLogger(__name__)
@@ -298,10 +297,7 @@ class RestServlet(object):
servlet_classname = self.__class__.__name__
method_handler = getattr(self, "on_%s" % (method,))
http_server.register_paths(
- method,
- patterns,
- trace_servlet(servlet_classname, method_handler),
- servlet_classname,
+ method, patterns, method_handler, servlet_classname
)
else:
diff --git a/synapse/logging/_structured.py b/synapse/logging/_structured.py
new file mode 100644
index 0000000000..3220e985a9
--- /dev/null
+++ b/synapse/logging/_structured.py
@@ -0,0 +1,374 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import os.path
+import sys
+import typing
+import warnings
+from typing import List
+
+import attr
+from constantly import NamedConstant, Names, ValueConstant, Values
+from zope.interface import implementer
+
+from twisted.logger import (
+ FileLogObserver,
+ FilteringLogObserver,
+ ILogObserver,
+ LogBeginner,
+ Logger,
+ LogLevel,
+ LogLevelFilterPredicate,
+ LogPublisher,
+ eventAsText,
+ jsonFileLogObserver,
+)
+
+from synapse.config._base import ConfigError
+from synapse.logging._terse_json import (
+ TerseJSONToConsoleLogObserver,
+ TerseJSONToTCPLogObserver,
+)
+from synapse.logging.context import LoggingContext
+
+
+def stdlib_log_level_to_twisted(level: str) -> LogLevel:
+ """
+ Convert a stdlib log level to Twisted's log level.
+ """
+ lvl = level.lower().replace("warning", "warn")
+ return LogLevel.levelWithName(lvl)
+
+
+@attr.s
+@implementer(ILogObserver)
+class LogContextObserver(object):
+ """
+ An ILogObserver which adds Synapse-specific log context information.
+
+ Attributes:
+ observer (ILogObserver): The target parent observer.
+ """
+
+ observer = attr.ib()
+
+ def __call__(self, event: dict) -> None:
+ """
+ Consume a log event and emit it to the parent observer after filtering
+ and adding log context information.
+
+ Args:
+ event (dict)
+ """
+ # Filter out some useless events that Twisted outputs
+ if "log_text" in event:
+ if event["log_text"].startswith("DNSDatagramProtocol starting on "):
+ return
+
+ if event["log_text"].startswith("(UDP Port "):
+ return
+
+ if event["log_text"].startswith("Timing out client") or event[
+ "log_format"
+ ].startswith("Timing out client"):
+ return
+
+ context = LoggingContext.current_context()
+
+ # Copy the context information to the log event.
+ if context is not None:
+ context.copy_to_twisted_log_entry(event)
+ else:
+ # If there's no logging context, not even the root one, we might be
+ # starting up or it might be from non-Synapse code. Log it as if it
+ # came from the root logger.
+ event["request"] = None
+ event["scope"] = None
+
+ self.observer(event)
+
+
+class PythonStdlibToTwistedLogger(logging.Handler):
+ """
+ Transform a Python stdlib log message into a Twisted one.
+ """
+
+ def __init__(self, observer, *args, **kwargs):
+ """
+ Args:
+ observer (ILogObserver): A Twisted logging observer.
+ *args, **kwargs: Args/kwargs to be passed to logging.Handler.
+ """
+ self.observer = observer
+ super().__init__(*args, **kwargs)
+
+ def emit(self, record: logging.LogRecord) -> None:
+ """
+ Emit a record to Twisted's observer.
+
+ Args:
+ record (logging.LogRecord)
+ """
+
+ self.observer(
+ {
+ "log_time": record.created,
+ "log_text": record.getMessage(),
+ "log_format": "{log_text}",
+ "log_namespace": record.name,
+ "log_level": stdlib_log_level_to_twisted(record.levelname),
+ }
+ )
+
+
+def SynapseFileLogObserver(outFile: typing.IO[str]) -> FileLogObserver:
+ """
+ A log observer that formats events like the traditional log formatter and
+ sends them to `outFile`.
+
+ Args:
+ outFile (file object): The file object to write to.
+ """
+
+ def formatEvent(_event: dict) -> str:
+ event = dict(_event)
+ event["log_level"] = event["log_level"].name.upper()
+ event["log_format"] = "- {log_namespace} - {log_level} - {request} - " + (
+ event.get("log_format", "{log_text}") or "{log_text}"
+ )
+ return eventAsText(event, includeSystem=False) + "\n"
+
+ return FileLogObserver(outFile, formatEvent)
+
+
+class DrainType(Names):
+ CONSOLE = NamedConstant()
+ CONSOLE_JSON = NamedConstant()
+ CONSOLE_JSON_TERSE = NamedConstant()
+ FILE = NamedConstant()
+ FILE_JSON = NamedConstant()
+ NETWORK_JSON_TERSE = NamedConstant()
+
+
+class OutputPipeType(Values):
+ stdout = ValueConstant(sys.__stdout__)
+ stderr = ValueConstant(sys.__stderr__)
+
+
+@attr.s
+class DrainConfiguration(object):
+ name = attr.ib()
+ type = attr.ib()
+ location = attr.ib()
+ options = attr.ib(default=None)
+
+
+@attr.s
+class NetworkJSONTerseOptions(object):
+ maximum_buffer = attr.ib(type=int)
+
+
+DEFAULT_LOGGERS = {"synapse": {"level": "INFO"}}
+
+
+def parse_drain_configs(
+ drains: dict
+) -> typing.Generator[DrainConfiguration, None, None]:
+ """
+ Parse the drain configurations.
+
+ Args:
+        drains (dict): The drain configurations, keyed by drain name.
+
+ Yields:
+ DrainConfiguration instances.
+
+ Raises:
+ ConfigError: If any of the drain configuration items are invalid.
+ """
+ for name, config in drains.items():
+ if "type" not in config:
+ raise ConfigError("Logging drains require a 'type' key.")
+
+ try:
+ logging_type = DrainType.lookupByName(config["type"].upper())
+ except ValueError:
+ raise ConfigError(
+ "%s is not a known logging drain type." % (config["type"],)
+ )
+
+ if logging_type in [
+ DrainType.CONSOLE,
+ DrainType.CONSOLE_JSON,
+ DrainType.CONSOLE_JSON_TERSE,
+ ]:
+ location = config.get("location")
+ if location is None or location not in ["stdout", "stderr"]:
+ raise ConfigError(
+ (
+ "The %s drain needs the 'location' key set to "
+ "either 'stdout' or 'stderr'."
+ )
+ % (logging_type,)
+ )
+
+ pipe = OutputPipeType.lookupByName(location).value
+
+ yield DrainConfiguration(name=name, type=logging_type, location=pipe)
+
+ elif logging_type in [DrainType.FILE, DrainType.FILE_JSON]:
+ if "location" not in config:
+ raise ConfigError(
+ "The %s drain needs the 'location' key set." % (logging_type,)
+ )
+
+ location = config.get("location")
+ if os.path.abspath(location) != location:
+ raise ConfigError(
+ "File paths need to be absolute, '%s' is a relative path"
+ % (location,)
+ )
+ yield DrainConfiguration(name=name, type=logging_type, location=location)
+
+ elif logging_type in [DrainType.NETWORK_JSON_TERSE]:
+ host = config.get("host")
+ port = config.get("port")
+ maximum_buffer = config.get("maximum_buffer", 1000)
+ yield DrainConfiguration(
+ name=name,
+ type=logging_type,
+ location=(host, port),
+ options=NetworkJSONTerseOptions(maximum_buffer=maximum_buffer),
+ )
+
+ else:
+ raise ConfigError(
+ "The %s drain type is currently not implemented."
+ % (config["type"].upper(),)
+ )
+
+
+def setup_structured_logging(
+ hs,
+ config,
+ log_config: dict,
+ logBeginner: LogBeginner,
+ redirect_stdlib_logging: bool = True,
+) -> LogPublisher:
+ """
+ Set up Twisted's structured logging system.
+
+ Args:
+ hs: The homeserver to use.
+ config (HomeserverConfig): The configuration of the Synapse homeserver.
+        log_config (dict): The log configuration to use.
+        logBeginner (LogBeginner): The Twisted log beginner to register the
+            observers with.
+        redirect_stdlib_logging (bool): Whether to redirect stdlib logging
+            into the Twisted observers.
+ """
+ if config.no_redirect_stdio:
+ raise ConfigError(
+ "no_redirect_stdio cannot be defined using structured logging."
+ )
+
+ logger = Logger()
+
+ if "drains" not in log_config:
+ raise ConfigError("The logging configuration requires a list of drains.")
+
+ observers = [] # type: List[ILogObserver]
+
+ for observer in parse_drain_configs(log_config["drains"]):
+ # Pipe drains
+ if observer.type == DrainType.CONSOLE:
+ logger.debug(
+ "Starting up the {name} console logger drain", name=observer.name
+ )
+ observers.append(SynapseFileLogObserver(observer.location))
+ elif observer.type == DrainType.CONSOLE_JSON:
+ logger.debug(
+ "Starting up the {name} JSON console logger drain", name=observer.name
+ )
+ observers.append(jsonFileLogObserver(observer.location))
+ elif observer.type == DrainType.CONSOLE_JSON_TERSE:
+ logger.debug(
+ "Starting up the {name} terse JSON console logger drain",
+ name=observer.name,
+ )
+ observers.append(
+ TerseJSONToConsoleLogObserver(observer.location, metadata={})
+ )
+
+ # File drains
+ elif observer.type == DrainType.FILE:
+ logger.debug("Starting up the {name} file logger drain", name=observer.name)
+ log_file = open(observer.location, "at", buffering=1, encoding="utf8")
+ observers.append(SynapseFileLogObserver(log_file))
+ elif observer.type == DrainType.FILE_JSON:
+ logger.debug(
+ "Starting up the {name} JSON file logger drain", name=observer.name
+ )
+ log_file = open(observer.location, "at", buffering=1, encoding="utf8")
+ observers.append(jsonFileLogObserver(log_file))
+
+ elif observer.type == DrainType.NETWORK_JSON_TERSE:
+ metadata = {"server_name": hs.config.server_name}
+ log_observer = TerseJSONToTCPLogObserver(
+ hs=hs,
+ host=observer.location[0],
+ port=observer.location[1],
+ metadata=metadata,
+ maximum_buffer=observer.options.maximum_buffer,
+ )
+ log_observer.start()
+ observers.append(log_observer)
+ else:
+ # We should never get here, but, just in case, throw an error.
+ raise ConfigError("%s drain type cannot be configured" % (observer.type,))
+
+ publisher = LogPublisher(*observers)
+ log_filter = LogLevelFilterPredicate()
+
+ for namespace, namespace_config in log_config.get(
+ "loggers", DEFAULT_LOGGERS
+ ).items():
+ # Set the log level for twisted.logger.Logger namespaces
+ log_filter.setLogLevelForNamespace(
+ namespace,
+ stdlib_log_level_to_twisted(namespace_config.get("level", "INFO")),
+ )
+
+ # Also set the log levels for the stdlib logger namespaces, to prevent
+ # them getting to PythonStdlibToTwistedLogger and having to be formatted
+ if "level" in namespace_config:
+ logging.getLogger(namespace).setLevel(namespace_config.get("level"))
+
+ f = FilteringLogObserver(publisher, [log_filter])
+ lco = LogContextObserver(f)
+
+ if redirect_stdlib_logging:
+ stuff_into_twisted = PythonStdlibToTwistedLogger(lco)
+ stdliblogger = logging.getLogger()
+ stdliblogger.addHandler(stuff_into_twisted)
+
+ # Always redirect standard I/O, otherwise other logging outputs might miss
+ # it.
+ logBeginner.beginLoggingTo([lco], redirectStandardIO=True)
+
+ return publisher
+
+
+def reload_structured_logging(*args, log_config=None) -> None:
+ warnings.warn(
+        "Currently the structured logging system cannot be reloaded, doing nothing"
+ )
diff --git a/synapse/logging/_terse_json.py b/synapse/logging/_terse_json.py
new file mode 100644
index 0000000000..0ebbde06f2
--- /dev/null
+++ b/synapse/logging/_terse_json.py
@@ -0,0 +1,280 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Log formatters that output terse JSON.
+"""
+
+import sys
+from collections import deque
+from ipaddress import IPv4Address, IPv6Address, ip_address
+from math import floor
+from typing import IO
+
+import attr
+from simplejson import dumps
+from zope.interface import implementer
+
+from twisted.application.internet import ClientService
+from twisted.internet.endpoints import (
+ HostnameEndpoint,
+ TCP4ClientEndpoint,
+ TCP6ClientEndpoint,
+)
+from twisted.internet.protocol import Factory, Protocol
+from twisted.logger import FileLogObserver, ILogObserver, Logger
+from twisted.python.failure import Failure
+
+
+def flatten_event(event: dict, metadata: dict, include_time: bool = False):
+ """
+    Flatten a Twisted logging event to a dictionary capable of being sent
+    as a log event to a logging aggregation system.
+
+ The format is vastly simplified and is not designed to be a "human readable
+ string" in the sense that traditional logs are. Instead, the structure is
+ optimised for searchability and filtering, with human-understandable log
+ keys.
+
+ Args:
+ event (dict): The Twisted logging event we are flattening.
+ metadata (dict): Additional data to include with each log message. This
+ can be information like the server name. Since the target log
+ consumer does not know who we are other than by host IP, this
+ allows us to forward through static information.
+ include_time (bool): Should we include the `time` key? If False, the
+ event time is stripped from the event.
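+
+    Example:
+        A rough sketch of the transformation (input keys abbreviated)::
+
+            flatten_event(
+                {"log_format": "Hello {name}", "log_level": LogLevel.info},
+                {"server_name": "example.com"},
+            )
+            # -> {"log": "Hello {name}", "level": "INFO",
+            #     "server_name": "example.com"}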
+ """
+ new_event = {}
+
+ # If it's a failure, make the new event's log_failure be the traceback text.
+ if "log_failure" in event:
+ new_event["log_failure"] = event["log_failure"].getTraceback()
+
+ # If it's a warning, copy over a string representation of the warning.
+ if "warning" in event:
+ new_event["warning"] = str(event["warning"])
+
+ # Stdlib logging events have "log_text" as their human-readable portion,
+ # Twisted ones have "log_format". For now, include the log_format, so that
+ # context only given in the log format (e.g. what is being logged) is
+ # available.
+ if "log_text" in event:
+ new_event["log"] = event["log_text"]
+ else:
+ new_event["log"] = event["log_format"]
+
+ # We want to include the timestamp when forwarding over the network, but
+ # exclude it when we are writing to stdout. This is because the log ingester
+ # (e.g. logstash, fluentd) can add its own timestamp.
+ if include_time:
+ new_event["time"] = round(event["log_time"], 2)
+
+ # Convert the log level to a textual representation.
+ new_event["level"] = event["log_level"].name.upper()
+
+ # Ignore these keys, and do not transfer them over to the new log object.
+ # They are either useless (isError), transferred manually above (log_time,
+ # log_level, etc), or contain Python objects which are not useful for output
+ # (log_logger, log_source).
+ keys_to_delete = [
+ "isError",
+ "log_failure",
+ "log_format",
+ "log_level",
+ "log_logger",
+ "log_source",
+ "log_system",
+ "log_time",
+ "log_text",
+ "observer",
+ "warning",
+ ]
+
+ # If it's from the Twisted legacy logger (twisted.python.log), it adds some
+ # more keys we want to purge.
+ if event.get("log_namespace") == "log_legacy":
+ keys_to_delete.extend(["message", "system", "time"])
+
+ # Rather than modify the dictionary in place, construct a new one with only
+ # the content we want. The original event should be considered 'frozen'.
+ for key in event.keys():
+
+ if key in keys_to_delete:
+ continue
+
+ if isinstance(event[key], (str, int, bool, float)) or event[key] is None:
+ # If it's a plain type, include it as is.
+ new_event[key] = event[key]
+ else:
+ # If it's not one of those basic types, write out a string
+ # representation. This should probably be a warning in development,
+ # so that we are sure we are only outputting useful data.
+ new_event[key] = str(event[key])
+
+ # Add the metadata information to the event (e.g. the server_name).
+ new_event.update(metadata)
+
+ return new_event
+
+
+def TerseJSONToConsoleLogObserver(outFile: IO[str], metadata: dict) -> FileLogObserver:
+ """
+ A log observer that formats events to a flattened JSON representation.
+
+ Args:
+ outFile: The file object to write to.
+ metadata: Metadata to be added to each log object.
+ """
+
+ def formatEvent(_event: dict) -> str:
+ flattened = flatten_event(_event, metadata)
+ return dumps(flattened, ensure_ascii=False, separators=(",", ":")) + "\n"
+
+ return FileLogObserver(outFile, formatEvent)
+
+
+@attr.s
+@implementer(ILogObserver)
+class TerseJSONToTCPLogObserver(object):
+    """
+    An ILogObserver that writes JSON logs to a TCP target.
+
+    Args:
+        hs (HomeServer): The Homeserver that is being logged for.
+        host: The host of the logging target.
+        port: The logging target's port.
+        metadata: Metadata to be added to each log entry.
+        maximum_buffer: The maximum number of log entries to buffer before
+            old entries are shed (see _handle_pressure).
+    """
+
+ hs = attr.ib()
+ host = attr.ib(type=str)
+ port = attr.ib(type=int)
+ metadata = attr.ib(type=dict)
+ maximum_buffer = attr.ib(type=int)
+ _buffer = attr.ib(default=attr.Factory(deque), type=deque)
+ _writer = attr.ib(default=None)
+ _logger = attr.ib(default=attr.Factory(Logger))
+
+ def start(self) -> None:
+
+ # Connect without DNS lookups if it's a direct IP.
+ try:
+ ip = ip_address(self.host)
+ if isinstance(ip, IPv4Address):
+ endpoint = TCP4ClientEndpoint(
+ self.hs.get_reactor(), self.host, self.port
+ )
+ elif isinstance(ip, IPv6Address):
+ endpoint = TCP6ClientEndpoint(
+ self.hs.get_reactor(), self.host, self.port
+ )
+ except ValueError:
+ endpoint = HostnameEndpoint(self.hs.get_reactor(), self.host, self.port)
+
+ factory = Factory.forProtocol(Protocol)
+ self._service = ClientService(endpoint, factory, clock=self.hs.get_reactor())
+ self._service.startService()
+
+ def _write_loop(self) -> None:
+ """
+ Implement the write loop.
+ """
+ if self._writer:
+ return
+
+ self._writer = self._service.whenConnected()
+
+ @self._writer.addBoth
+ def writer(r):
+ if isinstance(r, Failure):
+ r.printTraceback(file=sys.__stderr__)
+ self._writer = None
+ self.hs.get_reactor().callLater(1, self._write_loop)
+ return
+
+ try:
+ for event in self._buffer:
+ r.transport.write(
+ dumps(event, ensure_ascii=False, separators=(",", ":")).encode(
+ "utf8"
+ )
+ )
+ r.transport.write(b"\n")
+ self._buffer.clear()
+ except Exception as e:
+ sys.__stderr__.write("Failed writing out logs with %s\n" % (str(e),))
+
+ self._writer = False
+ self.hs.get_reactor().callLater(1, self._write_loop)
+
+ def _handle_pressure(self) -> None:
+        """
+        Handle backpressure by shedding events.
+
+        The following actions are applied, in order, until the buffer is
+        below the maximum:
+        - Shed DEBUG events
+        - Shed INFO events
+        - Shed the middle 50% of the events.
+        """
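+        # For example (a sketch): with maximum_buffer=10 and 25 events still
+        # buffered after the DEBUG/INFO passes, the first 5 and the last 5
+        # events are kept and the middle 15 are dropped.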
+ if len(self._buffer) <= self.maximum_buffer:
+ return
+
+ # Strip out DEBUGs
+ self._buffer = deque(
+ filter(lambda event: event["level"] != "DEBUG", self._buffer)
+ )
+
+ if len(self._buffer) <= self.maximum_buffer:
+ return
+
+ # Strip out INFOs
+ self._buffer = deque(
+ filter(lambda event: event["level"] != "INFO", self._buffer)
+ )
+
+ if len(self._buffer) <= self.maximum_buffer:
+ return
+
+ # Cut the middle entries out
+ buffer_split = floor(self.maximum_buffer / 2)
+
+ old_buffer = self._buffer
+ self._buffer = deque()
+
+ for i in range(buffer_split):
+ self._buffer.append(old_buffer.popleft())
+
+ end_buffer = []
+ for i in range(buffer_split):
+ end_buffer.append(old_buffer.pop())
+
+ self._buffer.extend(reversed(end_buffer))
+
+ def __call__(self, event: dict) -> None:
+ flattened = flatten_event(event, self.metadata, include_time=True)
+ self._buffer.append(flattened)
+
+ # Handle backpressure, if it exists.
+ try:
+ self._handle_pressure()
+ except Exception:
+            # If handling backpressure fails, clear the buffer and log the
+ # exception.
+ self._buffer.clear()
+ self._logger.failure("Failed clearing backpressure")
+
+ # Try and write immediately.
+ self._write_loop()
diff --git a/synapse/logging/context.py b/synapse/logging/context.py
index b456c31f70..63379bfb93 100644
--- a/synapse/logging/context.py
+++ b/synapse/logging/context.py
@@ -25,6 +25,7 @@ See doc/log_contexts.rst for details on how this works.
import logging
import threading
import types
+from typing import Any, List
from twisted.internet import defer, threads
@@ -194,7 +195,7 @@ class LoggingContext(object):
class Sentinel(object):
"""Sentinel to represent the root context"""
- __slots__ = []
+ __slots__ = [] # type: List[Any]
def __str__(self):
return "sentinel"
@@ -202,6 +203,10 @@ class LoggingContext(object):
def copy_to(self, record):
pass
+ def copy_to_twisted_log_entry(self, record):
+ record["request"] = None
+ record["scope"] = None
+
def start(self):
pass
@@ -330,6 +335,13 @@ class LoggingContext(object):
# we also track the current scope:
record.scope = self.scope
+ def copy_to_twisted_log_entry(self, record):
+ """
+ Copy logging fields from this context to a Twisted log record.
+ """
+ record["request"] = self.request
+ record["scope"] = self.scope
+
def start(self):
if get_thread_id() != self.main_thread:
logger.warning("Started logcontext %s on different thread", self)
diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py
index d2c209c471..308a27213b 100644
--- a/synapse/logging/opentracing.py
+++ b/synapse/logging/opentracing.py
@@ -43,6 +43,9 @@ OpenTracing to be easily disabled in Synapse and thereby have OpenTracing as
an optional dependency. This does however limit the number of modifiable spans
at any point in the code to one. From here out references to `opentracing`
in the code snippets refer to Synapse's module.
+Most methods provided in this module correspond directly to those provided
+by opentracing. Refer to the opentracing docs for more in-depth documentation
+of the args and methods.
Tracing
-------
@@ -68,52 +71,62 @@ set a tag on the current active span.
Tracing functions
-----------------
-Functions can be easily traced using decorators. There is a decorator for
-'normal' function and for functions which are actually deferreds. The name of
+Functions can be easily traced using decorators. The name of
the function becomes the operation name for the span.
.. code-block:: python
- from synapse.logging.opentracing import trace, trace_deferred
+ from synapse.logging.opentracing import trace
- # Start a span using 'normal_function' as the operation name
+ # Start a span using 'interesting_function' as the operation name
@trace
- def normal_function(*args, **kwargs):
+ def interesting_function(*args, **kwargs):
# Does all kinds of cool and expected things
return something_usual_and_useful
- # Start a span using 'deferred_function' as the operation name
- @trace_deferred
- @defer.inlineCallbacks
- def deferred_function(*args, **kwargs):
- # We start
- yield we_wait
- # we finish
- return something_usual_and_useful
-Operation names can be explicitly set for functions by using
-``trace_using_operation_name`` and
-``trace_deferred_using_operation_name``
+Operation names can be explicitly set for a function by passing the
+operation name to ``trace``
.. code-block:: python
- from synapse.logging.opentracing import (
- trace_using_operation_name,
- trace_deferred_using_operation_name
- )
+ from synapse.logging.opentracing import trace
- @trace_using_operation_name("A *much* better operation name")
- def normal_function(*args, **kwargs):
+ @trace(opname="a_better_operation_name")
+ def interesting_badly_named_function(*args, **kwargs):
# Does all kinds of cool and expected things
return something_usual_and_useful
- @trace_deferred_using_operation_name("Another exciting operation name!")
- @defer.inlineCallbacks
- def deferred_function(*args, **kwargs):
- # We start
- yield we_wait
- # we finish
- return something_usual_and_useful
+Setting Tags
+------------
+
+To set a tag on the active span, do:
+
+.. code-block:: python
+
+ from synapse.logging.opentracing import set_tag
+
+ set_tag(tag_name, tag_value)
+
+There's a convenient decorator to tag all the args of a method. It uses
+inspection to tag positional args with their formal parameter names, prefixed
+with 'ARG_'; kwargs are tagged with their names, unprefixed.
+
+.. code-block:: python
+
+ from synapse.logging.opentracing import tag_args
+
+ @tag_args
+    def set_fates(clotho, lachesis, atropos, father="Zeus", mother="Themis"):
+ pass
+
+ set_fates("the story", "the end", "the act")
+ # This will have the following tags
+ # - ARG_clotho: "the story"
+ # - ARG_lachesis: "the end"
+ # - ARG_atropos: "the act"
+    # - father: "Zeus"
+ # - mother: "Themis"
Contexts and carriers
---------------------
@@ -136,6 +149,9 @@ unchartered waters will require the enforcement of the whitelist.
``logging/opentracing.py`` has a ``whitelisted_homeserver`` method which takes
in a destination and compares it to the whitelist.
+Most injection methods take a 'destination' arg. The context will only be
+injected if the destination matches the whitelist, or if check_destination
+is set to False.
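+
+For example, the replication client uses this to always inject the context,
+skipping the whitelist check (a sketch of the pattern used later in this
+change):
+
+.. code-block:: python
+
+    from synapse.logging.opentracing import inject_active_span_byte_dict
+
+    headers = {}
+    # check_destination=False means the whitelist is not consulted.
+    inject_active_span_byte_dict(headers, None, check_destination=False)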
+
=======
Gotchas
=======
@@ -161,16 +177,54 @@ from twisted.internet import defer
from synapse.config import ConfigError
+# Helper class
+
+
+class _DummyTagNames(object):
+ """wrapper of opentracings tags. We need to have them if we
+ want to reference them without opentracing around. Clearly they
+    """Wrapper of opentracing's tags. We need to have them if we
+ these with the correct ones."""
+
+ INVALID_TAG = "invalid-tag"
+ COMPONENT = INVALID_TAG
+ DATABASE_INSTANCE = INVALID_TAG
+ DATABASE_STATEMENT = INVALID_TAG
+ DATABASE_TYPE = INVALID_TAG
+ DATABASE_USER = INVALID_TAG
+ ERROR = INVALID_TAG
+ HTTP_METHOD = INVALID_TAG
+ HTTP_STATUS_CODE = INVALID_TAG
+ HTTP_URL = INVALID_TAG
+ MESSAGE_BUS_DESTINATION = INVALID_TAG
+ PEER_ADDRESS = INVALID_TAG
+ PEER_HOSTNAME = INVALID_TAG
+ PEER_HOST_IPV4 = INVALID_TAG
+ PEER_HOST_IPV6 = INVALID_TAG
+ PEER_PORT = INVALID_TAG
+ PEER_SERVICE = INVALID_TAG
+ SAMPLING_PRIORITY = INVALID_TAG
+ SERVICE = INVALID_TAG
+ SPAN_KIND = INVALID_TAG
+ SPAN_KIND_CONSUMER = INVALID_TAG
+ SPAN_KIND_PRODUCER = INVALID_TAG
+ SPAN_KIND_RPC_CLIENT = INVALID_TAG
+ SPAN_KIND_RPC_SERVER = INVALID_TAG
+
+
try:
import opentracing
+
+ tags = opentracing.tags
except ImportError:
opentracing = None
+ tags = _DummyTagNames
try:
from jaeger_client import Config as JaegerConfig
from synapse.logging.scopecontextmanager import LogContextScopeManager
except ImportError:
- JaegerConfig = None
- LogContextScopeManager = None
+ JaegerConfig = None # type: ignore
+ LogContextScopeManager = None # type: ignore
logger = logging.getLogger(__name__)
@@ -185,8 +239,7 @@ _homeserver_whitelist = None
def only_if_tracing(func):
- """Executes the function only if we're tracing. Otherwise return.
- Assumes the function wrapped may return None"""
+ """Executes the function only if we're tracing. Otherwise returns None."""
@wraps(func)
def _only_if_tracing_inner(*args, **kwargs):
@@ -198,6 +251,41 @@ def only_if_tracing(func):
return _only_if_tracing_inner
+def ensure_active_span(message, ret=None):
+    """Executes the operation only if opentracing is enabled and there is an active span.
+    If there is no active span it logs `message` at the error level.
+
+    Args:
+        message (str): Message which fills in "There was no active span when trying to %s"
+            in the error log if there is no active span and opentracing is enabled.
+        ret (object): return value if opentracing is None or there is no active span.
+
+    Returns (object): The result of func, or `ret` if opentracing is disabled or there
+        was no active span.
+    """
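+    # A hedged usage sketch (mirroring the setters later in this module):
+    # wrapping `set_tag` so that calling it without an active span logs an
+    # error and returns None instead of raising:
+    #
+    #     @ensure_active_span("set a tag")
+    #     def set_tag(key, value):
+    #         opentracing.tracer.active_span.set_tag(key, value)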
+
+ def ensure_active_span_inner_1(func):
+ @wraps(func)
+ def ensure_active_span_inner_2(*args, **kwargs):
+ if not opentracing:
+ return ret
+
+ if not opentracing.tracer.active_span:
+ logger.error(
+ "There was no active span when trying to %s."
+ " Did you forget to start one or did a context slip?",
+ message,
+ )
+
+ return ret
+
+ return func(*args, **kwargs)
+
+ return ensure_active_span_inner_2
+
+ return ensure_active_span_inner_1
+
+
@contextlib.contextmanager
def _noop_context_manager(*args, **kwargs):
"""Does exactly what it says on the tin"""
@@ -239,10 +327,6 @@ def init_tracer(config):
scope_manager=LogContextScopeManager(config),
).initialize_tracer()
- # Set up tags to be opentracing's tags
- global tags
- tags = opentracing.tags
-
# Whitelisting
@@ -269,7 +353,7 @@ def whitelisted_homeserver(destination):
Args:
destination (str)
"""
- _homeserver_whitelist
+
if _homeserver_whitelist:
return _homeserver_whitelist.match(destination)
return False
@@ -299,30 +383,28 @@ def start_active_span(
if opentracing is None:
return _noop_context_manager()
- else:
- # We need to enter the scope here for the logcontext to become active
- return opentracing.tracer.start_active_span(
- operation_name,
- child_of=child_of,
- references=references,
- tags=tags,
- start_time=start_time,
- ignore_active_span=ignore_active_span,
- finish_on_close=finish_on_close,
- )
+ return opentracing.tracer.start_active_span(
+ operation_name,
+ child_of=child_of,
+ references=references,
+ tags=tags,
+ start_time=start_time,
+ ignore_active_span=ignore_active_span,
+ finish_on_close=finish_on_close,
+ )
def start_active_span_follows_from(operation_name, contexts):
if opentracing is None:
return _noop_context_manager()
- else:
- references = [opentracing.follows_from(context) for context in contexts]
- scope = start_active_span(operation_name, references=references)
- return scope
+
+ references = [opentracing.follows_from(context) for context in contexts]
+ scope = start_active_span(operation_name, references=references)
+ return scope
-def start_active_span_from_context(
- headers,
+def start_active_span_from_request(
+ request,
operation_name,
references=None,
tags=None,
@@ -331,9 +413,9 @@ def start_active_span_from_context(
finish_on_close=True,
):
"""
- Extracts a span context from Twisted Headers.
+ Extracts a span context from a Twisted Request.
args:
- headers (twisted.web.http_headers.Headers)
+        request (twisted.web.http.Request)
For the other args see opentracing.tracer
@@ -347,7 +429,9 @@ def start_active_span_from_context(
if opentracing is None:
return _noop_context_manager()
- header_dict = {k.decode(): v[0].decode() for k, v in headers.getAllRawHeaders()}
+ header_dict = {
+ k.decode(): v[0].decode() for k, v in request.requestHeaders.getAllRawHeaders()
+ }
context = opentracing.tracer.extract(opentracing.Format.HTTP_HEADERS, header_dict)
return opentracing.tracer.start_active_span(
@@ -413,19 +497,19 @@ def start_active_span_from_edu(
# Opentracing setters for tags, logs, etc
-@only_if_tracing
+@ensure_active_span("set a tag")
def set_tag(key, value):
"""Sets a tag on the active span"""
opentracing.tracer.active_span.set_tag(key, value)
-@only_if_tracing
+@ensure_active_span("log")
def log_kv(key_values, timestamp=None):
"""Log to the active span"""
opentracing.tracer.active_span.log_kv(key_values, timestamp)
-@only_if_tracing
+@ensure_active_span("set the trace's operation name")
def set_operation_name(operation_name):
"""Sets the operation name of the active span"""
opentracing.tracer.active_span.set_operation_name(operation_name)
@@ -434,13 +518,18 @@ def set_operation_name(operation_name):
# Injection and extraction
-@only_if_tracing
-def inject_active_span_twisted_headers(headers, destination):
+@ensure_active_span("inject the span into a header")
+def inject_active_span_twisted_headers(headers, destination, check_destination=True):
"""
Injects a span context into twisted headers in-place
Args:
headers (twisted.web.http_headers.Headers)
+        destination (str): address of the entity receiving the span context. If
+            check_destination is true, the context will only be injected if the
+            destination matches the opentracing whitelist
+        check_destination (bool): If false, destination will be ignored and the
+            context will always be injected.
span (opentracing.Span)
Returns:
@@ -454,7 +543,7 @@ def inject_active_span_twisted_headers(headers, destination):
https://github.com/jaegertracing/jaeger-client-python/blob/master/jaeger_client/constants.py
"""
- if not whitelisted_homeserver(destination):
+ if check_destination and not whitelisted_homeserver(destination):
return
span = opentracing.tracer.active_span
@@ -465,14 +554,19 @@ def inject_active_span_twisted_headers(headers, destination):
headers.addRawHeaders(key, value)
-@only_if_tracing
-def inject_active_span_byte_dict(headers, destination):
+@ensure_active_span("inject the span into a byte dict")
+def inject_active_span_byte_dict(headers, destination, check_destination=True):
"""
Injects a span context into a dict where the headers are encoded as byte
strings
Args:
headers (dict)
+        destination (str): address of the entity receiving the span context. If
+            check_destination is true, the context will only be injected if the
+            destination matches the opentracing whitelist
+        check_destination (bool): If false, destination will be ignored and the
+            context will always be injected.
span (opentracing.Span)
Returns:
@@ -485,7 +579,7 @@ def inject_active_span_byte_dict(headers, destination):
here:
https://github.com/jaegertracing/jaeger-client-python/blob/master/jaeger_client/constants.py
"""
- if not whitelisted_homeserver(destination):
+ if check_destination and not whitelisted_homeserver(destination):
return
span = opentracing.tracer.active_span
@@ -497,16 +591,18 @@ def inject_active_span_byte_dict(headers, destination):
headers[key.encode()] = [value.encode()]
-@only_if_tracing
-def inject_active_span_text_map(carrier, destination=None):
+@ensure_active_span("inject the span into a text map")
+def inject_active_span_text_map(carrier, destination, check_destination=True):
"""
Injects a span context into a dict
Args:
carrier (dict)
- destination (str): the name of the remote server. The span context
- will only be injected if the destination matches the homeserver_whitelist
- or destination is None.
+        destination (str): address of the entity receiving the span context. If
+            check_destination is true, the context will only be injected if the
+            destination matches the opentracing whitelist
+        check_destination (bool): If false, destination will be ignored and the
+            context will always be injected.
Returns:
In-place modification of carrier
@@ -519,7 +615,7 @@ def inject_active_span_text_map(carrier, destination=None):
https://github.com/jaegertracing/jaeger-client-python/blob/master/jaeger_client/constants.py
"""
- if destination and not whitelisted_homeserver(destination):
+ if check_destination and not whitelisted_homeserver(destination):
return
opentracing.tracer.inject(
@@ -527,6 +623,31 @@ def inject_active_span_text_map(carrier, destination=None):
)
+@ensure_active_span("get the active span context as a dict", ret={})
+def get_active_span_text_map(destination=None):
+ """
+ Gets a span context as a dict. This can be used instead of manually
+ injecting a span into an empty carrier.
+
+ Args:
+        destination (str): the name of the remote server. If given and not on
+            the opentracing whitelist, an empty dict is returned.
+
+ Returns:
+ dict: the active span's context if opentracing is enabled, otherwise empty.
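+
+    Example (a sketch; the carrier key shown is Jaeger's wire format and is
+    illustrative only)::
+
+        carrier = get_active_span_text_map()
+        # e.g. {"uber-trace-id": "3f6a0a13...:1:0:1"}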
+ """
+
+ if destination and not whitelisted_homeserver(destination):
+ return {}
+
+ carrier = {}
+ opentracing.tracer.inject(
+ opentracing.tracer.active_span, opentracing.Format.TEXT_MAP, carrier
+ )
+
+ return carrier
+
+
+@ensure_active_span("get the span context as a string", ret={})
def active_span_context_as_string():
"""
Returns:
@@ -566,70 +687,30 @@ def extract_text_map(carrier):
# Tracing decorators
-def trace(func):
+def trace(func=None, opname=None):
"""
Decorator to trace a function.
- Sets the operation name to that of the function's.
+    Sets the operation name to that of the function, or to the given
+    opname. See the module's docstring for usage
+    examples.
"""
- if opentracing is None:
- return func
-
- @wraps(func)
- def _trace_inner(self, *args, **kwargs):
- if opentracing is None:
- return func(self, *args, **kwargs)
- scope = start_active_span(func.__name__)
- scope.__enter__()
-
- try:
- result = func(self, *args, **kwargs)
- if isinstance(result, defer.Deferred):
-
- def call_back(result):
- scope.__exit__(None, None, None)
- return result
-
- def err_back(result):
- scope.span.set_tag(tags.ERROR, True)
- scope.__exit__(None, None, None)
- return result
-
- result.addCallbacks(call_back, err_back)
-
- else:
- scope.__exit__(None, None, None)
-
- return result
-
- except Exception as e:
- scope.__exit__(type(e), None, e.__traceback__)
- raise
-
- return _trace_inner
-
-
-def trace_using_operation_name(operation_name):
- """Decorator to trace a function. Explicitely sets the operation_name."""
-
- def trace(func):
- """
- Decorator to trace a function.
- Sets the operation name to that of the function's.
- """
+ def decorator(func):
if opentracing is None:
return func
+ _opname = opname if opname else func.__name__
+
@wraps(func)
- def _trace_inner(self, *args, **kwargs):
+ def _trace_inner(*args, **kwargs):
if opentracing is None:
- return func(self, *args, **kwargs)
+ return func(*args, **kwargs)
- scope = start_active_span(operation_name)
+ scope = start_active_span(_opname)
scope.__enter__()
try:
- result = func(self, *args, **kwargs)
+ result = func(*args, **kwargs)
if isinstance(result, defer.Deferred):
def call_back(result):
@@ -642,6 +723,7 @@ def trace_using_operation_name(operation_name):
return result
result.addCallbacks(call_back, err_back)
+
else:
scope.__exit__(None, None, None)
@@ -653,7 +735,10 @@ def trace_using_operation_name(operation_name):
return _trace_inner
- return trace
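+    # `trace` supports both bare use (@trace) and parametrised use
+    # (@trace(opname=...)); dispatch on whether the function was passed
+    # directly: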
+ if func:
+ return decorator(func)
+ else:
+ return decorator
def tag_args(func):
@@ -665,76 +750,54 @@ def tag_args(func):
return func
@wraps(func)
- def _tag_args_inner(self, *args, **kwargs):
+ def _tag_args_inner(*args, **kwargs):
argspec = inspect.getargspec(func)
for i, arg in enumerate(argspec.args[1:]):
set_tag("ARG_" + arg, args[i])
set_tag("args", args[len(argspec.args) :])
set_tag("kwargs", kwargs)
- return func(self, *args, **kwargs)
+ return func(*args, **kwargs)
return _tag_args_inner
-def trace_servlet(servlet_name, func):
+def trace_servlet(servlet_name, extract_context=False):
"""Decorator which traces a serlet. It starts a span with some servlet specific
- tags such as the servlet_name and request information"""
- if not opentracing:
- return func
+ tags such as the servlet_name and request information
- @wraps(func)
- @defer.inlineCallbacks
- def _trace_servlet_inner(request, *args, **kwargs):
- with start_active_span(
- "incoming-client-request",
- tags={
+ Args:
+ servlet_name (str): The name to be used for the span's operation_name
+ extract_context (bool): Whether to attempt to extract the opentracing
+ context from the request the servlet is handling.
+
+ """
+
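+    # A hedged usage sketch (matching the replication endpoint registration
+    # later in this change):
+    #
+    #     handler = trace_servlet("MyServlet", extract_context=True)(handler)
+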
+ def _trace_servlet_inner_1(func):
+ if not opentracing:
+ return func
+
+ @wraps(func)
+ @defer.inlineCallbacks
+ def _trace_servlet_inner(request, *args, **kwargs):
+ request_tags = {
"request_id": request.get_request_id(),
tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER,
tags.HTTP_METHOD: request.get_method(),
tags.HTTP_URL: request.get_redacted_uri(),
tags.PEER_HOST_IPV6: request.getClientIP(),
- "servlet_name": servlet_name,
- },
- ):
- result = yield defer.maybeDeferred(func, request, *args, **kwargs)
- return result
-
- return _trace_servlet_inner
-
-
-# Helper class
+ }
+ if extract_context:
+ scope = start_active_span_from_request(
+ request, servlet_name, tags=request_tags
+ )
+ else:
+ scope = start_active_span(servlet_name, tags=request_tags)
-class _DummyTagNames(object):
- """wrapper of opentracings tags. We need to have them if we
- want to reference them without opentracing around. Clearly they
- should never actually show up in a trace. `set_tags` overwrites
- these with the correct ones."""
-
- INVALID_TAG = "invalid-tag"
- COMPONENT = INVALID_TAG
- DATABASE_INSTANCE = INVALID_TAG
- DATABASE_STATEMENT = INVALID_TAG
- DATABASE_TYPE = INVALID_TAG
- DATABASE_USER = INVALID_TAG
- ERROR = INVALID_TAG
- HTTP_METHOD = INVALID_TAG
- HTTP_STATUS_CODE = INVALID_TAG
- HTTP_URL = INVALID_TAG
- MESSAGE_BUS_DESTINATION = INVALID_TAG
- PEER_ADDRESS = INVALID_TAG
- PEER_HOSTNAME = INVALID_TAG
- PEER_HOST_IPV4 = INVALID_TAG
- PEER_HOST_IPV6 = INVALID_TAG
- PEER_PORT = INVALID_TAG
- PEER_SERVICE = INVALID_TAG
- SAMPLING_PRIORITY = INVALID_TAG
- SERVICE = INVALID_TAG
- SPAN_KIND = INVALID_TAG
- SPAN_KIND_CONSUMER = INVALID_TAG
- SPAN_KIND_PRODUCER = INVALID_TAG
- SPAN_KIND_RPC_CLIENT = INVALID_TAG
- SPAN_KIND_RPC_SERVER = INVALID_TAG
+ with scope:
+ result = yield defer.maybeDeferred(func, request, *args, **kwargs)
+ return result
+ return _trace_servlet_inner
-tags = _DummyTagNames
+ return _trace_servlet_inner_1
diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py
index 488280b4a6..bec3b13397 100644
--- a/synapse/metrics/__init__.py
+++ b/synapse/metrics/__init__.py
@@ -20,6 +20,7 @@ import os
import platform
import threading
import time
+from typing import Dict, Union
import six
@@ -29,20 +30,20 @@ from prometheus_client.core import REGISTRY, GaugeMetricFamily, HistogramMetricF
from twisted.internet import reactor
+import synapse
from synapse.metrics._exposition import (
MetricsResource,
generate_latest,
start_http_server,
)
+from synapse.util.versionstring import get_version_string
logger = logging.getLogger(__name__)
METRICS_PREFIX = "/_synapse/metrics"
running_on_pypy = platform.python_implementation() == "PyPy"
-all_metrics = []
-all_collectors = []
-all_gauges = {}
+all_gauges = {} # type: Dict[str, Union[LaterGauge, InFlightGauge, BucketCollector]]
HAVE_PROC_SELF_STAT = os.path.exists("/proc/self/stat")
@@ -385,6 +386,16 @@ event_processing_last_ts = Gauge("synapse_event_processing_last_ts", "", ["name"
# finished being processed.
event_processing_lag = Gauge("synapse_event_processing_lag", "", ["name"])
+# Build info of the running server.
+build_info = Gauge(
+ "synapse_build_info", "Build information", ["pythonversion", "version", "osversion"]
+)
+build_info.labels(
+ " ".join([platform.python_implementation(), platform.python_version()]),
+ get_version_string(synapse),
+ " ".join([platform.system(), platform.release()]),
+).set(1)
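+# The gauge has a constant value of 1; the build details live in the labels,
+# e.g. (values are illustrative):
+#   synapse_build_info{osversion="Linux 5.4.0",pythonversion="CPython 3.7.4",version="1.3.1"} 1.0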
+
last_ticked = time.time()
diff --git a/synapse/metrics/_exposition.py b/synapse/metrics/_exposition.py
index 1933ecd3e3..74d9c3ecd3 100644
--- a/synapse/metrics/_exposition.py
+++ b/synapse/metrics/_exposition.py
@@ -36,7 +36,9 @@ from twisted.web.resource import Resource
try:
from prometheus_client.samples import Sample
except ImportError:
- Sample = namedtuple("Sample", ["name", "labels", "value", "timestamp", "exemplar"])
+ Sample = namedtuple(
+ "Sample", ["name", "labels", "value", "timestamp", "exemplar"]
+ ) # type: ignore
CONTENT_TYPE_LATEST = str("text/plain; version=0.0.4; charset=utf-8")
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
index 41147d4292..735b882363 100644
--- a/synapse/module_api/__init__.py
+++ b/synapse/module_api/__init__.py
@@ -101,7 +101,7 @@ class ModuleApi(object):
)
user_id = yield self.register_user(localpart, displayname, emails)
_, access_token = yield self.register_device(user_id)
- return (user_id, access_token)
+ return user_id, access_token
def register_user(self, localpart, displayname=None, emails=[]):
"""Registers a new user with given localpart and optional displayname, emails.
diff --git a/synapse/notifier.py b/synapse/notifier.py
index bd80c801b6..4e091314e6 100644
--- a/synapse/notifier.py
+++ b/synapse/notifier.py
@@ -472,11 +472,11 @@ class Notifier(object):
joined_room_ids = yield self.store.get_rooms_for_user(user.to_string())
if explicit_room_id:
if explicit_room_id in joined_room_ids:
- return ([explicit_room_id], True)
+ return [explicit_room_id], True
if (yield self._is_world_readable(explicit_room_id)):
- return ([explicit_room_id], False)
+ return [explicit_room_id], False
raise AuthError(403, "Non-joined access not allowed")
- return (joined_room_ids, True)
+ return joined_room_ids, True
@defer.inlineCallbacks
def _is_world_readable(self, room_id):
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index c831975635..22491f3700 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -134,7 +134,7 @@ class BulkPushRuleEvaluator(object):
pl_event = auth_events.get(POWER_KEY)
- return (pl_event.content if pl_event else {}, sender_level)
+ return pl_event.content if pl_event else {}, sender_level
@defer.inlineCallbacks
def action_for_event_by_user(self, event, context):
diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py
index bd5d53af91..a6c1c2a9a9 100644
--- a/synapse/push/httppusher.py
+++ b/synapse/push/httppusher.py
@@ -22,6 +22,7 @@ from prometheus_client import Counter
from twisted.internet import defer
from twisted.internet.error import AlreadyCalled, AlreadyCancelled
+from synapse.logging import opentracing
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.push import PusherConfigException
@@ -101,7 +102,7 @@ class HttpPusher(object):
if "url" not in self.data:
raise PusherConfigException("'url' required in data for HTTP pusher")
self.url = self.data["url"]
- self.http_client = hs.get_simple_http_client()
+ self.http_client = hs.get_proxied_http_client()
self.data_minus_url = {}
self.data_minus_url.update(self.data)
del self.data_minus_url["url"]
@@ -194,7 +195,17 @@ class HttpPusher(object):
)
for push_action in unprocessed:
- processed = yield self._process_one(push_action)
+ with opentracing.start_active_span(
+ "http-push",
+ tags={
+ "authenticated_entity": self.user_id,
+ "event_id": push_action["event_id"],
+ "app_id": self.app_id,
+ "app_display_name": self.app_display_name,
+ },
+ ):
+ processed = yield self._process_one(push_action)
+
if processed:
http_push_processed_counter.inc()
self.backoff_delay = HttpPusher.INITIAL_BACKOFF_SEC
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index 4245ce26f3..3dfd527849 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -131,14 +131,11 @@ class Mailer(object):
email_address (str): Email address we're sending the password
reset to
token (str): Unique token generated by the server to verify
- password reset email was received
+ the email was received
client_secret (str): Unique token generated by the client to
group together multiple email sending attempts
sid (str): The generated session ID
"""
- if email.utils.parseaddr(email_address)[1] == "":
- raise RuntimeError("Invalid 'to' email address")
-
link = (
self.hs.config.public_baseurl
+ "_matrix/client/unstable/password_reset/email/submit_token"
@@ -149,7 +146,34 @@ class Mailer(object):
yield self.send_email(
email_address,
- "[%s] Password Reset Email" % self.hs.config.server_name,
+ "[%s] Password Reset" % self.hs.config.server_name,
+ template_vars,
+ )
+
+ @defer.inlineCallbacks
+ def send_registration_mail(self, email_address, token, client_secret, sid):
+ """Send an email with a registration confirmation link to a user
+
+ Args:
+ email_address (str): Email address we're sending the registration
+ link to
+ token (str): Unique token generated by the server to verify
+ the email was received
+ client_secret (str): Unique token generated by the client to
+ group together multiple email sending attempts
+ sid (str): The generated session ID
+ """
+ link = (
+ self.hs.config.public_baseurl
+ + "_matrix/client/unstable/registration/email/submit_token"
+ "?token=%s&client_secret=%s&sid=%s" % (token, client_secret, sid)
+ )
+
+ template_vars = {"link": link}
+
+ yield self.send_email(
+ email_address,
+ "[%s] Register your Email Address" % self.hs.config.server_name,
template_vars,
)
@@ -605,25 +629,50 @@ def format_ts_filter(value, format):
return time.strftime(format, time.localtime(value / 1000))
-def load_jinja2_templates(config, template_html_name, template_text_name):
- """Load the jinja2 email templates from disk
+def load_jinja2_templates(
+ template_dir,
+ template_filenames,
+ apply_format_ts_filter=False,
+ apply_mxc_to_http_filter=False,
+ public_baseurl=None,
+):
+ """Loads and returns one or more jinja2 templates and applies optional filters
+
+ Args:
+ template_dir (str): The directory where templates are stored
+ template_filenames (list[str]): A list of template filenames
+ apply_format_ts_filter (bool): Whether to apply a template filter that formats
+ timestamps
+ apply_mxc_to_http_filter (bool): Whether to apply a template filter that converts
+ mxc urls to http urls
+ public_baseurl (str|None): The public baseurl of the server. Required for
+ apply_mxc_to_http_filter to be enabled
Returns:
- (template_html, template_text)
+ A list of jinja2 templates corresponding to the given list of filenames,
+ with order preserved
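+
+    Example:
+        A hedged usage sketch, mirroring how PusherFactory below loads the
+        notification templates (values come from config)::
+
+            notif_html, notif_text = load_jinja2_templates(
+                config.email_template_dir,
+                [config.email_notif_template_html, config.email_notif_template_text],
+                apply_format_ts_filter=True,
+                apply_mxc_to_http_filter=True,
+                public_baseurl=config.public_baseurl,
+            )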
"""
- logger.info("loading email templates from '%s'", config.email_template_dir)
- loader = jinja2.FileSystemLoader(config.email_template_dir)
+ logger.info(
+ "loading email templates %s from '%s'", template_filenames, template_dir
+ )
+ loader = jinja2.FileSystemLoader(template_dir)
env = jinja2.Environment(loader=loader)
- env.filters["format_ts"] = format_ts_filter
- env.filters["mxc_to_http"] = _create_mxc_to_http_filter(config)
- template_html = env.get_template(template_html_name)
- template_text = env.get_template(template_text_name)
+ if apply_format_ts_filter:
+ env.filters["format_ts"] = format_ts_filter
+
+ if apply_mxc_to_http_filter and public_baseurl:
+ env.filters["mxc_to_http"] = _create_mxc_to_http_filter(public_baseurl)
+
+ templates = []
+ for template_filename in template_filenames:
+ template = env.get_template(template_filename)
+ templates.append(template)
- return template_html, template_text
+ return templates
-def _create_mxc_to_http_filter(config):
+def _create_mxc_to_http_filter(public_baseurl):
def mxc_to_http_filter(value, width, height, resize_method="crop"):
if value[0:6] != "mxc://":
return ""
@@ -636,7 +685,7 @@ def _create_mxc_to_http_filter(config):
params = {"width": width, "height": height, "method": resize_method}
return "%s_matrix/media/v1/thumbnail/%s?%s%s" % (
- config.public_baseurl,
+ public_baseurl,
serverAndMediaId,
urllib.parse.urlencode(params),
fragment or "",
diff --git a/synapse/push/pusher.py b/synapse/push/pusher.py
index a9c64a9c54..f277aeb131 100644
--- a/synapse/push/pusher.py
+++ b/synapse/push/pusher.py
@@ -35,6 +35,7 @@ except Exception:
class PusherFactory(object):
def __init__(self, hs):
self.hs = hs
+ self.config = hs.config
self.pusher_types = {"http": HttpPusher}
@@ -42,12 +43,16 @@ class PusherFactory(object):
if hs.config.email_enable_notifs:
self.mailers = {} # app_name -> Mailer
- templates = load_jinja2_templates(
- config=hs.config,
- template_html_name=hs.config.email_notif_template_html,
- template_text_name=hs.config.email_notif_template_text,
+ self.notif_template_html, self.notif_template_text = load_jinja2_templates(
+ self.config.email_template_dir,
+ [
+ self.config.email_notif_template_html,
+ self.config.email_notif_template_text,
+ ],
+ apply_format_ts_filter=True,
+ apply_mxc_to_http_filter=True,
+ public_baseurl=self.config.public_baseurl,
)
- self.notif_template_html, self.notif_template_text = templates
self.pusher_types["email"] = self._create_email_pusher
@@ -78,6 +83,6 @@ class PusherFactory(object):
if "data" in pusherdict and "brand" in pusherdict["data"]:
app_name = pusherdict["data"]["brand"]
else:
- app_name = self.hs.config.email_app_name
+ app_name = self.config.email_app_name
return app_name
diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py
index c6465c0386..0bd563edc7 100644
--- a/synapse/python_dependencies.py
+++ b/synapse/python_dependencies.py
@@ -15,6 +15,7 @@
# limitations under the License.
import logging
+from typing import Set
from pkg_resources import (
DistributionNotFound,
@@ -47,9 +48,9 @@ REQUIREMENTS = [
"idna>=2.5",
# validating SSL certs for IP addresses requires service_identity 18.1.
"service_identity>=18.1.0",
- # our logcontext handling relies on the ability to cancel inlineCallbacks
- # (https://twistedmatrix.com/trac/ticket/4632) which landed in Twisted 18.7.
- "Twisted>=18.7.0",
+ # Twisted 18.9 introduces some logger improvements that the structured
+ # logger utilises
+ "Twisted>=18.9.0",
"treq>=15.1",
# Twisted has required pyopenssl 16.0 since about Twisted 16.6.
"pyopenssl>=16.0.0",
@@ -97,7 +98,7 @@ CONDITIONAL_REQUIREMENTS = {
"jwt": ["pyjwt>=1.6.4"],
}
-ALL_OPTIONAL_REQUIREMENTS = set()
+ALL_OPTIONAL_REQUIREMENTS = set() # type: Set[str]
for name, optional_deps in CONDITIONAL_REQUIREMENTS.items():
# Exclude systemd as it's a system-based requirement.
@@ -147,7 +148,13 @@ def check_requirements(for_feature=None):
)
except DistributionNotFound:
deps_needed.append(dependency)
- errors.append("Needed %s but it was not installed" % (dependency,))
+ if for_feature:
+ errors.append(
+ "Needed %s for the '%s' feature but it was not installed"
+ % (dependency, for_feature)
+ )
+ else:
+ errors.append("Needed %s but it was not installed" % (dependency,))
if not for_feature:
# Check the optional dependencies are up to date. We allow them to not be
@@ -168,8 +175,8 @@ def check_requirements(for_feature=None):
pass
if deps_needed:
- for e in errors:
- logging.error(e)
+ for err in errors:
+ logging.error(err)
raise DependencyException(deps_needed)
diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py
index 2e0594e581..03560c1f0e 100644
--- a/synapse/replication/http/_base.py
+++ b/synapse/replication/http/_base.py
@@ -28,6 +28,11 @@ from synapse.api.errors import (
RequestSendFailed,
SynapseError,
)
+from synapse.logging.opentracing import (
+ inject_active_span_byte_dict,
+ trace,
+ trace_servlet,
+)
from synapse.util.caches.response_cache import ResponseCache
from synapse.util.stringutils import random_string
@@ -128,6 +133,7 @@ class ReplicationEndpoint(object):
client = hs.get_simple_http_client()
+ @trace(opname="outgoing_replication_request")
@defer.inlineCallbacks
def send_request(**kwargs):
data = yield cls._serialize_payload(**kwargs)
@@ -165,8 +171,10 @@ class ReplicationEndpoint(object):
# have a good idea that the request has either succeeded or failed on
# the master, and so whether we should clean up or not.
while True:
+ headers = {}
+ inject_active_span_byte_dict(headers, None, check_destination=False)
try:
- result = yield request_func(uri, data)
+ result = yield request_func(uri, data, headers=headers)
break
except CodeMessageException as e:
if e.code != 504 or not cls.RETRY_ON_TIMEOUT:
@@ -205,7 +213,12 @@ class ReplicationEndpoint(object):
args = "/".join("(?P<%s>[^/]+)" % (arg,) for arg in url_args)
pattern = re.compile("^/_synapse/replication/%s/%s$" % (self.NAME, args))
- http_server.register_paths(method, [pattern], handler, self.__class__.__name__)
+ handler = trace_servlet(self.__class__.__name__, extract_context=True)(handler)
+        # We don't let register_paths trace this servlet using the default
+        # tracing options because we wish to extract the context explicitly.
+ http_server.register_paths(
+ method, [pattern], handler, self.__class__.__name__, trace=False
+ )
def _cached_handler(self, request, txn_id, **kwargs):
"""Called on new incoming requests when caching is enabled. Checks
diff --git a/synapse/replication/http/federation.py b/synapse/replication/http/federation.py
index fed4f08820..2f16955954 100644
--- a/synapse/replication/http/federation.py
+++ b/synapse/replication/http/federation.py
@@ -113,7 +113,7 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint):
event_and_contexts, backfilled
)
- return (200, {})
+ return 200, {}
class ReplicationFederationSendEduRestServlet(ReplicationEndpoint):
@@ -156,7 +156,7 @@ class ReplicationFederationSendEduRestServlet(ReplicationEndpoint):
result = yield self.registry.on_edu(edu_type, origin, edu_content)
- return (200, result)
+ return 200, result
class ReplicationGetQueryRestServlet(ReplicationEndpoint):
@@ -204,7 +204,7 @@ class ReplicationGetQueryRestServlet(ReplicationEndpoint):
result = yield self.registry.on_query(query_type, args)
- return (200, result)
+ return 200, result
class ReplicationCleanRoomRestServlet(ReplicationEndpoint):
@@ -238,7 +238,7 @@ class ReplicationCleanRoomRestServlet(ReplicationEndpoint):
def _handle_request(self, request, room_id):
yield self.store.clean_room_for_join(room_id)
- return (200, {})
+ return 200, {}
def register_servlets(hs, http_server):
diff --git a/synapse/replication/http/login.py b/synapse/replication/http/login.py
index f17d3a2da4..786f5232b2 100644
--- a/synapse/replication/http/login.py
+++ b/synapse/replication/http/login.py
@@ -64,7 +64,7 @@ class RegisterDeviceReplicationServlet(ReplicationEndpoint):
user_id, device_id, initial_display_name, is_guest
)
- return (200, {"device_id": device_id, "access_token": access_token})
+ return 200, {"device_id": device_id, "access_token": access_token}
def register_servlets(hs, http_server):
diff --git a/synapse/replication/http/membership.py b/synapse/replication/http/membership.py
index 4217335d88..b9ce3477ad 100644
--- a/synapse/replication/http/membership.py
+++ b/synapse/replication/http/membership.py
@@ -83,7 +83,7 @@ class ReplicationRemoteJoinRestServlet(ReplicationEndpoint):
remote_room_hosts, room_id, user_id, event_content
)
- return (200, {})
+ return 200, {}
class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint):
@@ -153,7 +153,7 @@ class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint):
yield self.store.locally_reject_invite(user_id, room_id)
ret = {}
- return (200, ret)
+ return 200, ret
class ReplicationUserJoinedLeftRoomRestServlet(ReplicationEndpoint):
@@ -202,7 +202,7 @@ class ReplicationUserJoinedLeftRoomRestServlet(ReplicationEndpoint):
else:
raise Exception("Unrecognized change: %r", change)
- return (200, {})
+ return 200, {}
def register_servlets(hs, http_server):
diff --git a/synapse/replication/http/register.py b/synapse/replication/http/register.py
index 3341320a87..38260256cf 100644
--- a/synapse/replication/http/register.py
+++ b/synapse/replication/http/register.py
@@ -90,7 +90,7 @@ class ReplicationRegisterServlet(ReplicationEndpoint):
address=content["address"],
)
- return (200, {})
+ return 200, {}
class ReplicationPostRegisterActionsServlet(ReplicationEndpoint):
@@ -106,7 +106,7 @@ class ReplicationPostRegisterActionsServlet(ReplicationEndpoint):
self.registration_handler = hs.get_registration_handler()
@staticmethod
- def _serialize_payload(user_id, auth_result, access_token, bind_email, bind_msisdn):
+ def _serialize_payload(user_id, auth_result, access_token):
"""
Args:
user_id (str): The user ID that consented
@@ -114,17 +114,8 @@ class ReplicationPostRegisterActionsServlet(ReplicationEndpoint):
registered user.
access_token (str|None): The access token of the newly logged in
device, or None if `inhibit_login` enabled.
- bind_email (bool): Whether to bind the email with the identity
- server
- bind_msisdn (bool): Whether to bind the msisdn with the identity
- server
"""
- return {
- "auth_result": auth_result,
- "access_token": access_token,
- "bind_email": bind_email,
- "bind_msisdn": bind_msisdn,
- }
+ return {"auth_result": auth_result, "access_token": access_token}
@defer.inlineCallbacks
def _handle_request(self, request, user_id):
@@ -132,18 +123,12 @@ class ReplicationPostRegisterActionsServlet(ReplicationEndpoint):
auth_result = content["auth_result"]
access_token = content["access_token"]
- bind_email = content["bind_email"]
- bind_msisdn = content["bind_msisdn"]
yield self.registration_handler.post_registration_actions(
- user_id=user_id,
- auth_result=auth_result,
- access_token=access_token,
- bind_email=bind_email,
- bind_msisdn=bind_msisdn,
+ user_id=user_id, auth_result=auth_result, access_token=access_token
)
- return (200, {})
+ return 200, {}
def register_servlets(hs, http_server):
diff --git a/synapse/replication/http/send_event.py b/synapse/replication/http/send_event.py
index eff7bd7305..adb9b2f7f4 100644
--- a/synapse/replication/http/send_event.py
+++ b/synapse/replication/http/send_event.py
@@ -117,7 +117,7 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
requester, event, context, ratelimit=ratelimit, extra_users=extra_users
)
- return (200, {})
+ return 200, {}
def register_servlets(hs, http_server):
diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py
index c10b85d2ff..f03111c259 100644
--- a/synapse/replication/tcp/streams/_base.py
+++ b/synapse/replication/tcp/streams/_base.py
@@ -158,7 +158,7 @@ class Stream(object):
updates, current_token = yield self.get_updates_since(self.last_token)
self.last_token = current_token
- return (updates, current_token)
+ return updates, current_token
@defer.inlineCallbacks
def get_updates_since(self, from_token):
@@ -172,14 +172,14 @@ class Stream(object):
sent over the replication steam.
"""
if from_token in ("NOW", "now"):
- return ([], self.upto_token)
+ return [], self.upto_token
current_token = self.upto_token
from_token = int(from_token)
if from_token == current_token:
- return ([], current_token)
+ return [], current_token
if self._LIMITED:
rows = yield self.update_function(
@@ -198,7 +198,7 @@ class Stream(object):
if self._LIMITED and len(updates) >= MAX_EVENTS_BEHIND:
raise Exception("stream %s has fallen behind" % (self.NAME))
- return (updates, current_token)
+ return updates, current_token
def current_token(self):
"""Gets the current token of the underlying streams. Should be provided
diff --git a/synapse/res/templates/password_reset.html b/synapse/res/templates/password_reset.html
index 4fa7b36734..a197bf872c 100644
--- a/synapse/res/templates/password_reset.html
+++ b/synapse/res/templates/password_reset.html
@@ -4,6 +4,6 @@
<a href="{{ link }}">{{ link }}</a>
- <p>If this was not you, please disregard this email and contact your server administrator. Thank you.</p>
+ <p>If this was not you, <strong>do not</strong> click the link above and instead contact your server administrator. Thank you.</p>
</body>
</html>
diff --git a/synapse/res/templates/password_reset.txt b/synapse/res/templates/password_reset.txt
index f0deff59a7..6aa6527560 100644
--- a/synapse/res/templates/password_reset.txt
+++ b/synapse/res/templates/password_reset.txt
@@ -3,5 +3,5 @@ was you, please click the link below to confirm resetting your password:
{{ link }}
-If this was not you, please disregard this email and contact your server
-administrator. Thank you.
+If this was not you, DO NOT click the link above and instead contact your
+server administrator. Thank you.
diff --git a/synapse/res/templates/password_reset_failure.html b/synapse/res/templates/password_reset_failure.html
index 0b132cf8db..9e3c4446e3 100644
--- a/synapse/res/templates/password_reset_failure.html
+++ b/synapse/res/templates/password_reset_failure.html
@@ -1,6 +1,8 @@
<html>
<head></head>
<body>
-<p>{{ failure_reason }}. Your password has not been reset.</p>
+<p>The request failed for the following reason: {{ failure_reason }}.</p>
+
+<p>Your password has not been reset.</p>
</body>
</html>
diff --git a/synapse/res/templates/registration.html b/synapse/res/templates/registration.html
new file mode 100644
index 0000000000..16730a527f
--- /dev/null
+++ b/synapse/res/templates/registration.html
@@ -0,0 +1,11 @@
+<html>
+<body>
+ <p>You have asked us to register this email with a new Matrix account. If this was you, please click the link below to confirm your email address:</p>
+
+ <a href="{{ link }}">Verify Your Email Address</a>
+
+ <p>If this was not you, you can safely disregard this email.</p>
+
+ <p>Thank you.</p>
+</body>
+</html>
diff --git a/synapse/res/templates/registration.txt b/synapse/res/templates/registration.txt
new file mode 100644
index 0000000000..cb4f16a90c
--- /dev/null
+++ b/synapse/res/templates/registration.txt
@@ -0,0 +1,10 @@
+Hello there,
+
+You have asked us to register this email with a new Matrix account. If this
+was you, please click the link below to confirm your email address:
+
+{{ link }}
+
+If this was not you, you can safely disregard this email.
+
+Thank you.
diff --git a/synapse/res/templates/registration_failure.html b/synapse/res/templates/registration_failure.html
new file mode 100644
index 0000000000..2833d79c37
--- /dev/null
+++ b/synapse/res/templates/registration_failure.html
@@ -0,0 +1,6 @@
+<html>
+<head></head>
+<body>
+<p>Validation failed for the following reason: {{ failure_reason }}.</p>
+</body>
+</html>
diff --git a/synapse/res/templates/registration_success.html b/synapse/res/templates/registration_success.html
new file mode 100644
index 0000000000..fbd6e4018f
--- /dev/null
+++ b/synapse/res/templates/registration_success.html
@@ -0,0 +1,6 @@
+<html>
+<head></head>
+<body>
+<p>Your email has now been validated, please return to your client. You may now close this window.</p>
+</body>
+</html>
diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py
index 1d20b96d03..14eca70ba4 100644
--- a/synapse/rest/__init__.py
+++ b/synapse/rest/__init__.py
@@ -41,6 +41,7 @@ from synapse.rest.client.v2_alpha import (
keys,
notifications,
openid,
+ password_policy,
read_marker,
receipts,
register,
@@ -73,7 +74,7 @@ class ClientRestResource(JsonResource):
@staticmethod
def register_servlets(client_resource, hs):
- versions.register_servlets(client_resource)
+ versions.register_servlets(hs, client_resource)
# Deprecated in r0
initial_sync.register_servlets(hs, client_resource)
@@ -117,6 +118,7 @@ class ClientRestResource(JsonResource):
room_upgrade_rest_servlet.register_servlets(hs, client_resource)
capabilities.register_servlets(hs, client_resource)
account_validity.register_servlets(hs, client_resource)
+ password_policy.register_servlets(hs, client_resource)
relations.register_servlets(hs, client_resource)
# moving to /_synapse/admin
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
index 5720cab425..81b6bd8816 100644
--- a/synapse/rest/admin/__init__.py
+++ b/synapse/rest/admin/__init__.py
@@ -41,8 +41,10 @@ from synapse.rest.admin._base import (
assert_user_is_admin,
historical_admin_path_patterns,
)
-from synapse.rest.admin.media import register_servlets_for_media_repo
+from synapse.rest.admin.media import ListMediaInRoom, register_servlets_for_media_repo
+from synapse.rest.admin.purge_room_servlet import PurgeRoomServlet
from synapse.rest.admin.server_notice_servlet import SendServerNoticeServlet
+from synapse.rest.admin.users import UserAdminServlet
from synapse.types import UserID, create_requester
from synapse.util.versionstring import get_version_string
@@ -50,7 +52,7 @@ logger = logging.getLogger(__name__)
class UsersRestServlet(RestServlet):
- PATTERNS = historical_admin_path_patterns("/users/(?P<user_id>[^/]*)")
+ PATTERNS = historical_admin_path_patterns("/users/(?P<user_id>[^/]*)$")
def __init__(self, hs):
self.hs = hs
@@ -67,7 +69,7 @@ class UsersRestServlet(RestServlet):
ret = yield self.handlers.admin_handler.get_users()
- return (200, ret)
+ return 200, ret
class VersionServlet(RestServlet):
@@ -118,7 +120,7 @@ class UserRegisterServlet(RestServlet):
nonce = self.hs.get_secrets().token_hex(64)
self.nonces[nonce] = int(self.reactor.seconds())
- return (200, {"nonce": nonce})
+ return 200, {"nonce": nonce}
@defer.inlineCallbacks
def on_POST(self, request):
@@ -210,7 +212,7 @@ class UserRegisterServlet(RestServlet):
)
result = yield register._create_registration_details(user_id, body)
- return (200, result)
+ return 200, result
class WhoisRestServlet(RestServlet):
@@ -235,7 +237,7 @@ class WhoisRestServlet(RestServlet):
ret = yield self.handlers.admin_handler.get_whois(target_user)
- return (200, ret)
+ return 200, ret
class PurgeHistoryRestServlet(RestServlet):
@@ -320,7 +322,7 @@ class PurgeHistoryRestServlet(RestServlet):
room_id, token, delete_local_events=delete_local_events
)
- return (200, {"purge_id": purge_id})
+ return 200, {"purge_id": purge_id}
class PurgeHistoryStatusRestServlet(RestServlet):
@@ -345,7 +347,7 @@ class PurgeHistoryStatusRestServlet(RestServlet):
if purge_status is None:
raise NotFoundError("purge id '%s' not found" % purge_id)
- return (200, purge_status.asdict())
+ return 200, purge_status.asdict()
class DeactivateAccountRestServlet(RestServlet):
@@ -377,7 +379,7 @@ class DeactivateAccountRestServlet(RestServlet):
else:
id_server_unbind_result = "no-support"
- return (200, {"id_server_unbind_result": id_server_unbind_result})
+ return 200, {"id_server_unbind_result": id_server_unbind_result}
class ShutdownRoomRestServlet(RestServlet):
@@ -547,7 +549,7 @@ class ResetPasswordRestServlet(RestServlet):
yield self._set_password_handler.set_password(
target_user_id, new_password, requester
)
- return (200, {})
+ return 200, {}
class GetUsersPaginatedRestServlet(RestServlet):
@@ -589,7 +591,7 @@ class GetUsersPaginatedRestServlet(RestServlet):
logger.info("limit: %s, start: %s", limit, start)
ret = yield self.handlers.admin_handler.get_users_paginate(order, start, limit)
- return (200, ret)
+ return 200, ret
@defer.inlineCallbacks
def on_POST(self, request, target_user_id):
@@ -617,7 +619,7 @@ class GetUsersPaginatedRestServlet(RestServlet):
logger.info("limit: %s, start: %s", limit, start)
ret = yield self.handlers.admin_handler.get_users_paginate(order, start, limit)
- return (200, ret)
+ return 200, ret
class SearchUsersRestServlet(RestServlet):
@@ -660,7 +662,7 @@ class SearchUsersRestServlet(RestServlet):
logger.info("term: %s ", term)
ret = yield self.handlers.admin_handler.search_users(term)
- return (200, ret)
+ return 200, ret
class DeleteGroupAdminRestServlet(RestServlet):
@@ -683,7 +685,7 @@ class DeleteGroupAdminRestServlet(RestServlet):
raise SynapseError(400, "Can only delete local groups")
yield self.group_server.delete_group(group_id, requester.user.to_string())
- return (200, {})
+ return 200, {}
class AccountValidityRenewServlet(RestServlet):
@@ -714,7 +716,7 @@ class AccountValidityRenewServlet(RestServlet):
)
res = {"expiration_ts": expiration_ts}
- return (200, res)
+ return 200, res
########################################################################################
@@ -738,8 +740,10 @@ def register_servlets(hs, http_server):
Register all the admin servlets.
"""
register_servlets_for_client_rest_resource(hs, http_server)
+ PurgeRoomServlet(hs).register(http_server)
SendServerNoticeServlet(hs).register(http_server)
VersionServlet(hs).register(http_server)
+ UserAdminServlet(hs).register(http_server)
def register_servlets_for_client_rest_resource(hs, http_server):
@@ -757,9 +761,12 @@ def register_servlets_for_client_rest_resource(hs, http_server):
DeleteGroupAdminRestServlet(hs).register(http_server)
AccountValidityRenewServlet(hs).register(http_server)
- # Load the media repo ones if we're using them.
+ # Load the media repo ones if we're using them. Otherwise load the servlets which
+ # don't need a media repo (typically read-only admin APIs).
if hs.config.can_load_media_repo:
register_servlets_for_media_repo(hs, http_server)
+ else:
+ ListMediaInRoom(hs).register(http_server)
# don't add more things here: new servlets should only be exposed on
# /_synapse/admin so should not go here. Instead register them in AdminRestResource.
diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py
index 824df919f2..ed7086d09c 100644
--- a/synapse/rest/admin/media.py
+++ b/synapse/rest/admin/media.py
@@ -49,7 +49,7 @@ class QuarantineMediaInRoom(RestServlet):
room_id, requester.user.to_string()
)
- return (200, {"num_quarantined": num_quarantined})
+ return 200, {"num_quarantined": num_quarantined}
class ListMediaInRoom(RestServlet):
@@ -60,6 +60,7 @@ class ListMediaInRoom(RestServlet):
def __init__(self, hs):
self.store = hs.get_datastore()
+ self.auth = hs.get_auth()
@defer.inlineCallbacks
def on_GET(self, request, room_id):
@@ -70,7 +71,7 @@ class ListMediaInRoom(RestServlet):
local_mxcs, remote_mxcs = yield self.store.get_media_mxcs_in_room(room_id)
- return (200, {"local": local_mxcs, "remote": remote_mxcs})
+ return 200, {"local": local_mxcs, "remote": remote_mxcs}
class PurgeMediaCacheRestServlet(RestServlet):
@@ -89,7 +90,7 @@ class PurgeMediaCacheRestServlet(RestServlet):
ret = yield self.media_repository.delete_old_remote_media(before_ts)
- return (200, ret)
+ return 200, ret
def register_servlets_for_media_repo(hs, http_server):
diff --git a/synapse/rest/admin/purge_room_servlet.py b/synapse/rest/admin/purge_room_servlet.py
new file mode 100644
index 0000000000..f474066542
--- /dev/null
+++ b/synapse/rest/admin/purge_room_servlet.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import re
+
+from synapse.http.servlet import (
+ RestServlet,
+ assert_params_in_dict,
+ parse_json_object_from_request,
+)
+from synapse.rest.admin import assert_requester_is_admin
+
+
+class PurgeRoomServlet(RestServlet):
+ """Servlet which will remove all trace of a room from the database
+
+ POST /_synapse/admin/v1/purge_room
+ {
+ "room_id": "!room:id"
+ }
+
+ returns:
+
+ {}
+ """
+
+ PATTERNS = (re.compile("^/_synapse/admin/v1/purge_room$"),)
+
+ def __init__(self, hs):
+ """
+ Args:
+ hs (synapse.server.HomeServer): server
+ """
+ self.hs = hs
+ self.auth = hs.get_auth()
+ self.pagination_handler = hs.get_pagination_handler()
+
+ async def on_POST(self, request):
+ await assert_requester_is_admin(self.auth, request)
+
+ body = parse_json_object_from_request(request)
+ assert_params_in_dict(body, ("room_id",))
+
+ await self.pagination_handler.purge_room(body["room_id"])
+
+ return 200, {}
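A minimal sketch of driving the new purge_room endpoint from an admin script. The homeserver URL, access token, and room ID below are placeholders, and the `requests` client is illustrative only; the servlet itself just needs an admin access token and a "room_id" in the JSON body.

    import requests

    # Placeholders -- substitute a real homeserver URL and admin access token.
    homeserver = "https://homeserver.example.com"
    headers = {"Authorization": "Bearer ADMIN_ACCESS_TOKEN"}

    resp = requests.post(
        homeserver + "/_synapse/admin/v1/purge_room",
        headers=headers,
        json={"room_id": "!room:id"},
    )
    resp.raise_for_status()
    print(resp.json())  # {} on success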
diff --git a/synapse/rest/admin/server_notice_servlet.py b/synapse/rest/admin/server_notice_servlet.py
index 656526fea5..ae2cbe2e0a 100644
--- a/synapse/rest/admin/server_notice_servlet.py
+++ b/synapse/rest/admin/server_notice_servlet.py
@@ -92,7 +92,7 @@ class SendServerNoticeServlet(RestServlet):
event_content=body["content"],
)
- return (200, {"event_id": event.event_id})
+ return 200, {"event_id": event.event_id}
def on_PUT(self, request, txn_id):
return self.txns.fetch_or_execute_request(
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
new file mode 100644
index 0000000000..9720a3bab0
--- /dev/null
+++ b/synapse/rest/admin/users.py
@@ -0,0 +1,100 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import re
+
+from twisted.internet import defer
+
+from synapse.api.errors import SynapseError
+from synapse.http.servlet import (
+ RestServlet,
+ assert_params_in_dict,
+ parse_json_object_from_request,
+)
+from synapse.rest.admin import assert_requester_is_admin, assert_user_is_admin
+from synapse.types import UserID
+
+
+class UserAdminServlet(RestServlet):
+ """
+ Get or set whether or not a user is a server administrator.
+
+ Note that only local users can be server administrators, and that an
+ administrator may not demote themselves.
+
+ Only server administrators can use this API.
+
+ Examples:
+ * Get
+ GET /_synapse/admin/v1/users/@nonadmin:example.com/admin
+ response on success:
+ {
+ "admin": false
+ }
+ * Set
+ PUT /_synapse/admin/v1/users/@reivilibre:librepush.net/admin
+ request body:
+ {
+ "admin": true
+ }
+ response on success:
+ {}
+ """
+
+ PATTERNS = (re.compile("^/_synapse/admin/v1/users/(?P<user_id>@[^/]*)/admin$"),)
+
+ def __init__(self, hs):
+ self.hs = hs
+ self.auth = hs.get_auth()
+ self.handlers = hs.get_handlers()
+
+ @defer.inlineCallbacks
+ def on_GET(self, request, user_id):
+ yield assert_requester_is_admin(self.auth, request)
+
+ target_user = UserID.from_string(user_id)
+
+ if not self.hs.is_mine(target_user):
+ raise SynapseError(400, "Only local users can be admins of this homeserver")
+
+ is_admin = yield self.handlers.admin_handler.get_user_server_admin(target_user)
+ is_admin = bool(is_admin)
+
+ return 200, {"admin": is_admin}
+
+ @defer.inlineCallbacks
+ def on_PUT(self, request, user_id):
+ requester = yield self.auth.get_user_by_req(request)
+ yield assert_user_is_admin(self.auth, requester.user)
+ auth_user = requester.user
+
+ target_user = UserID.from_string(user_id)
+
+ body = parse_json_object_from_request(request)
+
+ assert_params_in_dict(body, ["admin"])
+
+ if not self.hs.is_mine(target_user):
+ raise SynapseError(400, "Only local users can be admins of this homeserver")
+
+ set_admin_to = bool(body["admin"])
+
+ if target_user == auth_user and not set_admin_to:
+ raise SynapseError(400, "You may not demote yourself.")
+
+ yield self.handlers.admin_handler.set_user_server_admin(
+ target_user, set_admin_to
+ )
+
+ return 200, {}
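Similarly, a hedged sketch of exercising the new admin-bit API end to end (placeholders as above). The PUT body carries a boolean "admin", and the servlet refuses self-demotion with a 400:

    import requests

    homeserver = "https://homeserver.example.com"  # placeholder
    headers = {"Authorization": "Bearer ADMIN_ACCESS_TOKEN"}  # placeholder token
    url = homeserver + "/_synapse/admin/v1/users/@nonadmin:example.com/admin"

    # Read the current admin bit.
    print(requests.get(url, headers=headers).json())  # e.g. {"admin": false}

    # Grant server-admin rights; the response body is {} on success.
    resp = requests.put(url, headers=headers, json={"admin": True})
    resp.raise_for_status()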
diff --git a/synapse/rest/client/v1/directory.py b/synapse/rest/client/v1/directory.py
index 4284738021..4ea3666874 100644
--- a/synapse/rest/client/v1/directory.py
+++ b/synapse/rest/client/v1/directory.py
@@ -54,7 +54,7 @@ class ClientDirectoryServer(RestServlet):
dir_handler = self.handlers.directory_handler
res = yield dir_handler.get_association(room_alias)
- return (200, res)
+ return 200, res
@defer.inlineCallbacks
def on_PUT(self, request, room_alias):
@@ -87,7 +87,7 @@ class ClientDirectoryServer(RestServlet):
requester, room_alias, room_id, servers
)
- return (200, {})
+ return 200, {}
@defer.inlineCallbacks
def on_DELETE(self, request, room_alias):
@@ -102,7 +102,7 @@ class ClientDirectoryServer(RestServlet):
service.url,
room_alias.to_string(),
)
- return (200, {})
+ return 200, {}
except InvalidClientCredentialsError:
# fallback to default user behaviour if they aren't an AS
pass
@@ -118,7 +118,7 @@ class ClientDirectoryServer(RestServlet):
"User %s deleted alias %s", user.to_string(), room_alias.to_string()
)
- return (200, {})
+ return 200, {}
class ClientDirectoryListServer(RestServlet):
@@ -136,7 +136,7 @@ class ClientDirectoryListServer(RestServlet):
if room is None:
raise NotFoundError("Unknown room")
- return (200, {"visibility": "public" if room["is_public"] else "private"})
+ return 200, {"visibility": "public" if room["is_public"] else "private"}
@defer.inlineCallbacks
def on_PUT(self, request, room_id):
@@ -149,7 +149,7 @@ class ClientDirectoryListServer(RestServlet):
requester, room_id, visibility
)
- return (200, {})
+ return 200, {}
@defer.inlineCallbacks
def on_DELETE(self, request, room_id):
@@ -159,7 +159,7 @@ class ClientDirectoryListServer(RestServlet):
requester, room_id, "private"
)
- return (200, {})
+ return 200, {}
class ClientAppserviceDirectoryListServer(RestServlet):
@@ -193,4 +193,4 @@ class ClientAppserviceDirectoryListServer(RestServlet):
requester.app_service.id, network_id, room_id, visibility
)
- return (200, {})
+ return 200, {}
diff --git a/synapse/rest/client/v1/events.py b/synapse/rest/client/v1/events.py
index 53ebed2203..6651b4cf07 100644
--- a/synapse/rest/client/v1/events.py
+++ b/synapse/rest/client/v1/events.py
@@ -67,10 +67,10 @@ class EventStreamRestServlet(RestServlet):
is_guest=is_guest,
)
- return (200, chunk)
+ return 200, chunk
def on_OPTIONS(self, request):
- return (200, {})
+ return 200, {}
# TODO: Unit test gets, with and without auth, with different kinds of events.
@@ -91,9 +91,9 @@ class EventRestServlet(RestServlet):
time_now = self.clock.time_msec()
if event:
event = yield self._event_serializer.serialize_event(event, time_now)
- return (200, event)
+ return 200, event
else:
- return (404, "Event not found.")
+ return 404, "Event not found."
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v1/initial_sync.py b/synapse/rest/client/v1/initial_sync.py
index 70b8478e90..2da3cd7511 100644
--- a/synapse/rest/client/v1/initial_sync.py
+++ b/synapse/rest/client/v1/initial_sync.py
@@ -42,7 +42,7 @@ class InitialSyncRestServlet(RestServlet):
include_archived=include_archived,
)
- return (200, content)
+ return 200, content
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py
index 5762b9fd06..b74cb15c1f 100644
--- a/synapse/rest/client/v1/login.py
+++ b/synapse/rest/client/v1/login.py
@@ -121,10 +121,10 @@ class LoginRestServlet(RestServlet):
({"type": t} for t in self.auth_handler.get_supported_login_types())
)
- return (200, {"flows": flows})
+ return 200, {"flows": flows}
def on_OPTIONS(self, request):
- return (200, {})
+ return 200, {}
@defer.inlineCallbacks
def on_POST(self, request):
@@ -152,7 +152,7 @@ class LoginRestServlet(RestServlet):
well_known_data = self._well_known_builder.get_well_known()
if well_known_data:
result["well_known"] = well_known_data
- return (200, result)
+ return 200, result
@defer.inlineCallbacks
def _do_other_login(self, login_submission):
@@ -378,7 +378,7 @@ class CasTicketServlet(RestServlet):
self.cas_service_url = hs.config.cas_service_url
self.cas_required_attributes = hs.config.cas_required_attributes
self._sso_auth_handler = SSOAuthHandler(hs)
- self._http_client = hs.get_simple_http_client()
+ self._http_client = hs.get_proxied_http_client()
@defer.inlineCallbacks
def on_GET(self, request):
diff --git a/synapse/rest/client/v1/logout.py b/synapse/rest/client/v1/logout.py
index 2769f3a189..4785a34d75 100644
--- a/synapse/rest/client/v1/logout.py
+++ b/synapse/rest/client/v1/logout.py
@@ -33,7 +33,7 @@ class LogoutRestServlet(RestServlet):
self._device_handler = hs.get_device_handler()
def on_OPTIONS(self, request):
- return (200, {})
+ return 200, {}
@defer.inlineCallbacks
def on_POST(self, request):
@@ -49,7 +49,7 @@ class LogoutRestServlet(RestServlet):
requester.user.to_string(), requester.device_id
)
- return (200, {})
+ return 200, {}
class LogoutAllRestServlet(RestServlet):
@@ -62,7 +62,7 @@ class LogoutAllRestServlet(RestServlet):
self._device_handler = hs.get_device_handler()
def on_OPTIONS(self, request):
- return (200, {})
+ return 200, {}
@defer.inlineCallbacks
def on_POST(self, request):
@@ -75,7 +75,7 @@ class LogoutAllRestServlet(RestServlet):
# .. and then delete any access tokens which weren't associated with
# devices.
yield self._auth_handler.delete_access_tokens_for_user(user_id)
- return (200, {})
+ return 200, {}
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v1/presence.py b/synapse/rest/client/v1/presence.py
index 1eb1068c98..0153525cef 100644
--- a/synapse/rest/client/v1/presence.py
+++ b/synapse/rest/client/v1/presence.py
@@ -56,7 +56,7 @@ class PresenceStatusRestServlet(RestServlet):
state = yield self.presence_handler.get_state(target_user=user)
state = format_user_presence_state(state, self.clock.time_msec())
- return (200, state)
+ return 200, state
@defer.inlineCallbacks
def on_PUT(self, request, user_id):
@@ -88,10 +88,10 @@ class PresenceStatusRestServlet(RestServlet):
if self.hs.config.use_presence:
yield self.presence_handler.set_state(user, state)
- return (200, {})
+ return 200, {}
def on_OPTIONS(self, request):
- return (200, {})
+ return 200, {}
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py
index 2657ae45bb..400a0a7592 100644
--- a/synapse/rest/client/v1/profile.py
+++ b/synapse/rest/client/v1/profile.py
@@ -14,12 +14,16 @@
# limitations under the License.
""" This module contains REST servlets to do with profile: /profile/<paths> """
+import logging
+
from twisted.internet import defer
from synapse.http.servlet import RestServlet, parse_json_object_from_request
from synapse.rest.client.v2_alpha._base import client_patterns
from synapse.types import UserID
+logger = logging.getLogger(__name__)
+
class ProfileDisplaynameRestServlet(RestServlet):
PATTERNS = client_patterns("/profile/(?P<user_id>[^/]*)/displayname", v1=True)
@@ -28,6 +32,7 @@ class ProfileDisplaynameRestServlet(RestServlet):
super(ProfileDisplaynameRestServlet, self).__init__()
self.hs = hs
self.profile_handler = hs.get_profile_handler()
+ self.http_client = hs.get_simple_http_client()
self.auth = hs.get_auth()
@defer.inlineCallbacks
@@ -48,7 +53,7 @@ class ProfileDisplaynameRestServlet(RestServlet):
if displayname is not None:
ret["displayname"] = displayname
- return (200, ret)
+ return 200, ret
@defer.inlineCallbacks
def on_PUT(self, request, user_id):
@@ -61,14 +66,30 @@ class ProfileDisplaynameRestServlet(RestServlet):
try:
new_name = content["displayname"]
except Exception:
- return (400, "Unable to parse name")
+ return 400, "Unable to parse name"
yield self.profile_handler.set_displayname(user, requester, new_name, is_admin)
- return (200, {})
+ if self.hs.config.shadow_server:
+ shadow_user = UserID(user.localpart, self.hs.config.shadow_server.get("hs"))
+ self.shadow_displayname(shadow_user.to_string(), content)
+
+ return 200, {}
def on_OPTIONS(self, request, user_id):
- return (200, {})
+ return 200, {}
+
+ @defer.inlineCallbacks
+ def shadow_displayname(self, user_id, body):
+ # TODO: retries
+ shadow_hs_url = self.hs.config.shadow_server.get("hs_url")
+ as_token = self.hs.config.shadow_server.get("as_token")
+
+ yield self.http_client.put_json(
+ "%s/_matrix/client/r0/profile/%s/displayname?access_token=%s&user_id=%s"
+ % (shadow_hs_url, user_id, as_token, user_id),
+ body,
+ )
class ProfileAvatarURLRestServlet(RestServlet):
@@ -78,6 +99,7 @@ class ProfileAvatarURLRestServlet(RestServlet):
super(ProfileAvatarURLRestServlet, self).__init__()
self.hs = hs
self.profile_handler = hs.get_profile_handler()
+ self.http_client = hs.get_simple_http_client()
self.auth = hs.get_auth()
@defer.inlineCallbacks
@@ -98,7 +120,7 @@ class ProfileAvatarURLRestServlet(RestServlet):
if avatar_url is not None:
ret["avatar_url"] = avatar_url
- return (200, ret)
+ return 200, ret
@defer.inlineCallbacks
def on_PUT(self, request, user_id):
@@ -108,16 +130,34 @@ class ProfileAvatarURLRestServlet(RestServlet):
content = parse_json_object_from_request(request)
try:
- new_name = content["avatar_url"]
+ new_avatar_url = content["avatar_url"]
except Exception:
- return (400, "Unable to parse name")
+ return 400, "Unable to parse name"
+
+ yield self.profile_handler.set_avatar_url(
+ user, requester, new_avatar_url, is_admin
+ )
- yield self.profile_handler.set_avatar_url(user, requester, new_name, is_admin)
+ if self.hs.config.shadow_server:
+ shadow_user = UserID(user.localpart, self.hs.config.shadow_server.get("hs"))
+ self.shadow_avatar_url(shadow_user.to_string(), content)
- return (200, {})
+ return 200, {}
def on_OPTIONS(self, request, user_id):
- return (200, {})
+ return 200, {}
+
+ @defer.inlineCallbacks
+ def shadow_avatar_url(self, user_id, body):
+ # TODO: retries
+ shadow_hs_url = self.hs.config.shadow_server.get("hs_url")
+ as_token = self.hs.config.shadow_server.get("as_token")
+
+ yield self.http_client.put_json(
+ "%s/_matrix/client/r0/profile/%s/avatar_url?access_token=%s&user_id=%s"
+ % (shadow_hs_url, user_id, as_token, user_id),
+ body,
+ )
class ProfileRestServlet(RestServlet):
@@ -150,7 +190,7 @@ class ProfileRestServlet(RestServlet):
if avatar_url is not None:
ret["avatar_url"] = avatar_url
- return (200, ret)
+ return 200, ret
def register_servlets(hs, http_server):
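To make the shadow-server hunks easier to review: the servlet reads three keys from the (assumed) shadow_server config mapping -- "hs", "hs_url" and "as_token", all visible above -- and replays the original request body to the shadow homeserver as an application service. A sketch of the URL it builds, with placeholder values:

    # Placeholder shadow_server mapping with the keys the servlet reads.
    shadow_server = {
        "hs": "shadow.example.com",  # server name used for the shadow user ID
        "hs_url": "https://shadow.example.com",
        "as_token": "AS_TOKEN",
    }

    shadow_user_id = "@alice:" + shadow_server["hs"]
    url = "%s/_matrix/client/r0/profile/%s/displayname?access_token=%s&user_id=%s" % (
        shadow_server["hs_url"],
        shadow_user_id,
        shadow_server["as_token"],
        shadow_user_id,
    )
    # The servlet PUTs the displayname body to this URL via SimpleHttpClient.
    # Note the components are interpolated without URL-encoding, as in the diff.
    print(url)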
diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py
index c3ae8b98a8..9f8c3d09e3 100644
--- a/synapse/rest/client/v1/push_rule.py
+++ b/synapse/rest/client/v1/push_rule.py
@@ -69,7 +69,7 @@ class PushRuleRestServlet(RestServlet):
if "attr" in spec:
yield self.set_rule_attr(user_id, spec, content)
self.notify_user(user_id)
- return (200, {})
+ return 200, {}
if spec["rule_id"].startswith("."):
# Rule ids starting with '.' are reserved for server default rules.
@@ -106,7 +106,7 @@ class PushRuleRestServlet(RestServlet):
except RuleNotFoundException as e:
raise SynapseError(400, str(e))
- return (200, {})
+ return 200, {}
@defer.inlineCallbacks
def on_DELETE(self, request, path):
@@ -123,7 +123,7 @@ class PushRuleRestServlet(RestServlet):
try:
yield self.store.delete_push_rule(user_id, namespaced_rule_id)
self.notify_user(user_id)
- return (200, {})
+ return 200, {}
except StoreError as e:
if e.code == 404:
raise NotFoundError()
@@ -151,10 +151,10 @@ class PushRuleRestServlet(RestServlet):
)
if path[0] == "":
- return (200, rules)
+ return 200, rules
elif path[0] == "global":
result = _filter_ruleset_with_path(rules["global"], path[1:])
- return (200, result)
+ return 200, result
else:
raise UnrecognizedRequestError()
diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py
index ebc3dec516..41660682d9 100644
--- a/synapse/rest/client/v1/pusher.py
+++ b/synapse/rest/client/v1/pusher.py
@@ -62,7 +62,7 @@ class PushersRestServlet(RestServlet):
if k not in allowed_keys:
del p[k]
- return (200, {"pushers": pushers})
+ return 200, {"pushers": pushers}
def on_OPTIONS(self, _):
return 200, {}
@@ -94,7 +94,7 @@ class PushersSetRestServlet(RestServlet):
yield self.pusher_pool.remove_pusher(
content["app_id"], content["pushkey"], user_id=user.to_string()
)
- return (200, {})
+ return 200, {}
assert_params_in_dict(
content,
@@ -143,7 +143,7 @@ class PushersSetRestServlet(RestServlet):
self.notifier.on_new_replication_data()
- return (200, {})
+ return 200, {}
def on_OPTIONS(self, _):
return 200, {}
diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
index 4b2344e696..3bf7cae5a4 100644
--- a/synapse/rest/client/v1/room.py
+++ b/synapse/rest/client/v1/room.py
@@ -91,14 +91,14 @@ class RoomCreateRestServlet(TransactionRestServlet):
requester, self.get_room_config(request)
)
- return (200, info)
+ return 200, info
def get_room_config(self, request):
user_supplied_config = parse_json_object_from_request(request)
return user_supplied_config
def on_OPTIONS(self, request):
- return (200, {})
+ return 200, {}
# TODO: Needs unit testing for generic events
@@ -173,9 +173,9 @@ class RoomStateEventRestServlet(TransactionRestServlet):
if format == "event":
event = format_event_for_client_v2(data.get_dict())
- return (200, event)
+ return 200, event
elif format == "content":
- return (200, data.get_dict()["content"])
+ return 200, data.get_dict()["content"]
@defer.inlineCallbacks
def on_PUT(self, request, room_id, event_type, state_key, txn_id=None):
@@ -210,7 +210,7 @@ class RoomStateEventRestServlet(TransactionRestServlet):
ret = {}
if event:
ret = {"event_id": event.event_id}
- return (200, ret)
+ return 200, ret
# TODO: Needs unit testing for generic events + feedback
@@ -244,10 +244,10 @@ class RoomSendEventRestServlet(TransactionRestServlet):
requester, event_dict, txn_id=txn_id
)
- return (200, {"event_id": event.event_id})
+ return 200, {"event_id": event.event_id}
def on_GET(self, request, room_id, event_type, txn_id):
- return (200, "Not implemented")
+ return 200, "Not implemented"
def on_PUT(self, request, room_id, event_type, txn_id):
return self.txns.fetch_or_execute_request(
@@ -307,7 +307,7 @@ class JoinRoomAliasServlet(TransactionRestServlet):
third_party_signed=content.get("third_party_signed", None),
)
- return (200, {"room_id": room_id})
+ return 200, {"room_id": room_id}
def on_PUT(self, request, room_identifier, txn_id):
return self.txns.fetch_or_execute_request(
@@ -360,7 +360,7 @@ class PublicRoomListRestServlet(TransactionRestServlet):
limit=limit, since_token=since_token
)
- return (200, data)
+ return 200, data
@defer.inlineCallbacks
def on_POST(self, request):
@@ -405,7 +405,7 @@ class PublicRoomListRestServlet(TransactionRestServlet):
network_tuple=network_tuple,
)
- return (200, data)
+ return 200, data
# TODO: Needs unit testing
@@ -456,7 +456,7 @@ class RoomMemberListRestServlet(RestServlet):
continue
chunk.append(event)
- return (200, {"chunk": chunk})
+ return 200, {"chunk": chunk}
# deprecated in favour of /members?membership=join?
@@ -477,7 +477,7 @@ class JoinedRoomMemberListRestServlet(RestServlet):
requester, room_id
)
- return (200, {"joined": users_with_profile})
+ return 200, {"joined": users_with_profile}
# TODO: Needs better unit testing
@@ -510,7 +510,7 @@ class RoomMessageListRestServlet(RestServlet):
event_filter=event_filter,
)
- return (200, msgs)
+ return 200, msgs
# TODO: Needs unit testing
@@ -531,7 +531,7 @@ class RoomStateRestServlet(RestServlet):
user_id=requester.user.to_string(),
is_guest=requester.is_guest,
)
- return (200, events)
+ return 200, events
# TODO: Needs unit testing
@@ -550,7 +550,7 @@ class RoomInitialSyncRestServlet(RestServlet):
content = yield self.initial_sync_handler.room_initial_sync(
room_id=room_id, requester=requester, pagin_config=pagination_config
)
- return (200, content)
+ return 200, content
class RoomEventServlet(RestServlet):
@@ -581,7 +581,7 @@ class RoomEventServlet(RestServlet):
time_now = self.clock.time_msec()
if event:
event = yield self._event_serializer.serialize_event(event, time_now)
- return (200, event)
+ return 200, event
return SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND)
@@ -633,7 +633,7 @@ class RoomEventContextServlet(RestServlet):
results["state"], time_now
)
- return (200, results)
+ return 200, results
class RoomForgetRestServlet(TransactionRestServlet):
@@ -652,7 +652,7 @@ class RoomForgetRestServlet(TransactionRestServlet):
yield self.room_member_handler.forget(user=requester.user, room_id=room_id)
- return (200, {})
+ return 200, {}
def on_PUT(self, request, room_id, txn_id):
return self.txns.fetch_or_execute_request(
@@ -701,9 +701,10 @@ class RoomMembershipRestServlet(TransactionRestServlet):
content["id_server"],
requester,
txn_id,
+ new_room=False,
+ id_access_token=content.get("id_access_token"),
)
- return (200, {})
- return
+ return 200, {}
target = requester.user
if membership_action in ["invite", "ban", "unban", "kick"]:
@@ -729,7 +730,7 @@ class RoomMembershipRestServlet(TransactionRestServlet):
if membership_action == "join":
return_value["room_id"] = room_id
- return (200, return_value)
+ return 200, return_value
def _has_3pid_invite_keys(self, content):
for key in {"id_server", "medium", "address"}:
@@ -771,7 +772,7 @@ class RoomRedactEventRestServlet(TransactionRestServlet):
txn_id=txn_id,
)
- return (200, {"event_id": event.event_id})
+ return 200, {"event_id": event.event_id}
def on_PUT(self, request, room_id, event_id, txn_id):
return self.txns.fetch_or_execute_request(
@@ -816,7 +817,7 @@ class RoomTypingRestServlet(RestServlet):
target_user=target_user, auth_user=requester.user, room_id=room_id
)
- return (200, {})
+ return 200, {}
class SearchRestServlet(RestServlet):
@@ -838,7 +839,7 @@ class SearchRestServlet(RestServlet):
requester.user, content, batch
)
- return (200, results)
+ return 200, results
class JoinedRoomsRestServlet(RestServlet):
@@ -854,7 +855,7 @@ class JoinedRoomsRestServlet(RestServlet):
requester = yield self.auth.get_user_by_req(request, allow_guest=True)
room_ids = yield self.store.get_rooms_for_user(requester.user.to_string())
- return (200, {"joined_rooms": list(room_ids)})
+ return 200, {"joined_rooms": list(room_ids)}
def register_txn_path(servlet, regex_string, http_server, with_get=False):
diff --git a/synapse/rest/client/v1/voip.py b/synapse/rest/client/v1/voip.py
index 497cddf8b8..2afdbb89e5 100644
--- a/synapse/rest/client/v1/voip.py
+++ b/synapse/rest/client/v1/voip.py
@@ -60,7 +60,7 @@ class VoipRestServlet(RestServlet):
password = turnPassword
else:
- return (200, {})
+ return 200, {}
return (
200,
@@ -73,7 +73,7 @@ class VoipRestServlet(RestServlet):
)
def on_OPTIONS(self, request):
- return (200, {})
+ return 200, {}
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/_base.py b/synapse/rest/client/v2_alpha/_base.py
index e3d59ac3ac..8250ae0ae1 100644
--- a/synapse/rest/client/v2_alpha/_base.py
+++ b/synapse/rest/client/v2_alpha/_base.py
@@ -37,6 +37,7 @@ def client_patterns(path_regex, releases=(0,), unstable=True, v1=False):
SRE_Pattern
"""
patterns = []
+
if unstable:
unstable_prefix = CLIENT_API_PREFIX + "/unstable"
patterns.append(re.compile("^" + unstable_prefix + path_regex))
@@ -46,6 +47,7 @@ def client_patterns(path_regex, releases=(0,), unstable=True, v1=False):
for release in releases:
new_prefix = CLIENT_API_PREFIX + "/r%d" % (release,)
patterns.append(re.compile("^" + new_prefix + path_regex))
+
return patterns
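As a sanity check on the helper touched here, a small sketch of the patterns client_patterns produces with its defaults (unstable plus r0), assuming CLIENT_API_PREFIX is "/_matrix/client"; the v1 branch is omitted for brevity:

    import re

    CLIENT_API_PREFIX = "/_matrix/client"

    def client_patterns_sketch(path_regex, releases=(0,), unstable=True):
        # Mirrors the helper above: one compiled pattern per API prefix.
        patterns = []
        if unstable:
            patterns.append(re.compile("^" + CLIENT_API_PREFIX + "/unstable" + path_regex))
        for release in releases:
            prefix = CLIENT_API_PREFIX + "/r%d" % (release,)
            patterns.append(re.compile("^" + prefix + path_regex))
        return patterns

    for pattern in client_patterns_sketch("/account/whoami$"):
        print(pattern.pattern)
    # ^/_matrix/client/unstable/account/whoami$
    # ^/_matrix/client/r0/account/whoami$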
diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py
index 7ac456812a..ae82182a7a 100644
--- a/synapse/rest/client/v2_alpha/account.py
+++ b/synapse/rest/client/v2_alpha/account.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2017 Vector Creations Ltd
-# Copyright 2018 New Vector Ltd
+# Copyright 2018, 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,15 +15,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
+import re
from six.moves import http_client
-import jinja2
-
from twisted.internet import defer
from synapse.api.constants import LoginType
from synapse.api.errors import Codes, SynapseError, ThreepidValidationError
+from synapse.config.emailconfig import ThreepidBehaviour
from synapse.http.server import finish_request
from synapse.http.servlet import (
RestServlet,
@@ -31,8 +31,10 @@ from synapse.http.servlet import (
parse_json_object_from_request,
parse_string,
)
+from synapse.push.mailer import Mailer, load_jinja2_templates
+from synapse.types import UserID
from synapse.util.msisdn import phone_number_to_msisdn
-from synapse.util.stringutils import random_string
+from synapse.util.stringutils import assert_valid_client_secret, random_string
from synapse.util.threepids import check_3pid_allowed
from ._base import client_patterns, interactive_auth_handler
@@ -50,25 +52,28 @@ class EmailPasswordRequestTokenRestServlet(RestServlet):
self.config = hs.config
self.identity_handler = hs.get_handlers().identity_handler
- if self.config.email_password_reset_behaviour == "local":
- from synapse.push.mailer import Mailer, load_jinja2_templates
-
- templates = load_jinja2_templates(
- config=hs.config,
- template_html_name=hs.config.email_password_reset_template_html,
- template_text_name=hs.config.email_password_reset_template_text,
+ if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
+ template_html, template_text = load_jinja2_templates(
+ self.config.email_template_dir,
+ [
+ self.config.email_password_reset_template_html,
+ self.config.email_password_reset_template_text,
+ ],
+ apply_format_ts_filter=True,
+ apply_mxc_to_http_filter=True,
+ public_baseurl=self.config.public_baseurl,
)
self.mailer = Mailer(
hs=self.hs,
app_name=self.config.email_app_name,
- template_html=templates[0],
- template_text=templates[1],
+ template_html=template_html,
+ template_text=template_text,
)
@defer.inlineCallbacks
def on_POST(self, request):
- if self.config.email_password_reset_behaviour == "off":
- if self.config.password_resets_were_disabled_due_to_email_config:
+ if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
+ if self.config.local_threepid_handling_disabled_due_to_email_config:
logger.warn(
"User password resets have been disabled due to lack of email config"
)
@@ -82,6 +87,8 @@ class EmailPasswordRequestTokenRestServlet(RestServlet):
# Extract params from body
client_secret = body["client_secret"]
+ assert_valid_client_secret(client_secret)
+
email = body["email"]
send_attempt = body["send_attempt"]
next_link = body.get("next_link") # Optional param
@@ -89,103 +96,49 @@ class EmailPasswordRequestTokenRestServlet(RestServlet):
if not check_3pid_allowed(self.hs, "email", email):
raise SynapseError(
403,
- "Your email domain is not authorized on this server",
+ "Your email is not authorized on this server",
Codes.THREEPID_DENIED,
)
- existingUid = yield self.hs.get_datastore().get_user_id_by_threepid(
+ existing_user_id = yield self.hs.get_datastore().get_user_id_by_threepid(
"email", email
)
- if existingUid is None:
+ if existing_user_id is None:
raise SynapseError(400, "Email not found", Codes.THREEPID_NOT_FOUND)
- if self.config.email_password_reset_behaviour == "remote":
- if "id_server" not in body:
- raise SynapseError(400, "Missing 'id_server' param in body")
+ if self.config.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
+ # Have the configured identity server handle the request
+ if not self.hs.config.account_threepid_delegate_email:
+ logger.warn(
+ "No upstream email account_threepid_delegate configured on the server to "
+ "handle this request"
+ )
+ raise SynapseError(
+ 400, "Password reset by email is not supported on this homeserver"
+ )
- # Have the identity server handle the password reset flow
ret = yield self.identity_handler.requestEmailToken(
- body["id_server"], email, client_secret, send_attempt, next_link
+ self.hs.config.account_threepid_delegate_email,
+ email,
+ client_secret,
+ send_attempt,
+ next_link,
)
else:
# Send password reset emails from Synapse
- sid = yield self.send_password_reset(
- email, client_secret, send_attempt, next_link
+ sid = yield self.identity_handler.send_threepid_validation(
+ email,
+ client_secret,
+ send_attempt,
+ self.mailer.send_password_reset_mail,
+ next_link,
)
# Wrap the session id in a JSON object
ret = {"sid": sid}
- return (200, ret)
-
- @defer.inlineCallbacks
- def send_password_reset(self, email, client_secret, send_attempt, next_link=None):
- """Send a password reset email
-
- Args:
- email (str): The user's email address
- client_secret (str): The provided client secret
- send_attempt (int): Which send attempt this is
-
- Returns:
- The new session_id upon success
-
- Raises:
- SynapseError is an error occurred when sending the email
- """
- # Check that this email/client_secret/send_attempt combo is new or
- # greater than what we've seen previously
- session = yield self.datastore.get_threepid_validation_session(
- "email", client_secret, address=email, validated=False
- )
-
- # Check to see if a session already exists and that it is not yet
- # marked as validated
- if session and session.get("validated_at") is None:
- session_id = session["session_id"]
- last_send_attempt = session["last_send_attempt"]
-
- # Check that the send_attempt is higher than previous attempts
- if send_attempt <= last_send_attempt:
- # If not, just return a success without sending an email
- return session_id
- else:
- # An non-validated session does not exist yet.
- # Generate a session id
- session_id = random_string(16)
-
- # Generate a new validation token
- token = random_string(32)
-
- # Send the mail with the link containing the token, client_secret
- # and session_id
- try:
- yield self.mailer.send_password_reset_mail(
- email, token, client_secret, session_id
- )
- except Exception:
- logger.exception("Error sending a password reset email to %s", email)
- raise SynapseError(
- 500, "An error was encountered when sending the password reset email"
- )
-
- token_expires = (
- self.hs.clock.time_msec() + self.config.email_validation_token_lifetime
- )
-
- yield self.datastore.start_or_continue_validation_session(
- "email",
- email,
- session_id,
- client_secret,
- send_attempt,
- next_link,
- token,
- token_expires,
- )
-
- return session_id
+ return 200, ret
class MsisdnPasswordRequestTokenRestServlet(RestServlet):
@@ -202,26 +155,52 @@ class MsisdnPasswordRequestTokenRestServlet(RestServlet):
body = parse_json_object_from_request(request)
assert_params_in_dict(
- body,
- ["id_server", "client_secret", "country", "phone_number", "send_attempt"],
+ body, ["client_secret", "country", "phone_number", "send_attempt"]
)
+ client_secret = body["client_secret"]
+ country = body["country"]
+ phone_number = body["phone_number"]
+ send_attempt = body["send_attempt"]
+ next_link = body.get("next_link") # Optional param
- msisdn = phone_number_to_msisdn(body["country"], body["phone_number"])
+ msisdn = phone_number_to_msisdn(country, phone_number)
- if not check_3pid_allowed(self.hs, "msisdn", msisdn):
+ if not (yield check_3pid_allowed(self.hs, "msisdn", msisdn)):
raise SynapseError(
403,
"Account phone numbers are not authorized on this server",
Codes.THREEPID_DENIED,
)
- existingUid = yield self.datastore.get_user_id_by_threepid("msisdn", msisdn)
+ assert_valid_client_secret(body["client_secret"])
- if existingUid is None:
+ existing_user_id = yield self.datastore.get_user_id_by_threepid(
+ "msisdn", msisdn
+ )
+
+ if existing_user_id is None:
raise SynapseError(400, "MSISDN not found", Codes.THREEPID_NOT_FOUND)
- ret = yield self.identity_handler.requestMsisdnToken(**body)
- return (200, ret)
+ if not self.hs.config.account_threepid_delegate_msisdn:
+ logger.warn(
+ "No upstream msisdn account_threepid_delegate configured on the server to "
+ "handle this request"
+ )
+ raise SynapseError(
+ 400,
+ "Password reset by phone number is not supported on this homeserver",
+ )
+
+ ret = yield self.identity_handler.requestMsisdnToken(
+ self.hs.config.account_threepid_delegate_msisdn,
+ country,
+ phone_number,
+ client_secret,
+ send_attempt,
+ next_link,
+ )
+
+ return 200, ret
class PasswordResetSubmitTokenServlet(RestServlet):
@@ -241,31 +220,35 @@ class PasswordResetSubmitTokenServlet(RestServlet):
self.auth = hs.get_auth()
self.config = hs.config
self.clock = hs.get_clock()
- self.datastore = hs.get_datastore()
+ self.store = hs.get_datastore()
@defer.inlineCallbacks
def on_GET(self, request, medium):
+ # We currently only handle threepid token submissions for email
if medium != "email":
raise SynapseError(
400, "This medium is currently not supported for password resets"
)
- if self.config.email_password_reset_behaviour == "off":
- if self.config.password_resets_were_disabled_due_to_email_config:
+ if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
+ if self.config.local_threepid_handling_disabled_due_to_email_config:
logger.warn(
- "User password resets have been disabled due to lack of email config"
+ "Password reset emails have been disabled due to lack of an email config"
)
raise SynapseError(
- 400, "Email-based password resets have been disabled on this server"
+ 400, "Email-based password resets are disabled on this server"
)
- sid = parse_string(request, "sid")
- client_secret = parse_string(request, "client_secret")
- token = parse_string(request, "token")
+ sid = parse_string(request, "sid", required=True)
+ client_secret = parse_string(request, "client_secret", required=True)
- # Attempt to validate a 3PID sesssion
+ assert_valid_client_secret(client_secret)
+
+ token = parse_string(request, "token", required=True)
+
+ # Attempt to validate a 3PID session
try:
# Mark the session as valid
- next_link = yield self.datastore.validate_threepid_session(
+ next_link = yield self.store.validate_threepid_session(
sid, client_secret, token, self.clock.time_msec()
)
@@ -282,38 +265,22 @@ class PasswordResetSubmitTokenServlet(RestServlet):
return None
# Otherwise show the success template
- html = self.config.email_password_reset_success_html_content
+ html = self.config.email_password_reset_template_success_html
request.setResponseCode(200)
except ThreepidValidationError as e:
+ request.setResponseCode(e.code)
+
# Show a failure page with a reason
- html = self.load_jinja2_template(
+ html_template, = load_jinja2_templates(
self.config.email_template_dir,
- self.config.email_password_reset_failure_template,
- template_vars={"failure_reason": e.msg},
+ [self.config.email_password_reset_template_failure_html],
)
- request.setResponseCode(e.code)
+
+ template_vars = {"failure_reason": e.msg}
+ html = html_template.render(**template_vars)
request.write(html.encode("utf-8"))
finish_request(request)
- return None
-
- def load_jinja2_template(self, template_dir, template_filename, template_vars):
- """Loads a jinja2 template with variables to insert
-
- Args:
- template_dir (str): The directory where templates are stored
- template_filename (str): The name of the template in the template_dir
- template_vars (Dict): Dictionary of keys in the template
- alongside their values to insert
-
- Returns:
- str containing the contents of the rendered template
- """
- loader = jinja2.FileSystemLoader(template_dir)
- env = jinja2.Environment(loader=loader)
-
- template = env.get_template(template_filename)
- return template.render(**template_vars)
@defer.inlineCallbacks
def on_POST(self, request, medium):
@@ -325,12 +292,14 @@ class PasswordResetSubmitTokenServlet(RestServlet):
body = parse_json_object_from_request(request)
assert_params_in_dict(body, ["sid", "client_secret", "token"])
- valid, _ = yield self.datastore.validate_threepid_validation_token(
+ assert_valid_client_secret(body["client_secret"])
+
+ valid, _ = yield self.store.validate_threepid_session(
body["sid"], body["client_secret"], body["token"], self.clock.time_msec()
)
response_code = 200 if valid else 400
- return (response_code, {"success": valid})
+ return response_code, {"success": valid}
class PasswordRestServlet(RestServlet):
@@ -343,6 +312,7 @@ class PasswordRestServlet(RestServlet):
self.auth_handler = hs.get_auth_handler()
self.datastore = self.hs.get_datastore()
self._set_password_handler = hs.get_set_password_handler()
+ self.http_client = hs.get_simple_http_client()
@interactive_auth_handler
@defer.inlineCallbacks
@@ -361,9 +331,13 @@ class PasswordRestServlet(RestServlet):
if self.auth.has_access_token(request):
requester = yield self.auth.get_user_by_req(request)
- params = yield self.auth_handler.validate_user_via_ui_auth(
- requester, body, self.hs.get_ip_from_request(request)
- )
+ # blindly trust ASes without UI-authing them
+ if requester.app_service:
+ params = body
+ else:
+ params = yield self.auth_handler.validate_user_via_ui_auth(
+ requester, body, self.hs.get_ip_from_request(request)
+ )
user_id = requester.user.to_string()
else:
requester = None
@@ -371,7 +345,6 @@ class PasswordRestServlet(RestServlet):
[[LoginType.EMAIL_IDENTITY], [LoginType.MSISDN]],
body,
self.hs.get_ip_from_request(request),
- password_servlet=True,
)
if LoginType.EMAIL_IDENTITY in result:
@@ -399,11 +372,29 @@ class PasswordRestServlet(RestServlet):
yield self._set_password_handler.set_password(user_id, new_password, requester)
- return (200, {})
+ if self.hs.config.shadow_server:
+ shadow_user = UserID(
+ requester.user.localpart, self.hs.config.shadow_server.get("hs")
+ )
+ self.shadow_password(params, shadow_user.to_string())
+
+ return 200, {}
def on_OPTIONS(self, _):
return 200, {}
+ @defer.inlineCallbacks
+ def shadow_password(self, body, user_id):
+ # TODO: retries
+ shadow_hs_url = self.hs.config.shadow_server.get("hs_url")
+ as_token = self.hs.config.shadow_server.get("as_token")
+
+ yield self.http_client.post_json_get_json(
+ "%s/_matrix/client/r0/account/password?access_token=%s&user_id=%s"
+ % (shadow_hs_url, as_token, user_id),
+ body,
+ )
+
class DeactivateAccountRestServlet(RestServlet):
PATTERNS = client_patterns("/account/deactivate$")
@@ -434,7 +425,7 @@ class DeactivateAccountRestServlet(RestServlet):
yield self._deactivate_account_handler.deactivate_account(
requester.user.to_string(), erase
)
- return (200, {})
+ return 200, {}
yield self.auth_handler.validate_user_via_ui_auth(
requester, body, self.hs.get_ip_from_request(request)
@@ -447,17 +438,18 @@ class DeactivateAccountRestServlet(RestServlet):
else:
id_server_unbind_result = "no-support"
- return (200, {"id_server_unbind_result": id_server_unbind_result})
+ return 200, {"id_server_unbind_result": id_server_unbind_result}
class EmailThreepidRequestTokenRestServlet(RestServlet):
PATTERNS = client_patterns("/account/3pid/email/requestToken$")
def __init__(self, hs):
- self.hs = hs
super(EmailThreepidRequestTokenRestServlet, self).__init__()
+ self.hs = hs
+ self.config = hs.config
self.identity_handler = hs.get_handlers().identity_handler
- self.datastore = self.hs.get_datastore()
+ self.store = self.hs.get_datastore()
@defer.inlineCallbacks
def on_POST(self, request):
@@ -465,23 +457,32 @@ class EmailThreepidRequestTokenRestServlet(RestServlet):
assert_params_in_dict(
body, ["id_server", "client_secret", "email", "send_attempt"]
)
+ id_server = "https://" + body["id_server"] # Assume https
+ client_secret = body["client_secret"]
+ email = body["email"]
+ send_attempt = body["send_attempt"]
+ next_link = body.get("next_link") # Optional param
- if not check_3pid_allowed(self.hs, "email", body["email"]):
+ if not (yield check_3pid_allowed(self.hs, "email", email)):
raise SynapseError(
403,
- "Your email domain is not authorized on this server",
+ "Your email is not authorized on this server",
Codes.THREEPID_DENIED,
)
- existingUid = yield self.datastore.get_user_id_by_threepid(
+ assert_valid_client_secret(body["client_secret"])
+
+ existing_user_id = yield self.store.get_user_id_by_threepid(
"email", body["email"]
)
- if existingUid is not None:
+ if existing_user_id is not None:
raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE)
- ret = yield self.identity_handler.requestEmailToken(**body)
- return (200, ret)
+ ret = yield self.identity_handler.requestEmailToken(
+ id_server, email, client_secret, send_attempt, next_link
+ )
+ return 200, ret
class MsisdnThreepidRequestTokenRestServlet(RestServlet):
@@ -490,8 +491,8 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet):
def __init__(self, hs):
self.hs = hs
super(MsisdnThreepidRequestTokenRestServlet, self).__init__()
+ self.store = self.hs.get_datastore()
self.identity_handler = hs.get_handlers().identity_handler
- self.datastore = self.hs.get_datastore()
@defer.inlineCallbacks
def on_POST(self, request):
@@ -500,23 +501,33 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet):
body,
["id_server", "client_secret", "country", "phone_number", "send_attempt"],
)
+ id_server = "https://" + body["id_server"] # Assume https
+ client_secret = body["client_secret"]
+ country = body["country"]
+ phone_number = body["phone_number"]
+ send_attempt = body["send_attempt"]
+ next_link = body.get("next_link") # Optional param
- msisdn = phone_number_to_msisdn(body["country"], body["phone_number"])
+ msisdn = phone_number_to_msisdn(country, phone_number)
- if not check_3pid_allowed(self.hs, "msisdn", msisdn):
+ if not (yield check_3pid_allowed(self.hs, "msisdn", msisdn)):
raise SynapseError(
403,
"Account phone numbers are not authorized on this server",
Codes.THREEPID_DENIED,
)
- existingUid = yield self.datastore.get_user_id_by_threepid("msisdn", msisdn)
+ assert_valid_client_secret(body["client_secret"])
+
+ existing_user_id = yield self.store.get_user_id_by_threepid("msisdn", msisdn)
- if existingUid is not None:
+ if existing_user_id is not None:
raise SynapseError(400, "MSISDN is already in use", Codes.THREEPID_IN_USE)
- ret = yield self.identity_handler.requestMsisdnToken(**body)
- return (200, ret)
+ ret = yield self.identity_handler.requestMsisdnToken(
+ id_server, country, phone_number, client_secret, send_attempt, next_link
+ )
+ return 200, ret
class ThreepidRestServlet(RestServlet):
@@ -528,7 +539,8 @@ class ThreepidRestServlet(RestServlet):
self.identity_handler = hs.get_handlers().identity_handler
self.auth = hs.get_auth()
self.auth_handler = hs.get_auth_handler()
- self.datastore = self.hs.get_datastore()
+ self.datastore = hs.get_datastore()
+ self.http_client = hs.get_simple_http_client()
@defer.inlineCallbacks
def on_GET(self, request):
@@ -536,39 +548,105 @@ class ThreepidRestServlet(RestServlet):
threepids = yield self.datastore.user_get_threepids(requester.user.to_string())
- return (200, {"threepids": threepids})
+ return 200, {"threepids": threepids}
@defer.inlineCallbacks
def on_POST(self, request):
- body = parse_json_object_from_request(request)
+ if self.hs.config.disable_3pid_changes:
+ raise SynapseError(400, "3PID changes disabled on this server")
- threePidCreds = body.get("threePidCreds")
- threePidCreds = body.get("three_pid_creds", threePidCreds)
- if threePidCreds is None:
- raise SynapseError(400, "Missing param", Codes.MISSING_PARAM)
+ body = parse_json_object_from_request(request)
requester = yield self.auth.get_user_by_req(request)
user_id = requester.user.to_string()
- threepid = yield self.identity_handler.threepid_from_creds(threePidCreds)
+ # skip validation if this is a shadow 3PID from an AS
+ if not requester.app_service:
+ threepid_creds = body.get("threePidCreds") or body.get("three_pid_creds")
+ if threepid_creds is None:
+ raise SynapseError(400, "Missing param", Codes.MISSING_PARAM)
+
+ requester = yield self.auth.get_user_by_req(request)
+ user_id = requester.user.to_string()
+
+ # Specify None as the identity server to retrieve it from the request body instead
+ threepid = yield self.identity_handler.threepid_from_creds(None, threepid_creds)
- if not threepid:
- raise SynapseError(400, "Failed to auth 3pid", Codes.THREEPID_AUTH_FAILED)
+ if not threepid:
+ raise SynapseError(
+ 400, "Failed to auth 3pid", Codes.THREEPID_AUTH_FAILED
+ )
- for reqd in ["medium", "address", "validated_at"]:
- if reqd not in threepid:
- logger.warn("Couldn't add 3pid: invalid response from ID server")
- raise SynapseError(500, "Invalid response from ID Server")
+ for reqd in ["medium", "address", "validated_at"]:
+ if reqd not in threepid:
+ logger.warn("Couldn't add 3pid: invalid response from ID server")
+ raise SynapseError(500, "Invalid response from ID Server")
+ else:
+ # XXX: ASes pass in a validated threepid directly to bypass the IS.
+ # This makes the API entirely change shape when we have an AS token;
+ # it really should be an entirely separate API - perhaps
+ # /account/3pid/replicate or something.
+ threepid = body.get("threepid")
yield self.auth_handler.add_threepid(
user_id, threepid["medium"], threepid["address"], threepid["validated_at"]
)
- if "bind" in body and body["bind"]:
+ if not requester.app_service and ("bind" in body and body["bind"]):
logger.debug("Binding threepid %s to %s", threepid, user_id)
- yield self.identity_handler.bind_threepid(threePidCreds, user_id)
+ yield self.identity_handler.bind_threepid(threepid_creds, user_id)
+
+ if self.hs.config.shadow_server:
+ shadow_user = UserID(
+ requester.user.localpart, self.hs.config.shadow_server.get("hs")
+ )
+ self.shadow_3pid({"threepid": threepid}, shadow_user.to_string())
+
+ return 200, {}
+
+ @defer.inlineCallbacks
+ def shadow_3pid(self, body, user_id):
+ # TODO: retries
+ shadow_hs_url = self.hs.config.shadow_server.get("hs_url")
+ as_token = self.hs.config.shadow_server.get("as_token")
+
+ yield self.http_client.post_json_get_json(
+ "%s/_matrix/client/r0/account/3pid?access_token=%s&user_id=%s"
+ % (shadow_hs_url, as_token, user_id),
+ body,
+ )
+
+
+class ThreepidUnbindRestServlet(RestServlet):
+ PATTERNS = client_patterns("/account/3pid/unbind$")
+
+ def __init__(self, hs):
+ super(ThreepidUnbindRestServlet, self).__init__()
+ self.hs = hs
+ self.identity_handler = hs.get_handlers().identity_handler
+ self.auth = hs.get_auth()
+ self.datastore = self.hs.get_datastore()
+
+ @defer.inlineCallbacks
+ def on_POST(self, request):
+ """Unbind the given 3pid from a specific identity server, or identity servers that are
+ known to have this 3pid bound
+ """
+ requester = yield self.auth.get_user_by_req(request)
+ body = parse_json_object_from_request(request)
+ assert_params_in_dict(body, ["medium", "address"])
+
+ medium = body.get("medium")
+ address = body.get("address")
+ id_server = body.get("id_server")
- return (200, {})
+ # Attempt to unbind the threepid from an identity server. If id_server is None, try to
+ # unbind from all identity servers this threepid has been added to in the past
+ result = yield self.identity_handler.try_unbind_threepid(
+ requester.user.to_string(),
+ {"address": address, "medium": medium, "id_server": id_server},
+ )
+ return 200, {"id_server_unbind_result": "success" if result else "no-support"}
class ThreepidDeleteRestServlet(RestServlet):
@@ -576,11 +654,16 @@ class ThreepidDeleteRestServlet(RestServlet):
def __init__(self, hs):
super(ThreepidDeleteRestServlet, self).__init__()
+ self.hs = hs
self.auth = hs.get_auth()
self.auth_handler = hs.get_auth_handler()
+ self.http_client = hs.get_simple_http_client()
@defer.inlineCallbacks
def on_POST(self, request):
+ if self.hs.config.disable_3pid_changes:
+ raise SynapseError(400, "3PID changes disabled on this server")
+
body = parse_json_object_from_request(request)
assert_params_in_dict(body, ["medium", "address"])
@@ -598,12 +681,89 @@ class ThreepidDeleteRestServlet(RestServlet):
logger.exception("Failed to remove threepid")
raise SynapseError(500, "Failed to remove threepid")
+ if self.hs.config.shadow_server:
+ shadow_user = UserID(
+ requester.user.localpart, self.hs.config.shadow_server.get("hs")
+ )
+ self.shadow_3pid_delete(body, shadow_user.to_string())
+
if ret:
id_server_unbind_result = "success"
else:
id_server_unbind_result = "no-support"
- return (200, {"id_server_unbind_result": id_server_unbind_result})
+ return 200, {"id_server_unbind_result": id_server_unbind_result}
+
+ @defer.inlineCallbacks
+ def shadow_3pid_delete(self, body, user_id):
+ # TODO: retries
+ shadow_hs_url = self.hs.config.shadow_server.get("hs_url")
+ as_token = self.hs.config.shadow_server.get("as_token")
+
+ yield self.http_client.post_json_get_json(
+ "%s/_matrix/client/r0/account/3pid/delete?access_token=%s&user_id=%s"
+ % (shadow_hs_url, as_token, user_id),
+ body,
+ )
+
+
+class ThreepidLookupRestServlet(RestServlet):
+ PATTERNS = [re.compile("^/_matrix/client/unstable/account/3pid/lookup$")]
+
+ def __init__(self, hs):
+ super(ThreepidLookupRestServlet, self).__init__()
+ self.auth = hs.get_auth()
+ self.identity_handler = hs.get_handlers().identity_handler
+
+ @defer.inlineCallbacks
+ def on_GET(self, request):
+ """Proxy a /_matrix/identity/api/v1/lookup request to an identity
+ server
+ """
+ yield self.auth.get_user_by_req(request)
+
+ # Verify query parameters
+ query_params = request.args
+ assert_params_in_dict(query_params, [b"medium", b"address", b"id_server"])
+
+ # Retrieve needed information from query parameters
+ medium = parse_string(request, "medium")
+ address = parse_string(request, "address")
+ id_server = parse_string(request, "id_server")
+
+ # Proxy the request to the identity server. lookup_3pid handles checking
+ # if the lookup is allowed so we don't need to do it here.
+ ret = yield self.identity_handler.lookup_3pid(id_server, medium, address)
+
+ defer.returnValue((200, ret))
+
+
+class ThreepidBulkLookupRestServlet(RestServlet):
+ PATTERNS = [re.compile("^/_matrix/client/unstable/account/3pid/bulk_lookup$")]
+
+ def __init__(self, hs):
+ super(ThreepidBulkLookupRestServlet, self).__init__()
+ self.auth = hs.get_auth()
+ self.identity_handler = hs.get_handlers().identity_handler
+
+ @defer.inlineCallbacks
+ def on_POST(self, request):
+ """Proxy a /_matrix/identity/api/v1/bulk_lookup request to an identity
+ server
+ """
+ yield self.auth.get_user_by_req(request)
+
+ body = parse_json_object_from_request(request)
+
+ assert_params_in_dict(body, ["threepids", "id_server"])
+
+ # Proxy the request to the identity server. lookup_3pid handles checking
+ # if the lookup is allowed so we don't need to do it here.
+ ret = yield self.identity_handler.bulk_lookup_3pid(
+ body["id_server"], body["threepids"]
+ )
+
+ defer.returnValue((200, ret))
class WhoamiRestServlet(RestServlet):
@@ -617,7 +777,7 @@ class WhoamiRestServlet(RestServlet):
def on_GET(self, request):
requester = yield self.auth.get_user_by_req(request)
- return (200, {"user_id": requester.user.to_string()})
+ return 200, {"user_id": requester.user.to_string()}
def register_servlets(hs, http_server):
@@ -629,5 +789,8 @@ def register_servlets(hs, http_server):
EmailThreepidRequestTokenRestServlet(hs).register(http_server)
MsisdnThreepidRequestTokenRestServlet(hs).register(http_server)
ThreepidRestServlet(hs).register(http_server)
+ ThreepidUnbindRestServlet(hs).register(http_server)
ThreepidDeleteRestServlet(hs).register(http_server)
+ ThreepidLookupRestServlet(hs).register(http_server)
+ ThreepidBulkLookupRestServlet(hs).register(http_server)
WhoamiRestServlet(hs).register(http_server)
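As a hedged illustration of the two new proxy endpoints registered above (homeserver URL, identity server and access token are placeholders; the bulk payload follows the identity-service v1 convention of [medium, address] pairs), a client-side sketch:

```python
import requests

HS = "https://example.com"  # hypothetical homeserver
AUTH = {"Authorization": "Bearer <access_token>"}

# Single lookup: proxied to /_matrix/identity/api/v1/lookup on id_server.
requests.get(
    HS + "/_matrix/client/unstable/account/3pid/lookup",
    params={"medium": "email", "address": "alice@example.com",
            "id_server": "id.example.com"},
    headers=AUTH,
)

# Bulk lookup: threepids is a list of [medium, address] pairs.
requests.post(
    HS + "/_matrix/client/unstable/account/3pid/bulk_lookup",
    json={"id_server": "id.example.com",
          "threepids": [["email", "alice@example.com"],
                        ["msisdn", "447700900000"]]},
    headers=AUTH,
)
```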
diff --git a/synapse/rest/client/v2_alpha/account_data.py b/synapse/rest/client/v2_alpha/account_data.py
index 98f2f6f4b5..34ade04ff8 100644
--- a/synapse/rest/client/v2_alpha/account_data.py
+++ b/synapse/rest/client/v2_alpha/account_data.py
@@ -19,6 +19,7 @@ from twisted.internet import defer
from synapse.api.errors import AuthError, NotFoundError, SynapseError
from synapse.http.servlet import RestServlet, parse_json_object_from_request
+from synapse.types import UserID
from ._base import client_patterns
@@ -40,6 +41,7 @@ class AccountDataServlet(RestServlet):
self.auth = hs.get_auth()
self.store = hs.get_datastore()
self.notifier = hs.get_notifier()
+ self._profile_handler = hs.get_profile_handler()
@defer.inlineCallbacks
def on_PUT(self, request, user_id, account_data_type):
@@ -49,13 +51,18 @@ class AccountDataServlet(RestServlet):
body = parse_json_object_from_request(request)
+ if account_data_type == "im.vector.hide_profile":
+ user = UserID.from_string(user_id)
+ hide_profile = body.get("hide_profile")
+ yield self._profile_handler.set_active(user, not hide_profile, True)
+
max_id = yield self.store.add_account_data_for_user(
user_id, account_data_type, body
)
self.notifier.on_new_event("account_data_key", max_id, users=[user_id])
- return (200, {})
+ return 200, {}
@defer.inlineCallbacks
def on_GET(self, request, user_id, account_data_type):
@@ -70,7 +77,7 @@ class AccountDataServlet(RestServlet):
if event is None:
raise NotFoundError("Account data not found")
- return (200, event)
+ return 200, event
class RoomAccountDataServlet(RestServlet):
@@ -112,7 +119,7 @@ class RoomAccountDataServlet(RestServlet):
self.notifier.on_new_event("account_data_key", max_id, users=[user_id])
- return (200, {})
+ return 200, {}
@defer.inlineCallbacks
def on_GET(self, request, user_id, room_id, account_data_type):
@@ -127,7 +134,7 @@ class RoomAccountDataServlet(RestServlet):
if event is None:
raise NotFoundError("Room account data not found")
- return (200, event)
+ return 200, event
def register_servlets(hs, http_server):
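A minimal sketch of how a client might drive the new im.vector.hide_profile branch (URL, user ID and token are placeholders): the servlet inverts hide_profile and passes it to set_active before storing the account data as usual.

```python
import requests

requests.put(
    "https://example.com/_matrix/client/r0/user/%40alice%3Aexample.com"
    "/account_data/im.vector.hide_profile",
    json={"hide_profile": True},  # handler calls set_active(user, False, True)
    headers={"Authorization": "Bearer <access_token>"},
)
```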
diff --git a/synapse/rest/client/v2_alpha/account_validity.py b/synapse/rest/client/v2_alpha/account_validity.py
index 33f6a23028..9a62f938af 100644
--- a/synapse/rest/client/v2_alpha/account_validity.py
+++ b/synapse/rest/client/v2_alpha/account_validity.py
@@ -15,6 +15,8 @@
import logging
+from six import ensure_binary
+
from twisted.internet import defer
from synapse.api.errors import AuthError, SynapseError
diff --git a/synapse/rest/client/v2_alpha/capabilities.py b/synapse/rest/client/v2_alpha/capabilities.py
index a4fa45fe11..acd58af193 100644
--- a/synapse/rest/client/v2_alpha/capabilities.py
+++ b/synapse/rest/client/v2_alpha/capabilities.py
@@ -58,7 +58,7 @@ class CapabilitiesRestServlet(RestServlet):
"m.change_password": {"enabled": change_password},
}
}
- return (200, response)
+ return 200, response
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/devices.py b/synapse/rest/client/v2_alpha/devices.py
index 9adf76cc0c..26d0235208 100644
--- a/synapse/rest/client/v2_alpha/devices.py
+++ b/synapse/rest/client/v2_alpha/devices.py
@@ -48,7 +48,7 @@ class DevicesRestServlet(RestServlet):
devices = yield self.device_handler.get_devices_by_user(
requester.user.to_string()
)
- return (200, {"devices": devices})
+ return 200, {"devices": devices}
class DeleteDevicesRestServlet(RestServlet):
@@ -91,7 +91,7 @@ class DeleteDevicesRestServlet(RestServlet):
yield self.device_handler.delete_devices(
requester.user.to_string(), body["devices"]
)
- return (200, {})
+ return 200, {}
class DeviceRestServlet(RestServlet):
@@ -114,7 +114,7 @@ class DeviceRestServlet(RestServlet):
device = yield self.device_handler.get_device(
requester.user.to_string(), device_id
)
- return (200, device)
+ return 200, device
@interactive_auth_handler
@defer.inlineCallbacks
@@ -137,7 +137,7 @@ class DeviceRestServlet(RestServlet):
)
yield self.device_handler.delete_device(requester.user.to_string(), device_id)
- return (200, {})
+ return 200, {}
@defer.inlineCallbacks
def on_PUT(self, request, device_id):
@@ -147,7 +147,7 @@ class DeviceRestServlet(RestServlet):
yield self.device_handler.update_device(
requester.user.to_string(), device_id, body
)
- return (200, {})
+ return 200, {}
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/filter.py b/synapse/rest/client/v2_alpha/filter.py
index 22be0ee3c5..c6ddf24c8d 100644
--- a/synapse/rest/client/v2_alpha/filter.py
+++ b/synapse/rest/client/v2_alpha/filter.py
@@ -56,7 +56,7 @@ class GetFilterRestServlet(RestServlet):
user_localpart=target_user.localpart, filter_id=filter_id
)
- return (200, filter.get_filter_json())
+ return 200, filter.get_filter_json()
except (KeyError, StoreError):
raise SynapseError(400, "No such filter", errcode=Codes.NOT_FOUND)
@@ -89,7 +89,7 @@ class CreateFilterRestServlet(RestServlet):
user_localpart=target_user.localpart, user_filter=content
)
- return (200, {"filter_id": str(filter_id)})
+ return 200, {"filter_id": str(filter_id)}
def register_servlets(hs, http_server):
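The many `return (200, ...)` to `return 200, ...` edits in this and the surrounding files are purely stylistic; Python builds the identical tuple either way, as this tiny check shows:

```python
def with_parens():
    return (200, {"ok": True})

def without_parens():
    return 200, {"ok": True}

# Both forms evaluate to the same (status, body) tuple.
assert with_parens() == without_parens() == (200, {"ok": True})
```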
diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py
index e629c4256d..999a0fa80c 100644
--- a/synapse/rest/client/v2_alpha/groups.py
+++ b/synapse/rest/client/v2_alpha/groups.py
@@ -47,7 +47,7 @@ class GroupServlet(RestServlet):
group_id, requester_user_id
)
- return (200, group_description)
+ return 200, group_description
@defer.inlineCallbacks
def on_POST(self, request, group_id):
@@ -59,7 +59,7 @@ class GroupServlet(RestServlet):
group_id, requester_user_id, content
)
- return (200, {})
+ return 200, {}
class GroupSummaryServlet(RestServlet):
@@ -83,7 +83,7 @@ class GroupSummaryServlet(RestServlet):
group_id, requester_user_id
)
- return (200, get_group_summary)
+ return 200, get_group_summary
class GroupSummaryRoomsCatServlet(RestServlet):
@@ -120,7 +120,7 @@ class GroupSummaryRoomsCatServlet(RestServlet):
content=content,
)
- return (200, resp)
+ return 200, resp
@defer.inlineCallbacks
def on_DELETE(self, request, group_id, category_id, room_id):
@@ -131,7 +131,7 @@ class GroupSummaryRoomsCatServlet(RestServlet):
group_id, requester_user_id, room_id=room_id, category_id=category_id
)
- return (200, resp)
+ return 200, resp
class GroupCategoryServlet(RestServlet):
@@ -157,7 +157,7 @@ class GroupCategoryServlet(RestServlet):
group_id, requester_user_id, category_id=category_id
)
- return (200, category)
+ return 200, category
@defer.inlineCallbacks
def on_PUT(self, request, group_id, category_id):
@@ -169,7 +169,7 @@ class GroupCategoryServlet(RestServlet):
group_id, requester_user_id, category_id=category_id, content=content
)
- return (200, resp)
+ return 200, resp
@defer.inlineCallbacks
def on_DELETE(self, request, group_id, category_id):
@@ -180,7 +180,7 @@ class GroupCategoryServlet(RestServlet):
group_id, requester_user_id, category_id=category_id
)
- return (200, resp)
+ return 200, resp
class GroupCategoriesServlet(RestServlet):
@@ -204,7 +204,7 @@ class GroupCategoriesServlet(RestServlet):
group_id, requester_user_id
)
- return (200, category)
+ return 200, category
class GroupRoleServlet(RestServlet):
@@ -228,7 +228,7 @@ class GroupRoleServlet(RestServlet):
group_id, requester_user_id, role_id=role_id
)
- return (200, category)
+ return 200, category
@defer.inlineCallbacks
def on_PUT(self, request, group_id, role_id):
@@ -240,7 +240,7 @@ class GroupRoleServlet(RestServlet):
group_id, requester_user_id, role_id=role_id, content=content
)
- return (200, resp)
+ return 200, resp
@defer.inlineCallbacks
def on_DELETE(self, request, group_id, role_id):
@@ -251,7 +251,7 @@ class GroupRoleServlet(RestServlet):
group_id, requester_user_id, role_id=role_id
)
- return (200, resp)
+ return 200, resp
class GroupRolesServlet(RestServlet):
@@ -275,7 +275,7 @@ class GroupRolesServlet(RestServlet):
group_id, requester_user_id
)
- return (200, category)
+ return 200, category
class GroupSummaryUsersRoleServlet(RestServlet):
@@ -312,7 +312,7 @@ class GroupSummaryUsersRoleServlet(RestServlet):
content=content,
)
- return (200, resp)
+ return 200, resp
@defer.inlineCallbacks
def on_DELETE(self, request, group_id, role_id, user_id):
@@ -323,7 +323,7 @@ class GroupSummaryUsersRoleServlet(RestServlet):
group_id, requester_user_id, user_id=user_id, role_id=role_id
)
- return (200, resp)
+ return 200, resp
class GroupRoomServlet(RestServlet):
@@ -347,7 +347,7 @@ class GroupRoomServlet(RestServlet):
group_id, requester_user_id
)
- return (200, result)
+ return 200, result
class GroupUsersServlet(RestServlet):
@@ -371,7 +371,7 @@ class GroupUsersServlet(RestServlet):
group_id, requester_user_id
)
- return (200, result)
+ return 200, result
class GroupInvitedUsersServlet(RestServlet):
@@ -395,7 +395,7 @@ class GroupInvitedUsersServlet(RestServlet):
group_id, requester_user_id
)
- return (200, result)
+ return 200, result
class GroupSettingJoinPolicyServlet(RestServlet):
@@ -420,7 +420,7 @@ class GroupSettingJoinPolicyServlet(RestServlet):
group_id, requester_user_id, content
)
- return (200, result)
+ return 200, result
class GroupCreateServlet(RestServlet):
@@ -450,7 +450,7 @@ class GroupCreateServlet(RestServlet):
group_id, requester_user_id, content
)
- return (200, result)
+ return 200, result
class GroupAdminRoomsServlet(RestServlet):
@@ -477,7 +477,7 @@ class GroupAdminRoomsServlet(RestServlet):
group_id, requester_user_id, room_id, content
)
- return (200, result)
+ return 200, result
@defer.inlineCallbacks
def on_DELETE(self, request, group_id, room_id):
@@ -488,7 +488,7 @@ class GroupAdminRoomsServlet(RestServlet):
group_id, requester_user_id, room_id
)
- return (200, result)
+ return 200, result
class GroupAdminRoomsConfigServlet(RestServlet):
@@ -516,7 +516,7 @@ class GroupAdminRoomsConfigServlet(RestServlet):
group_id, requester_user_id, room_id, config_key, content
)
- return (200, result)
+ return 200, result
class GroupAdminUsersInviteServlet(RestServlet):
@@ -546,7 +546,7 @@ class GroupAdminUsersInviteServlet(RestServlet):
group_id, user_id, requester_user_id, config
)
- return (200, result)
+ return 200, result
class GroupAdminUsersKickServlet(RestServlet):
@@ -573,7 +573,7 @@ class GroupAdminUsersKickServlet(RestServlet):
group_id, user_id, requester_user_id, content
)
- return (200, result)
+ return 200, result
class GroupSelfLeaveServlet(RestServlet):
@@ -598,7 +598,7 @@ class GroupSelfLeaveServlet(RestServlet):
group_id, requester_user_id, requester_user_id, content
)
- return (200, result)
+ return 200, result
class GroupSelfJoinServlet(RestServlet):
@@ -623,7 +623,7 @@ class GroupSelfJoinServlet(RestServlet):
group_id, requester_user_id, content
)
- return (200, result)
+ return 200, result
class GroupSelfAcceptInviteServlet(RestServlet):
@@ -648,7 +648,7 @@ class GroupSelfAcceptInviteServlet(RestServlet):
group_id, requester_user_id, content
)
- return (200, result)
+ return 200, result
class GroupSelfUpdatePublicityServlet(RestServlet):
@@ -672,7 +672,7 @@ class GroupSelfUpdatePublicityServlet(RestServlet):
publicise = content["publicise"]
yield self.store.update_group_publicity(group_id, requester_user_id, publicise)
- return (200, {})
+ return 200, {}
class PublicisedGroupsForUserServlet(RestServlet):
@@ -694,7 +694,7 @@ class PublicisedGroupsForUserServlet(RestServlet):
result = yield self.groups_handler.get_publicised_groups_for_user(user_id)
- return (200, result)
+ return 200, result
class PublicisedGroupsForUsersServlet(RestServlet):
@@ -719,7 +719,7 @@ class PublicisedGroupsForUsersServlet(RestServlet):
result = yield self.groups_handler.bulk_get_publicised_groups(user_ids)
- return (200, result)
+ return 200, result
class GroupsForUserServlet(RestServlet):
@@ -741,7 +741,7 @@ class GroupsForUserServlet(RestServlet):
result = yield self.groups_handler.get_joined_groups(requester_user_id)
- return (200, result)
+ return 200, result
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py
index 6008adec7c..2e680134a0 100644
--- a/synapse/rest/client/v2_alpha/keys.py
+++ b/synapse/rest/client/v2_alpha/keys.py
@@ -24,6 +24,7 @@ from synapse.http.servlet import (
parse_json_object_from_request,
parse_string,
)
+from synapse.logging.opentracing import log_kv, set_tag, trace
from synapse.types import StreamToken
from ._base import client_patterns
@@ -68,6 +69,7 @@ class KeyUploadServlet(RestServlet):
self.auth = hs.get_auth()
self.e2e_keys_handler = hs.get_e2e_keys_handler()
+ @trace(opname="upload_keys")
@defer.inlineCallbacks
def on_POST(self, request, device_id):
requester = yield self.auth.get_user_by_req(request, allow_guest=True)
@@ -78,6 +80,14 @@ class KeyUploadServlet(RestServlet):
# passing the device_id here is deprecated; however, we allow it
# for now for compatibility with older clients.
if requester.device_id is not None and device_id != requester.device_id:
+ set_tag("error", True)
+ log_kv(
+ {
+ "message": "Client uploading keys for a different device",
+ "logged_in_id": requester.device_id,
+ "key_being_uploaded": device_id,
+ }
+ )
logger.warning(
"Client uploading keys for a different device "
"(logged in as %s, uploading for %s)",
@@ -95,7 +105,7 @@ class KeyUploadServlet(RestServlet):
result = yield self.e2e_keys_handler.upload_keys_for_user(
user_id, device_id, body
)
- return (200, result)
+ return 200, result
class KeyQueryServlet(RestServlet):
@@ -149,7 +159,7 @@ class KeyQueryServlet(RestServlet):
timeout = parse_integer(request, "timeout", 10 * 1000)
body = parse_json_object_from_request(request)
result = yield self.e2e_keys_handler.query_devices(body, timeout)
- return (200, result)
+ return 200, result
class KeyChangesServlet(RestServlet):
@@ -178,10 +188,11 @@ class KeyChangesServlet(RestServlet):
requester = yield self.auth.get_user_by_req(request, allow_guest=True)
from_token_string = parse_string(request, "from")
+ set_tag("from", from_token_string)
# We want to enforce they do pass us one, but we ignore it and return
# changes after the "to" as well as before.
- parse_string(request, "to")
+ set_tag("to", parse_string(request, "to"))
from_token = StreamToken.from_string(from_token_string)
@@ -189,7 +200,7 @@ class KeyChangesServlet(RestServlet):
results = yield self.device_handler.get_user_ids_changed(user_id, from_token)
- return (200, results)
+ return 200, results
class OneTimeKeyServlet(RestServlet):
@@ -224,7 +235,7 @@ class OneTimeKeyServlet(RestServlet):
timeout = parse_integer(request, "timeout", 10 * 1000)
body = parse_json_object_from_request(request)
result = yield self.e2e_keys_handler.claim_one_time_keys(body, timeout)
- return (200, result)
+ return 200, result
def register_servlets(hs, http_server):
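The tracing additions above follow one pattern: decorate the handler so it runs inside a named span, then annotate that span with searchable tags and structured key/value logs. A minimal sketch with a hypothetical servlet name (the imports mirror the diff):

```python
from synapse.logging.opentracing import log_kv, set_tag, trace

class ExampleServlet:
    @trace(opname="example_op")  # wraps the handler in a span
    def on_POST(self, request, device_id):
        set_tag("device_id", device_id)  # indexed tag on the span
        log_kv({"message": "handling example op"})  # structured span log
        return 200, {}
```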
diff --git a/synapse/rest/client/v2_alpha/notifications.py b/synapse/rest/client/v2_alpha/notifications.py
index d034863a3c..10c1ad5b07 100644
--- a/synapse/rest/client/v2_alpha/notifications.py
+++ b/synapse/rest/client/v2_alpha/notifications.py
@@ -88,7 +88,7 @@ class NotificationsServlet(RestServlet):
returned_push_actions.append(returned_pa)
next_token = str(pa["stream_ordering"])
- return (200, {"notifications": returned_push_actions, "next_token": next_token})
+ return 200, {"notifications": returned_push_actions, "next_token": next_token}
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/password_policy.py b/synapse/rest/client/v2_alpha/password_policy.py
new file mode 100644
index 0000000000..968403cca4
--- /dev/null
+++ b/synapse/rest/client/v2_alpha/password_policy.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from synapse.http.servlet import RestServlet
+
+from ._base import client_patterns
+
+logger = logging.getLogger(__name__)
+
+
+class PasswordPolicyServlet(RestServlet):
+ PATTERNS = client_patterns("/password_policy$")
+
+ def __init__(self, hs):
+ """
+ Args:
+ hs (synapse.server.HomeServer): server
+ """
+ super(PasswordPolicyServlet, self).__init__()
+
+ self.policy = hs.config.password_policy
+ self.enabled = hs.config.password_policy_enabled
+
+ def on_GET(self, request):
+ if not self.enabled or not self.policy:
+ return (200, {})
+
+ policy = {}
+
+ for param in [
+ "minimum_length",
+ "require_digit",
+ "require_symbol",
+ "require_lowercase",
+ "require_uppercase",
+ ]:
+ if param in self.policy:
+ policy["m.%s" % param] = self.policy[param]
+
+ return (200, policy)
+
+
+def register_servlets(hs, http_server):
+ PasswordPolicyServlet(hs).register(http_server)
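The new servlet simply re-keys the configured policy parameters into the "m." namespace. A hedged sketch of that transformation, for an assumed `password_policy` config with `minimum_length: 10` and `require_digit: true`:

```python
# Stand-in for hs.config.password_policy; values are assumptions.
policy_config = {"minimum_length": 10, "require_digit": True}

response = {
    "m.%s" % param: value
    for param, value in policy_config.items()
    if param in ("minimum_length", "require_digit", "require_symbol",
                 "require_lowercase", "require_uppercase")
}
assert response == {"m.minimum_length": 10, "m.require_digit": True}
```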
diff --git a/synapse/rest/client/v2_alpha/read_marker.py b/synapse/rest/client/v2_alpha/read_marker.py
index d93d6a9f24..b3bf8567e1 100644
--- a/synapse/rest/client/v2_alpha/read_marker.py
+++ b/synapse/rest/client/v2_alpha/read_marker.py
@@ -59,7 +59,7 @@ class ReadMarkerRestServlet(RestServlet):
event_id=read_marker_event_id,
)
- return (200, {})
+ return 200, {}
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/receipts.py b/synapse/rest/client/v2_alpha/receipts.py
index 98a97b7059..0dab03d227 100644
--- a/synapse/rest/client/v2_alpha/receipts.py
+++ b/synapse/rest/client/v2_alpha/receipts.py
@@ -52,7 +52,7 @@ class ReceiptRestServlet(RestServlet):
room_id, receipt_type, user_id=requester.user.to_string(), event_id=event_id
)
- return (200, {})
+ return 200, {}
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py
index 05ea1459e3..c48cf017a3 100644
--- a/synapse/rest/client/v2_alpha/register.py
+++ b/synapse/rest/client/v2_alpha/register.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
-# Copyright 2015 - 2016 OpenMarket Ltd
-# Copyright 2017 Vector Creations Ltd
+# Copyright 2015-2016 OpenMarket Ltd
+# Copyright 2017-2018 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,7 +17,7 @@
import hmac
import logging
-from hashlib import sha1
+import re
from six import string_types
@@ -29,18 +30,23 @@ from synapse.api.errors import (
Codes,
LimitExceededError,
SynapseError,
+ ThreepidValidationError,
UnrecognizedRequestError,
)
+from synapse.config.emailconfig import ThreepidBehaviour
from synapse.config.ratelimiting import FederationRateLimitConfig
from synapse.config.server import is_threepid_reserved
+from synapse.http.server import finish_request
from synapse.http.servlet import (
RestServlet,
assert_params_in_dict,
parse_json_object_from_request,
parse_string,
)
+from synapse.push.mailer import load_jinja2_templates
from synapse.util.msisdn import phone_number_to_msisdn
from synapse.util.ratelimitutils import FederationRateLimiter
+from synapse.util.stringutils import assert_valid_client_secret
from synapse.util.threepids import check_3pid_allowed
from ._base import client_patterns, interactive_auth_handler
@@ -71,31 +77,93 @@ class EmailRegisterRequestTokenRestServlet(RestServlet):
super(EmailRegisterRequestTokenRestServlet, self).__init__()
self.hs = hs
self.identity_handler = hs.get_handlers().identity_handler
+ self.config = hs.config
+
+ if self.hs.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
+ from synapse.push.mailer import Mailer, load_jinja2_templates
+
+ template_html, template_text = load_jinja2_templates(
+ self.config.email_template_dir,
+ [
+ self.config.email_registration_template_html,
+ self.config.email_registration_template_text,
+ ],
+ apply_format_ts_filter=True,
+ apply_mxc_to_http_filter=True,
+ public_baseurl=self.config.public_baseurl,
+ )
+ self.mailer = Mailer(
+ hs=self.hs,
+ app_name=self.config.email_app_name,
+ template_html=template_html,
+ template_text=template_text,
+ )
@defer.inlineCallbacks
def on_POST(self, request):
+ if self.hs.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
+ if self.hs.config.local_threepid_handling_disabled_due_to_email_config:
+ logger.warn(
+ "Email registration has been disabled due to lack of email config"
+ )
+ raise SynapseError(
+ 400, "Email-based registration has been disabled on this server"
+ )
body = parse_json_object_from_request(request)
- assert_params_in_dict(
- body, ["id_server", "client_secret", "email", "send_attempt"]
- )
+ assert_params_in_dict(body, ["client_secret", "email", "send_attempt"])
- if not check_3pid_allowed(self.hs, "email", body["email"]):
+ # Extract params from body
+ client_secret = body["client_secret"]
+ email = body["email"]
+ send_attempt = body["send_attempt"]
+ next_link = body.get("next_link") # Optional param
+
+ if not (yield check_3pid_allowed(self.hs, "email", body["email"])):
raise SynapseError(
403,
- "Your email domain is not authorized to register on this server",
+ "Your email is not authorized to register on this server",
Codes.THREEPID_DENIED,
)
- existingUid = yield self.hs.get_datastore().get_user_id_by_threepid(
+ existing_user_id = yield self.hs.get_datastore().get_user_id_by_threepid(
"email", body["email"]
)
- if existingUid is not None:
+ if existing_user_id is not None:
raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE)
- ret = yield self.identity_handler.requestEmailToken(**body)
- return (200, ret)
+ if self.config.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
+ if not self.hs.config.account_threepid_delegate_email:
+ logger.warn(
+ "No upstream email account_threepid_delegate configured on the server to "
+ "handle this request"
+ )
+ raise SynapseError(
+ 400, "Registration by email is not supported on this homeserver"
+ )
+
+ ret = yield self.identity_handler.requestEmailToken(
+ self.hs.config.account_threepid_delegate_email,
+ email,
+ client_secret,
+ send_attempt,
+ next_link,
+ )
+ else:
+ # Send registration emails from Synapse
+ sid = yield self.identity_handler.send_threepid_validation(
+ email,
+ client_secret,
+ send_attempt,
+ self.mailer.send_registration_mail,
+ next_link,
+ )
+
+ # Wrap the session id in a JSON object
+ ret = {"sid": sid}
+
+ return 200, ret
class MsisdnRegisterRequestTokenRestServlet(RestServlet):
@@ -115,30 +183,131 @@ class MsisdnRegisterRequestTokenRestServlet(RestServlet):
body = parse_json_object_from_request(request)
assert_params_in_dict(
- body,
- ["id_server", "client_secret", "country", "phone_number", "send_attempt"],
+ body, ["client_secret", "country", "phone_number", "send_attempt"]
)
+ client_secret = body["client_secret"]
+ country = body["country"]
+ phone_number = body["phone_number"]
+ send_attempt = body["send_attempt"]
+ next_link = body.get("next_link") # Optional param
- msisdn = phone_number_to_msisdn(body["country"], body["phone_number"])
+ msisdn = phone_number_to_msisdn(country, phone_number)
- if not check_3pid_allowed(self.hs, "msisdn", msisdn):
+ assert_valid_client_secret(body["client_secret"])
+
+ if not (yield check_3pid_allowed(self.hs, "msisdn", msisdn)):
raise SynapseError(
403,
"Phone numbers are not authorized to register on this server",
Codes.THREEPID_DENIED,
)
- existingUid = yield self.hs.get_datastore().get_user_id_by_threepid(
+ existing_user_id = yield self.hs.get_datastore().get_user_id_by_threepid(
"msisdn", msisdn
)
- if existingUid is not None:
+ if existing_user_id is not None:
raise SynapseError(
400, "Phone number is already in use", Codes.THREEPID_IN_USE
)
- ret = yield self.identity_handler.requestMsisdnToken(**body)
- return (200, ret)
+ if not self.hs.config.account_threepid_delegate_msisdn:
+ logger.warn(
+ "No upstream msisdn account_threepid_delegate configured on the server to "
+ "handle this request"
+ )
+ raise SynapseError(
+ 400, "Registration by phone number is not supported on this homeserver"
+ )
+
+ ret = yield self.identity_handler.requestMsisdnToken(
+ self.hs.config.account_threepid_delegate_msisdn,
+ country,
+ phone_number,
+ client_secret,
+ send_attempt,
+ next_link,
+ )
+
+ return 200, ret
+
+
+class RegistrationSubmitTokenServlet(RestServlet):
+ """Handles registration 3PID validation token submission"""
+
+ PATTERNS = client_patterns(
+ "/registration/(?P<medium>[^/]*)/submit_token$", releases=(), unstable=True
+ )
+
+ def __init__(self, hs):
+ """
+ Args:
+ hs (synapse.server.HomeServer): server
+ """
+ super(RegistrationSubmitTokenServlet, self).__init__()
+ self.hs = hs
+ self.auth = hs.get_auth()
+ self.config = hs.config
+ self.clock = hs.get_clock()
+ self.store = hs.get_datastore()
+
+ @defer.inlineCallbacks
+ def on_GET(self, request, medium):
+ if medium != "email":
+ raise SynapseError(
+ 400, "This medium is currently not supported for registration"
+ )
+ if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
+ if self.config.local_threepid_handling_disabled_due_to_email_config:
+ logger.warn(
+ "User registration via email has been disabled due to lack of email config"
+ )
+ raise SynapseError(
+ 400, "Email-based registration is disabled on this server"
+ )
+
+ sid = parse_string(request, "sid", required=True)
+ client_secret = parse_string(request, "client_secret", required=True)
+ token = parse_string(request, "token", required=True)
+
+ # Attempt to validate a 3PID session
+ try:
+ # Mark the session as valid
+ next_link = yield self.store.validate_threepid_session(
+ sid, client_secret, token, self.clock.time_msec()
+ )
+
+ # Perform a 302 redirect if next_link is set
+ if next_link:
+ if next_link.startswith("file:///"):
+ logger.warn(
+ "Not redirecting to next_link as it is a local file: address"
+ )
+ else:
+ request.setResponseCode(302)
+ request.setHeader("Location", next_link)
+ finish_request(request)
+ return None
+
+ # Otherwise show the success template
+ html = self.config.email_registration_template_success_html_content
+
+ request.setResponseCode(200)
+ except ThreepidValidationError as e:
+ # Return the error code from the failed validation
+ request.setResponseCode(e.code)
+
+ # Show a failure page with a reason
+ html_template, = load_jinja2_templates(
+ self.config.email_template_dir,
+ [self.config.email_registration_template_failure_html],
+ )
+
+ template_vars = {"failure_reason": e.msg}
+ html = html_template.render(**template_vars)
+
+ request.write(html.encode("utf-8"))
+ finish_request(request)
class UsernameAvailabilityRestServlet(RestServlet):
@@ -178,7 +347,7 @@ class UsernameAvailabilityRestServlet(RestServlet):
yield self.registration_handler.check_username(username)
- return (200, {"available": True})
+ return 200, {"available": True}
class RegisterRestServlet(RestServlet):
@@ -200,6 +369,7 @@ class RegisterRestServlet(RestServlet):
self.room_member_handler = hs.get_room_member_handler()
self.macaroon_gen = hs.get_macaroon_generator()
self.ratelimiter = hs.get_registration_ratelimiter()
+ self.password_policy_handler = hs.get_password_policy_handler()
self.clock = hs.get_clock()
@interactive_auth_handler
@@ -231,7 +401,6 @@ class RegisterRestServlet(RestServlet):
if kind == b"guest":
ret = yield self._do_guest_registration(body, address=client_addr)
return ret
- return
elif kind != b"user":
raise UnrecognizedRequestError(
"Do not understand membership kind: %s" % (kind,)
@@ -246,6 +415,7 @@ class RegisterRestServlet(RestServlet):
or len(body["password"]) > 512
):
raise SynapseError(400, "Invalid password")
+ self.password_policy_handler.validate_password(body["password"])
desired_password = body["password"]
desired_username = None
@@ -257,12 +427,14 @@ class RegisterRestServlet(RestServlet):
raise SynapseError(400, "Invalid username")
desired_username = body["username"]
+ desired_display_name = body.get("display_name")
+
appservice = None
if self.auth.has_access_token(request):
appservice = yield self.auth.get_appservice_by_req(request)
- # fork off as soon as possible for ASes and shared secret auth which
- # have completely different registration flows to normal users
+ # fork off as soon as possible for ASes which have completely
+ # different registration flows to normal users
# == Application Service Registration ==
if appservice:
@@ -280,13 +452,16 @@ class RegisterRestServlet(RestServlet):
if isinstance(desired_username, string_types):
result = yield self._do_appservice_registration(
- desired_username, access_token, body
+ desired_username,
+ desired_password,
+ desired_display_name,
+ access_token,
+ body,
)
- return (200, result) # we throw for non 200 responses
- return
+ return 200, result # we throw for non 200 responses
- # for either shared secret or regular registration, downcase the
- # provided username before attempting to register it. This should mean
+ # for regular registration, downcase the provided username before
+ # attempting to register it. This should mean
# that people who try to register with upper-case in their usernames
# don't get a nasty surprise. (Note that we treat username
# case-insensitively in login, so they are free to carry on imagining
@@ -294,16 +469,6 @@ class RegisterRestServlet(RestServlet):
if desired_username is not None:
desired_username = desired_username.lower()
- # == Shared Secret Registration == (e.g. create new user scripts)
- if "mac" in body:
- # FIXME: Should we really be determining if this is shared secret
- # auth based purely on the 'mac' key?
- result = yield self._do_shared_secret_registration(
- desired_username, desired_password, body
- )
- return (200, result) # we throw for non 200 responses
- return
-
# == Normal User Registration == (everyone else)
if not self.hs.config.enable_registration:
raise SynapseError(403, "Registration has been disabled")
@@ -413,7 +578,7 @@ class RegisterRestServlet(RestServlet):
medium = auth_result[login_type]["medium"]
address = auth_result[login_type]["address"]
- if not check_3pid_allowed(self.hs, medium, address):
+ if not (yield check_3pid_allowed(self.hs, medium, address)):
raise SynapseError(
403,
"Third party identifiers (email/phone numbers)"
@@ -421,6 +586,80 @@ class RegisterRestServlet(RestServlet):
Codes.THREEPID_DENIED,
)
+ existingUid = yield self.store.get_user_id_by_threepid(
+ medium, address
+ )
+
+ if existingUid is not None:
+ raise SynapseError(
+ 400, "%s is already in use" % medium, Codes.THREEPID_IN_USE
+ )
+
+ if self.hs.config.register_mxid_from_3pid:
+ # override the desired_username based on the 3PID if any.
+ # reset it first to avoid folks picking their own username.
+ desired_username = None
+
+ # we should have an auth_result at this point if we're going to progress
+ # to register the user (i.e. we haven't picked up a registered_user_id
+ # from our session store), in which case get ready and generate the
+ # desired_username
+ if auth_result:
+ if (
+ self.hs.config.register_mxid_from_3pid == "email"
+ and LoginType.EMAIL_IDENTITY in auth_result
+ ):
+ address = auth_result[LoginType.EMAIL_IDENTITY]["address"]
+ desired_username = synapse.types.strip_invalid_mxid_characters(
+ address.replace("@", "-").lower()
+ )
+
+ # find a unique mxid for the account, suffixing numbers
+ # if needed
+ while True:
+ try:
+ yield self.registration_handler.check_username(
+ desired_username,
+ guest_access_token=guest_access_token,
+ assigned_user_id=registered_user_id,
+ )
+ # if we got this far we passed the check.
+ break
+ except SynapseError as e:
+ if e.errcode == Codes.USER_IN_USE:
+ m = re.match(r"^(.*?)(\d+)$", desired_username)
+ if m:
+ desired_username = m.group(1) + str(
+ int(m.group(2)) + 1
+ )
+ else:
+ desired_username += "1"
+ else:
+ # something else went wrong.
+ break
+
+ if self.hs.config.register_just_use_email_for_display_name:
+ desired_display_name = address
+ else:
+ # Custom mapping between email address and display name
+ desired_display_name = self._map_email_to_displayname(address)
+ elif (
+ self.hs.config.register_mxid_from_3pid == "msisdn"
+ and LoginType.MSISDN in auth_result
+ ):
+ desired_username = auth_result[LoginType.MSISDN]["address"]
+ else:
+ raise SynapseError(
+ 400, "Cannot derive mxid from 3pid; no recognised 3pid"
+ )
+
+ if desired_username is not None:
+ yield self.registration_handler.check_username(
+ desired_username,
+ guest_access_token=guest_access_token,
+ assigned_user_id=registered_user_id,
+ )
+
if registered_user_id is not None:
logger.info(
"Already registered user ID %r for this session", registered_user_id
@@ -431,9 +670,16 @@ class RegisterRestServlet(RestServlet):
# NB: This may be from the auth handler and NOT from the POST
assert_params_in_dict(params, ["password"])
- desired_username = params.get("username", None)
+ if not self.hs.config.register_mxid_from_3pid:
+ desired_username = params.get("username", None)
+ else:
+ # we keep the original desired_username derived from the 3pid above
+ pass
+
guest_access_token = params.get("guest_access_token", None)
- new_password = params.get("password", None)
+
+ # XXX: don't we need to validate these for length etc like we did on
+ # the ones from the JSON body earlier on in the method?
if desired_username is not None:
desired_username = desired_username.lower()
@@ -453,11 +699,11 @@ class RegisterRestServlet(RestServlet):
medium = auth_result[login_type]["medium"]
address = auth_result[login_type]["address"]
- existingUid = yield self.store.get_user_id_by_threepid(
+ existing_user_id = yield self.store.get_user_id_by_threepid(
medium, address
)
- if existingUid is not None:
+ if existing_user_id is not None:
raise SynapseError(
400,
"%s is already in use" % medium,
@@ -466,8 +712,9 @@ class RegisterRestServlet(RestServlet):
registered_user_id = yield self.registration_handler.register_user(
localpart=desired_username,
- password=new_password,
+ password=params.get("password", None),
guest_access_token=guest_access_token,
+ default_display_name=desired_display_name,
threepid=threepid,
address=client_addr,
)
@@ -479,6 +726,14 @@ class RegisterRestServlet(RestServlet):
):
yield self.store.upsert_monthly_active_user(registered_user_id)
+ if self.hs.config.shadow_server:
+ yield self.registration_handler.shadow_register(
+ localpart=desired_username,
+ display_name=desired_display_name,
+ auth_result=auth_result,
+ params=params,
+ )
+
# remember that we've now registered that user account, and with
# what user ID (since the user may not have specified)
self.auth_handler.set_session_data(
@@ -496,56 +751,38 @@ class RegisterRestServlet(RestServlet):
user_id=registered_user_id,
auth_result=auth_result,
access_token=return_dict.get("access_token"),
- bind_email=params.get("bind_email"),
- bind_msisdn=params.get("bind_msisdn"),
)
- return (200, return_dict)
+ return 200, return_dict
def on_OPTIONS(self, _):
return 200, {}
@defer.inlineCallbacks
- def _do_appservice_registration(self, username, as_token, body):
+ def _do_appservice_registration(
+ self, username, password, display_name, as_token, body
+ ):
+
+ # FIXME: appservice_register() is horribly duplicated with register()
+ # and they should probably just be combined together with a config flag.
user_id = yield self.registration_handler.appservice_register(
- username, as_token
+ username, as_token, password, display_name
)
- return (yield self._create_registration_details(user_id, body))
+ result = yield self._create_registration_details(user_id, body)
- @defer.inlineCallbacks
- def _do_shared_secret_registration(self, username, password, body):
- if not self.hs.config.registration_shared_secret:
- raise SynapseError(400, "Shared secret registration is not enabled")
- if not username:
- raise SynapseError(
- 400, "username must be specified", errcode=Codes.BAD_JSON
+ auth_result = body.get("auth_result")
+ if auth_result and LoginType.EMAIL_IDENTITY in auth_result:
+ threepid = auth_result[LoginType.EMAIL_IDENTITY]
+ yield self._register_email_threepid(
+ user_id, threepid, result["access_token"], body.get("bind_email")
)
- # use the username from the original request rather than the
- # downcased one in `username` for the mac calculation
- user = body["username"].encode("utf-8")
-
- # str() because otherwise hmac complains that 'unicode' does not
- # have the buffer interface
- got_mac = str(body["mac"])
-
- # FIXME this is different to the /v1/register endpoint, which
- # includes the password and admin flag in the hashed text. Why are
- # these different?
- want_mac = hmac.new(
- key=self.hs.config.registration_shared_secret.encode(),
- msg=user,
- digestmod=sha1,
- ).hexdigest()
-
- if not compare_digest(want_mac, got_mac):
- raise SynapseError(403, "HMAC incorrect")
-
- user_id = yield self.registration_handler.register_user(
- localpart=username, password=password
- )
+ if auth_result and LoginType.MSISDN in auth_result:
+ threepid = auth_result[LoginType.MSISDN]
+ yield self._register_msisdn_threepid(
+ user_id, threepid, result["access_token"], body.get("bind_msisdn")
+ )
- result = yield self._create_registration_details(user_id, body)
return result
@defer.inlineCallbacks
@@ -599,8 +836,63 @@ class RegisterRestServlet(RestServlet):
)
+def cap(name):
+ """Capitalise parts of a name containing different words, including those
+ separated by hyphens.
+ For example, 'John-Doe'
+
+ Args:
+ name (str): The name to parse
+ """
+ if not name:
+ return name
+
+ # Split the name by whitespace then hyphens, capitalizing each part then
+ # joining it back together.
+ capatilized_name = " ".join(
+ "-".join(part.capitalize() for part in space_part.split("-"))
+ for space_part in name.split()
+ )
+ return capitalized_name
+
+
+def _map_email_to_displayname(address):
+ """Custom mapping from an email address to a user displayname
+
+ Args:
+ address (str): The email address to process
+ Returns:
+ str: The new displayname
+ """
+ # Split the part before and after the @ in the email.
+ # Replace all . with spaces in the first part
+ parts = address.replace(".", " ").split("@")
+
+ # Figure out which org this email address belongs to
+ org_parts = parts[1].split(" ")
+
+ # If this is a ...matrix.org email, mark them as an Admin
+ if org_parts[-2] == "matrix" and org_parts[-1] == "org":
+ org = "Tchap Admin"
+
+ # If this is a ...gouv.fr address, set the org to whatever is before
+ # gouv.fr. If there isn't anything (a @gouv.fr email) simply mark their
+ # org as "gouv"
+ elif org_parts[-2] == "gouv" and org_parts[-1] == "fr":
+ org = org_parts[-3] if len(org_parts) > 2 else org_parts[-2]
+
+ # Otherwise, mark their org as the email's second-level domain name
+ else:
+ org = org_parts[-2]
+
+ desired_display_name = cap(parts[0]) + " [" + cap(org) + "]"
+
+ return desired_display_name
+
+
def register_servlets(hs, http_server):
EmailRegisterRequestTokenRestServlet(hs).register(http_server)
MsisdnRegisterRequestTokenRestServlet(hs).register(http_server)
UsernameAvailabilityRestServlet(hs).register(http_server)
+ RegistrationSubmitTokenServlet(hs).register(http_server)
RegisterRestServlet(hs).register(http_server)
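Tracing cap() and _map_email_to_displayname() by hand gives these worked examples (the addresses are illustrative):

```python
# john.doe@dev.gouv.fr : gouv.fr branch, org is the label before gouv.fr
assert _map_email_to_displayname("john.doe@dev.gouv.fr") == "John Doe [Dev]"
# jane@gouv.fr : bare gouv.fr address, org falls back to "gouv"
assert _map_email_to_displayname("jane@gouv.fr") == "Jane [Gouv]"
# bot@matrix.org : matrix.org branch, org becomes "Tchap Admin"
assert _map_email_to_displayname("bot@matrix.org") == "Bot [Tchap Admin]"
# a.b-c@example.com : default branch, org is the second-level domain
assert _map_email_to_displayname("a.b-c@example.com") == "A B-C [Example]"
```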
diff --git a/synapse/rest/client/v2_alpha/relations.py b/synapse/rest/client/v2_alpha/relations.py
index 1538b247e5..040b37c504 100644
--- a/synapse/rest/client/v2_alpha/relations.py
+++ b/synapse/rest/client/v2_alpha/relations.py
@@ -118,7 +118,7 @@ class RelationSendServlet(RestServlet):
requester, event_dict=event_dict, txn_id=txn_id
)
- return (200, {"event_id": event.event_id})
+ return 200, {"event_id": event.event_id}
class RelationPaginationServlet(RestServlet):
@@ -198,7 +198,7 @@ class RelationPaginationServlet(RestServlet):
return_value["chunk"] = events
return_value["original_event"] = original_event
- return (200, return_value)
+ return 200, return_value
class RelationAggregationPaginationServlet(RestServlet):
@@ -270,7 +270,7 @@ class RelationAggregationPaginationServlet(RestServlet):
to_token=to_token,
)
- return (200, pagination_chunk.to_dict())
+ return 200, pagination_chunk.to_dict()
class RelationAggregationGroupPaginationServlet(RestServlet):
@@ -356,7 +356,7 @@ class RelationAggregationGroupPaginationServlet(RestServlet):
return_value = result.to_dict()
return_value["chunk"] = events
- return (200, return_value)
+ return 200, return_value
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/report_event.py b/synapse/rest/client/v2_alpha/report_event.py
index 3fdd4584a3..e7449864cd 100644
--- a/synapse/rest/client/v2_alpha/report_event.py
+++ b/synapse/rest/client/v2_alpha/report_event.py
@@ -72,7 +72,7 @@ class ReportEventRestServlet(RestServlet):
received_ts=self.clock.time_msec(),
)
- return (200, {})
+ return 200, {}
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py
index 10dec96208..df4f44cd36 100644
--- a/synapse/rest/client/v2_alpha/room_keys.py
+++ b/synapse/rest/client/v2_alpha/room_keys.py
@@ -135,7 +135,7 @@ class RoomKeysServlet(RestServlet):
body = {"rooms": {room_id: body}}
yield self.e2e_room_keys_handler.upload_room_keys(user_id, version, body)
- return (200, {})
+ return 200, {}
@defer.inlineCallbacks
def on_GET(self, request, room_id, session_id):
@@ -218,7 +218,7 @@ class RoomKeysServlet(RestServlet):
else:
room_keys = room_keys["rooms"][room_id]
- return (200, room_keys)
+ return 200, room_keys
@defer.inlineCallbacks
def on_DELETE(self, request, room_id, session_id):
@@ -242,7 +242,7 @@ class RoomKeysServlet(RestServlet):
yield self.e2e_room_keys_handler.delete_room_keys(
user_id, version, room_id, session_id
)
- return (200, {})
+ return 200, {}
class RoomKeysNewVersionServlet(RestServlet):
@@ -293,7 +293,7 @@ class RoomKeysNewVersionServlet(RestServlet):
info = parse_json_object_from_request(request)
new_version = yield self.e2e_room_keys_handler.create_version(user_id, info)
- return (200, {"version": new_version})
+ return 200, {"version": new_version}
# we deliberately don't have a PUT /version, as these things really should
# be immutable to avoid people footgunning
@@ -338,7 +338,7 @@ class RoomKeysVersionServlet(RestServlet):
except SynapseError as e:
if e.code == 404:
raise SynapseError(404, "No backup found", Codes.NOT_FOUND)
- return (200, info)
+ return 200, info
@defer.inlineCallbacks
def on_DELETE(self, request, version):
@@ -358,7 +358,7 @@ class RoomKeysVersionServlet(RestServlet):
user_id = requester.user.to_string()
yield self.e2e_room_keys_handler.delete_version(user_id, version)
- return (200, {})
+ return 200, {}
@defer.inlineCallbacks
def on_PUT(self, request, version):
@@ -392,7 +392,7 @@ class RoomKeysVersionServlet(RestServlet):
)
yield self.e2e_room_keys_handler.update_version(user_id, version, info)
- return (200, {})
+ return 200, {}
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py
index 14ba61a63e..d2c3316eb7 100644
--- a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py
+++ b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py
@@ -80,7 +80,7 @@ class RoomUpgradeRestServlet(RestServlet):
ret = {"replacement_room": new_room_id}
- return (200, ret)
+ return 200, ret
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/sendtodevice.py b/synapse/rest/client/v2_alpha/sendtodevice.py
index 2613648d82..d90e52ed1a 100644
--- a/synapse/rest/client/v2_alpha/sendtodevice.py
+++ b/synapse/rest/client/v2_alpha/sendtodevice.py
@@ -19,6 +19,7 @@ from twisted.internet import defer
from synapse.http import servlet
from synapse.http.servlet import parse_json_object_from_request
+from synapse.logging.opentracing import set_tag, trace
from synapse.rest.client.transactions import HttpTransactionCache
from ._base import client_patterns
@@ -42,7 +43,10 @@ class SendToDeviceRestServlet(servlet.RestServlet):
self.txns = HttpTransactionCache(hs)
self.device_message_handler = hs.get_device_message_handler()
+ @trace(opname="sendToDevice")
def on_PUT(self, request, message_type, txn_id):
+ set_tag("message_type", message_type)
+ set_tag("txn_id", txn_id)
return self.txns.fetch_or_execute_request(
request, self._put, request, message_type, txn_id
)
diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py
index 7b32dd2212..c98c5a3802 100644
--- a/synapse/rest/client/v2_alpha/sync.py
+++ b/synapse/rest/client/v2_alpha/sync.py
@@ -174,7 +174,7 @@ class SyncRestServlet(RestServlet):
time_now, sync_result, requester.access_token_id, filter
)
- return (200, response_content)
+ return 200, response_content
@defer.inlineCallbacks
def encode_response(self, time_now, sync_result, access_token_id, filter):
diff --git a/synapse/rest/client/v2_alpha/tags.py b/synapse/rest/client/v2_alpha/tags.py
index d173544355..3b555669a0 100644
--- a/synapse/rest/client/v2_alpha/tags.py
+++ b/synapse/rest/client/v2_alpha/tags.py
@@ -45,7 +45,7 @@ class TagListServlet(RestServlet):
tags = yield self.store.get_tags_for_room(user_id, room_id)
- return (200, {"tags": tags})
+ return 200, {"tags": tags}
class TagServlet(RestServlet):
@@ -76,7 +76,7 @@ class TagServlet(RestServlet):
self.notifier.on_new_event("account_data_key", max_id, users=[user_id])
- return (200, {})
+ return 200, {}
@defer.inlineCallbacks
def on_DELETE(self, request, user_id, room_id, tag):
@@ -88,7 +88,7 @@ class TagServlet(RestServlet):
self.notifier.on_new_event("account_data_key", max_id, users=[user_id])
- return (200, {})
+ return 200, {}
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/thirdparty.py b/synapse/rest/client/v2_alpha/thirdparty.py
index 158e686b01..2e8d672471 100644
--- a/synapse/rest/client/v2_alpha/thirdparty.py
+++ b/synapse/rest/client/v2_alpha/thirdparty.py
@@ -40,7 +40,7 @@ class ThirdPartyProtocolsServlet(RestServlet):
yield self.auth.get_user_by_req(request, allow_guest=True)
protocols = yield self.appservice_handler.get_3pe_protocols()
- return (200, protocols)
+ return 200, protocols
class ThirdPartyProtocolServlet(RestServlet):
@@ -60,9 +60,9 @@ class ThirdPartyProtocolServlet(RestServlet):
only_protocol=protocol
)
if protocol in protocols:
- return (200, protocols[protocol])
+ return 200, protocols[protocol]
else:
- return (404, {"error": "Unknown protocol"})
+ return 404, {"error": "Unknown protocol"}
class ThirdPartyUserServlet(RestServlet):
@@ -85,7 +85,7 @@ class ThirdPartyUserServlet(RestServlet):
ThirdPartyEntityKind.USER, protocol, fields
)
- return (200, results)
+ return 200, results
class ThirdPartyLocationServlet(RestServlet):
@@ -108,7 +108,7 @@ class ThirdPartyLocationServlet(RestServlet):
ThirdPartyEntityKind.LOCATION, protocol, fields
)
- return (200, results)
+ return 200, results
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/user_directory.py b/synapse/rest/client/v2_alpha/user_directory.py
index 7ab2b80e46..e586fc595f 100644
--- a/synapse/rest/client/v2_alpha/user_directory.py
+++ b/synapse/rest/client/v2_alpha/user_directory.py
@@ -15,10 +15,13 @@
import logging
+from signedjson.sign import sign_json
+
from twisted.internet import defer
from synapse.api.errors import SynapseError
from synapse.http.servlet import RestServlet, parse_json_object_from_request
+from synapse.types import UserID
from ._base import client_patterns
@@ -37,6 +40,7 @@ class UserDirectorySearchRestServlet(RestServlet):
self.hs = hs
self.auth = hs.get_auth()
self.user_directory_handler = hs.get_user_directory_handler()
+ self.http_client = hs.get_simple_http_client()
@defer.inlineCallbacks
def on_POST(self, request):
@@ -60,10 +64,20 @@ class UserDirectorySearchRestServlet(RestServlet):
user_id = requester.user.to_string()
if not self.hs.config.user_directory_search_enabled:
- return (200, {"limited": False, "results": []})
+ return 200, {"limited": False, "results": []}
body = parse_json_object_from_request(request)
+ if self.hs.config.user_directory_defer_to_id_server:
+ signed_body = sign_json(
+ body, self.hs.hostname, self.hs.config.signing_key[0]
+ )
+ url = "%s/_matrix/identity/api/v1/user_directory/search" % (
+ self.hs.config.user_directory_defer_to_id_server,
+ )
+ resp = yield self.http_client.post_json_get_json(url, signed_body)
+ defer.returnValue((200, resp))
+
limit = body.get("limit", 10)
limit = min(limit, 50)
@@ -76,8 +90,90 @@ class UserDirectorySearchRestServlet(RestServlet):
user_id, search_term, limit
)
- return (200, results)
+ return 200, results
+
+
+class UserInfoServlet(RestServlet):
+ """
+ GET /user/{user_id}/info HTTP/1.1
+ """
+
+ PATTERNS = client_patterns("/user/(?P<user_id>[^/]*)/info$")
+
+ def __init__(self, hs):
+ super(UserInfoServlet, self).__init__()
+ self.hs = hs
+ self.auth = hs.get_auth()
+ self.store = hs.get_datastore()
+ self.notifier = hs.get_notifier()
+ self.clock = hs.get_clock()
+ self.transport_layer = hs.get_federation_transport_client()
+ registry = hs.get_federation_registry()
+
+ if not registry.query_handlers.get("user_info"):
+ registry.register_query_handler("user_info", self._on_federation_query)
+
+ @defer.inlineCallbacks
+ def on_GET(self, request, user_id):
+ # Ensure the user is authenticated
+ yield self.auth.get_user_by_req(request, allow_guest=False)
+
+ user = UserID.from_string(user_id)
+ if not self.hs.is_mine(user):
+ # Attempt to make a federation request to the server that owns this user
+ args = {"user_id": user_id}
+ res = yield self.transport_layer.make_query(
+ user.domain, "user_info", args, retry_on_dns_fail=True
+ )
+ defer.returnValue((200, res))
+
+ res = yield self._get_user_info(user_id)
+ defer.returnValue((200, res))
+
+ @defer.inlineCallbacks
+ def _on_federation_query(self, args):
+ """Called when a request for user information appears over federation
+
+ Args:
+ args (dict): Dictionary of query arguments provided by the request
+
+ Returns:
+ Deferred[dict]: Deactivation and expiration information for a given user
+ """
+ user_id = args.get("user_id")
+ if not user_id:
+ raise SynapseError(400, "user_id not provided")
+
+ user = UserID.from_string(user_id)
+ if not self.hs.is_mine(user):
+ raise SynapseError(400, "User is not hosted on this homeserver")
+
+ res = yield self._get_user_info(user_id)
+ defer.returnValue(res)
+
+ @defer.inlineCallbacks
+ def _get_user_info(self, user_id):
+ """Retrieve information about a given user
+
+ Args:
+ user_id (str): The User ID of a given user on this homeserver
+
+ Returns:
+ Deferred[dict]: Deactivation and expiration information for a given user
+ """
+ # Check whether user is deactivated
+ is_deactivated = yield self.store.get_user_deactivated_status(user_id)
+
+ # Check whether user is expired
+ expiration_ts = yield self.store.get_expiration_ts_for_user(user_id)
+ is_expired = (
+ expiration_ts is not None and self.clock.time_msec() >= expiration_ts
+ )
+
+ res = {"expired": is_expired, "deactivated": is_deactivated}
+ defer.returnValue(res)
def register_servlets(hs, http_server):
UserDirectorySearchRestServlet(hs).register(http_server)
+ UserInfoServlet(hs).register(http_server)
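From a client's point of view the new servlet behaves like this (URL, user ID and token are placeholders): local users are answered from the database, remote users via a "user_info" federation query to their homeserver, and either way the body carries the two flags computed in _get_user_info.

```python
import requests

resp = requests.get(
    "https://example.com/_matrix/client/r0/user/%40alice%3Aexample.com/info",
    headers={"Authorization": "Bearer <access_token>"},
)
# e.g. {"expired": False, "deactivated": False}
print(resp.json())
```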
diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py
index 0e09191632..0058b6b459 100644
--- a/synapse/rest/client/versions.py
+++ b/synapse/rest/client/versions.py
@@ -24,6 +24,10 @@ logger = logging.getLogger(__name__)
class VersionsRestServlet(RestServlet):
PATTERNS = [re.compile("^/_matrix/client/versions$")]
+ def __init__(self, hs):
+ super(VersionsRestServlet, self).__init__()
+ self.config = hs.config
+
def on_GET(self, request):
return (
200,
@@ -49,5 +53,5 @@ class VersionsRestServlet(RestServlet):
)
-def register_servlets(http_server):
- VersionsRestServlet().register(http_server)
+def register_servlets(hs, http_server):
+ VersionsRestServlet(hs).register(http_server)
diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py
index 031a316693..55580bc59e 100644
--- a/synapse/rest/key/v2/remote_key_resource.py
+++ b/synapse/rest/key/v2/remote_key_resource.py
@@ -13,7 +13,9 @@
# limitations under the License.
import logging
-from io import BytesIO
+
+from canonicaljson import encode_canonical_json, json
+from signedjson.sign import sign_json
from twisted.internet import defer
@@ -95,6 +97,7 @@ class RemoteKey(DirectServeResource):
self.store = hs.get_datastore()
self.clock = hs.get_clock()
self.federation_domain_whitelist = hs.config.federation_domain_whitelist
+ self.config = hs.config
@wrap_json_request_handler
async def _async_render_GET(self, request):
@@ -214,15 +217,14 @@ class RemoteKey(DirectServeResource):
yield self.fetcher.get_keys(cache_misses)
yield self.query_keys(request, query, query_remote_on_cache_miss=False)
else:
- result_io = BytesIO()
- result_io.write(b'{"server_keys":')
- sep = b"["
- for json_bytes in json_results:
- result_io.write(sep)
- result_io.write(json_bytes)
- sep = b","
- if sep == b"[":
- result_io.write(sep)
- result_io.write(b"]}")
-
- respond_with_json_bytes(request, 200, result_io.getvalue())
+ signed_keys = []
+ for key_json in json_results:
+ key_json = json.loads(key_json)
+ for signing_key in self.config.key_server_signing_keys:
+ key_json = sign_json(key_json, self.config.server_name, signing_key)
+
+ signed_keys.append(key_json)
+
+ results = {"server_keys": signed_keys}
+
+ respond_with_json_bytes(request, 200, encode_canonical_json(results))
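The rewritten response path re-signs each cached key JSON with this server's notary key(s) and canonically encodes the set, instead of splicing raw bytes together. A standalone sketch (the key here is freshly generated for illustration; Synapse loads key_server_signing_keys from config):

```python
from canonicaljson import encode_canonical_json
from signedjson.key import generate_signing_key
from signedjson.sign import sign_json

signing_key = generate_signing_key("key1")  # stand-in for a configured key
key_json = {"server_name": "remote.example", "verify_keys": {}}

# Add this notary's signature alongside any existing ones.
signed = sign_json(key_json, "notary.example", signing_key)

body = encode_canonical_json({"server_keys": [signed]})
```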
diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py
index cf5759e9a6..b972e152a9 100644
--- a/synapse/rest/media/v1/media_repository.py
+++ b/synapse/rest/media/v1/media_repository.py
@@ -318,14 +318,14 @@ class MediaRepository(object):
responder = yield self.media_storage.fetch_media(file_info)
if responder:
- return (responder, media_info)
+ return responder, media_info
# Failed to find the file anywhere, lets download it.
media_info = yield self._download_remote_file(server_name, media_id, file_id)
responder = yield self.media_storage.fetch_media(file_info)
- return (responder, media_info)
+ return responder, media_info
@defer.inlineCallbacks
def _download_remote_file(self, server_name, media_id, file_id):
@@ -526,7 +526,7 @@ class MediaRepository(object):
try:
file_info = FileInfo(
server_name=server_name,
- file_id=media_id,
+ file_id=file_id,
thumbnail=True,
thumbnail_width=t_width,
thumbnail_height=t_height,
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index bd40891a7f..fbc2fc3a2f 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -74,6 +74,8 @@ class PreviewUrlResource(DirectServeResource):
treq_args={"browser_like_redirects": True},
ip_whitelist=hs.config.url_preview_ip_range_whitelist,
ip_blacklist=hs.config.url_preview_ip_range_blacklist,
+ http_proxy=os.getenv("http_proxy"),
+ https_proxy=os.getenv("HTTPS_PROXY"),
)
self.media_repo = media_repo
self.primary_base_path = media_repo.primary_base_path
@@ -183,7 +185,6 @@ class PreviewUrlResource(DirectServeResource):
if isinstance(og, six.text_type):
og = og.encode("utf8")
return og
- return
media_info = yield self._download_url(url, user)
diff --git a/synapse/rest/media/v1/thumbnailer.py b/synapse/rest/media/v1/thumbnailer.py
index 90d8e6bffe..c995d7e043 100644
--- a/synapse/rest/media/v1/thumbnailer.py
+++ b/synapse/rest/media/v1/thumbnailer.py
@@ -78,9 +78,9 @@ class Thumbnailer(object):
"""
if max_width * self.height < max_height * self.width:
- return (max_width, (max_width * self.height) // self.width)
+ return max_width, (max_width * self.height) // self.width
else:
- return ((max_height * self.width) // self.height, max_height)
+ return (max_height * self.width) // self.height, max_height
def scale(self, width, height, output_type):
"""Rescales the image to the given dimensions.
diff --git a/synapse/rest/well_known.py b/synapse/rest/well_known.py
index 5e8fda4b65..20177b44e7 100644
--- a/synapse/rest/well_known.py
+++ b/synapse/rest/well_known.py
@@ -34,7 +34,7 @@ class WellKnownBuilder(object):
self._config = hs.config
def get_well_known(self):
- # if we don't have a public_base_url, we can't help much here.
+ # if we don't have a public_baseurl, we can't help much here.
if self._config.public_baseurl is None:
return None
diff --git a/synapse/rulecheck/__init__.py b/synapse/rulecheck/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/synapse/rulecheck/__init__.py
diff --git a/synapse/rulecheck/domain_rule_checker.py b/synapse/rulecheck/domain_rule_checker.py
new file mode 100644
index 0000000000..6f2a1931c5
--- /dev/null
+++ b/synapse/rulecheck/domain_rule_checker.py
@@ -0,0 +1,181 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from synapse.config._base import ConfigError
+
+logger = logging.getLogger(__name__)
+
+
+class DomainRuleChecker(object):
+ """
+ An implementation of the SpamChecker interface that prevents users in one
+ domain from inviting users in other domains to rooms, based on a configuration.
+
+ Takes a config in the format:
+
+ spam_checker:
+ module: "rulecheck.DomainRuleChecker"
+ config:
+ domain_mapping:
+ "inviter_domain": [ "invitee_domain_permitted", "other_domain_permitted" ]
+ "other_inviter_domain": [ "invitee_domain_permitted" ]
+ default: False
+
+ # Only let local users join rooms if they were explicitly invited.
+ can_only_join_rooms_with_invite: false
+
+ # Only let local users create rooms if they are inviting only one
+ # other user, and that user matches the rules above.
+ can_only_create_one_to_one_rooms: false
+
+ # Only let local users invite during room creation, regardless of the
+ # domain mapping rules above.
+ can_only_invite_during_room_creation: false
+
+ # Prevent local users from inviting users from certain domains to
+ # rooms published in the room directory.
+ domains_prevented_from_being_invited_to_published_rooms: []
+
+ # Allow third party invites
+ can_invite_by_third_party_id: true
+
+ Don't forget to consider whether users should be able to invite other
+ users from your own domain; if so, include it in the domain_mapping.
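+
+ For example (an illustrative mapping, not a recommended default), to let
+ users on "example.com" invite both each other and users on "other.com":
+
+ domain_mapping:
+ "example.com": [ "example.com", "other.com" ]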
+ """
+
+ def __init__(self, config):
+ self.domain_mapping = config["domain_mapping"] or {}
+ self.default = config["default"]
+
+ self.can_only_join_rooms_with_invite = config.get(
+ "can_only_join_rooms_with_invite", False
+ )
+ self.can_only_create_one_to_one_rooms = config.get(
+ "can_only_create_one_to_one_rooms", False
+ )
+ self.can_only_invite_during_room_creation = config.get(
+ "can_only_invite_during_room_creation", False
+ )
+ self.can_invite_by_third_party_id = config.get(
+ "can_invite_by_third_party_id", True
+ )
+ self.domains_prevented_from_being_invited_to_published_rooms = config.get(
+ "domains_prevented_from_being_invited_to_published_rooms", []
+ )
+
+ def check_event_for_spam(self, event):
+ """Implements synapse.events.SpamChecker.check_event_for_spam
+ """
+ return False
+
+ def user_may_invite(
+ self,
+ inviter_userid,
+ invitee_userid,
+ third_party_invite,
+ room_id,
+ new_room,
+ published_room=False,
+ ):
+ """Implements synapse.events.SpamChecker.user_may_invite
+ """
+ if self.can_only_invite_during_room_creation and not new_room:
+ return False
+
+ if not self.can_invite_by_third_party_id and third_party_invite:
+ return False
+
+ # This is a third party invite (without a bound mxid), so unless we have
+ # banned all third party invites (above) we allow it.
+ if not invitee_userid:
+ return True
+
+ inviter_domain = self._get_domain_from_id(inviter_userid)
+ invitee_domain = self._get_domain_from_id(invitee_userid)
+
+ if inviter_domain not in self.domain_mapping:
+ return self.default
+
+ if (
+ published_room
+ and invitee_domain
+ in self.domains_prevented_from_being_invited_to_published_rooms
+ ):
+ return False
+
+ return invitee_domain in self.domain_mapping[inviter_domain]
+
+ def user_may_create_room(
+ self, userid, invite_list, third_party_invite_list, cloning
+ ):
+ """Implements synapse.events.SpamChecker.user_may_create_room
+ """
+
+ if cloning:
+ return True
+
+ if not self.can_invite_by_third_party_id and third_party_invite_list:
+ return False
+
+ number_of_invites = len(invite_list) + len(third_party_invite_list)
+
+ if self.can_only_create_one_to_one_rooms and number_of_invites != 1:
+ return False
+
+ return True
+
+ def user_may_create_room_alias(self, userid, room_alias):
+ """Implements synapse.events.SpamChecker.user_may_create_room_alias
+ """
+ return True
+
+ def user_may_publish_room(self, userid, room_id):
+ """Implements synapse.events.SpamChecker.user_may_publish_room
+ """
+ return True
+
+ def user_may_join_room(self, userid, room_id, is_invited):
+ """Implements synapse.events.SpamChecker.user_may_join_room
+ """
+ if self.can_only_join_rooms_with_invite and not is_invited:
+ return False
+
+ return True
+
+ @staticmethod
+ def parse_config(config):
+ """Implements synapse.events.SpamChecker.parse_config
+ """
+ if "default" in config:
+ return config
+ else:
+ raise ConfigError("No default set for spam_config DomainRuleChecker")
+
+ @staticmethod
+ def _get_domain_from_id(mxid):
+ """Parses a string and returns the domain part of the mxid.
+
+ Args:
+ mxid (str): a valid mxid
+
+ Returns:
+ str: the domain part of the mxid
+
+ """
+ idx = mxid.find(":")
+ if idx == -1:
+ raise Exception("Invalid ID: %r" % (mxid,))
+ return mxid[idx + 1 :]
diff --git a/synapse/server.py b/synapse/server.py
index 9e28dba2b1..79987c39e8 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -23,6 +23,7 @@
# Imports required for the default HomeServer() implementation
import abc
import logging
+import os
from twisted.enterprise import adbapi
from twisted.mail.smtp import sendmail
@@ -65,6 +66,7 @@ from synapse.handlers.groups_local import GroupsLocalHandler
from synapse.handlers.initial_sync import InitialSyncHandler
from synapse.handlers.message import EventCreationHandler, MessageHandler
from synapse.handlers.pagination import PaginationHandler
+from synapse.handlers.password_policy import PasswordPolicyHandler
from synapse.handlers.presence import PresenceHandler
from synapse.handlers.profile import BaseProfileHandler, MasterProfileHandler
from synapse.handlers.read_marker import ReadMarkerHandler
@@ -166,6 +168,7 @@ class HomeServer(object):
"event_builder_factory",
"filtering",
"http_client_context_factory",
+ "proxied_http_client",
"simple_http_client",
"media_repository",
"media_repository_resource",
@@ -196,6 +199,7 @@ class HomeServer(object):
"account_validity_handler",
"saml_handler",
"event_client_serializer",
+ "password_policy_handler",
]
REQUIRED_ON_MASTER_STARTUP = ["user_directory_handler", "stats_handler"]
@@ -221,6 +225,7 @@ class HomeServer(object):
self.clock = Clock(reactor)
self.distributor = Distributor()
self.ratelimiter = Ratelimiter()
+ self.admin_redaction_ratelimiter = Ratelimiter()
self.registration_ratelimiter = Ratelimiter()
self.datastore = None
@@ -279,6 +284,9 @@ class HomeServer(object):
def get_registration_ratelimiter(self):
return self.registration_ratelimiter
+ def get_admin_redaction_ratelimiter(self):
+ return self.admin_redaction_ratelimiter
+
def build_federation_client(self):
return FederationClient(self)
@@ -304,6 +312,13 @@ class HomeServer(object):
def build_simple_http_client(self):
return SimpleHttpClient(self)
+ def build_proxied_http_client(self):
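+ # an HTTP client which routes requests via the proxies named in the
+ # http_proxy / HTTPS_PROXY environment variables, when they are set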
+ return SimpleHttpClient(
+ self,
+ http_proxy=os.getenv("http_proxy"),
+ https_proxy=os.getenv("HTTPS_PROXY"),
+ )
+
def build_room_creation_handler(self):
return RoomCreationHandler(self)
@@ -533,6 +548,9 @@ class HomeServer(object):
def build_event_client_serializer(self):
return EventClientSerializer(self)
+ def build_password_policy_handler(self):
+ return PasswordPolicyHandler(self)
+
def remove_pusher(self, app_id, push_key, user_id):
return self.get_pusherpool().remove_pusher(app_id, push_key, user_id)
diff --git a/synapse/server.pyi b/synapse/server.pyi
index 16f8f6b573..56f9cd06e5 100644
--- a/synapse/server.pyi
+++ b/synapse/server.pyi
@@ -12,6 +12,7 @@ import synapse.handlers.message
import synapse.handlers.room
import synapse.handlers.room_member
import synapse.handlers.set_password
+import synapse.http.client
import synapse.rest.media.v1.media_repository
import synapse.server_notices.server_notices_manager
import synapse.server_notices.server_notices_sender
@@ -38,6 +39,14 @@ class HomeServer(object):
pass
def get_state_resolution_handler(self) -> synapse.state.StateResolutionHandler:
pass
+ def get_simple_http_client(self) -> synapse.http.client.SimpleHttpClient:
+ """Fetch an HTTP client implementation which doesn't do any blacklisting
+ or support any HTTP_PROXY settings"""
+ pass
+ def get_proxied_http_client(self) -> synapse.http.client.SimpleHttpClient:
+ """Fetch an HTTP client implementation which doesn't do any blacklisting
+ but does support HTTP_PROXY settings"""
+ pass
def get_deactivate_account_handler(
self
) -> synapse.handlers.deactivate_account.DeactivateAccountHandler:
diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py
index 729c097e6d..81c4aff496 100644
--- a/synapse/server_notices/resource_limits_server_notices.py
+++ b/synapse/server_notices/resource_limits_server_notices.py
@@ -193,4 +193,4 @@ class ResourceLimitsServerNotices(object):
if event_id in referenced_events:
referenced_events.remove(event.event_id)
- return (currently_blocked, referenced_events)
+ return currently_blocked, referenced_events
diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py
index a0d34f16ea..2b0f4c79ee 100644
--- a/synapse/state/__init__.py
+++ b/synapse/state/__init__.py
@@ -136,7 +136,6 @@ class StateHandler(object):
if event_id:
event = yield self.store.get_event(event_id, allow_none=True)
return event
- return
state_map = yield self.store.get_events(
list(state.values()), get_prev_content=False
diff --git a/synapse/static/client/login/js/login.js b/synapse/static/client/login/js/login.js
index e02663f50e..276c271bbe 100644
--- a/synapse/static/client/login/js/login.js
+++ b/synapse/static/client/login/js/login.js
@@ -62,7 +62,7 @@ var show_login = function() {
$("#sso_flow").show();
}
- if (!matrixLogin.serverAcceptsPassword && !matrixLogin.serverAcceptsCas) {
+ if (!matrixLogin.serverAcceptsPassword && !matrixLogin.serverAcceptsCas && !matrixLogin.serverAcceptsSso) {
$("#no_login_types").show();
}
};
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index 489ce82fae..1ef5662c31 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -360,14 +360,11 @@ class SQLBaseStore(object):
expiration_ts,
)
- self._simple_insert_txn(
+ self._simple_upsert_txn(
txn,
"account_validity",
- values={
- "user_id": user_id,
- "expiration_ts_ms": expiration_ts,
- "email_sent": False,
- },
+ keyvalues={"user_id": user_id},
+ values={"expiration_ts_ms": expiration_ts, "email_sent": False},
)
def start_profiling(self):
@@ -1395,14 +1392,22 @@ class SQLBaseStore(object):
"""
txn.call_after(self._invalidate_state_caches, room_id, members_changed)
- # We need to be careful that the size of the `members_changed` list
- # isn't so large that it causes problems sending over replication, so we
- # send them in chunks.
- # Max line length is 16K, and max user ID length is 255, so 50 should
- # be safe.
- for chunk in batch_iter(members_changed, 50):
- keys = itertools.chain([room_id], chunk)
- self._send_invalidation_to_replication(txn, _CURRENT_STATE_CACHE_NAME, keys)
+ if members_changed:
+ # We need to be careful that the size of the `members_changed` list
+ # isn't so large that it causes problems sending over replication, so we
+ # send them in chunks.
+ # Max line length is 16K, and max user ID length is 255, so 50 should
+ # be safe.
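+ # (e.g. 1000 changed members become 20 invalidation messages,
+ # each carrying the room_id plus up to 50 user IDs)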
+ for chunk in batch_iter(members_changed, 50):
+ keys = itertools.chain([room_id], chunk)
+ self._send_invalidation_to_replication(
+ txn, _CURRENT_STATE_CACHE_NAME, keys
+ )
+ else:
+ # if no members changed, we still need to invalidate the other caches.
+ self._send_invalidation_to_replication(
+ txn, _CURRENT_STATE_CACHE_NAME, [room_id]
+ )
def _invalidate_state_caches(self, room_id, members_changed):
"""Invalidates caches that are based on the current state, but does
diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py
index 9fa5b4f3d6..6afbfc0d74 100644
--- a/synapse/storage/account_data.py
+++ b/synapse/storage/account_data.py
@@ -90,7 +90,7 @@ class AccountDataWorkerStore(SQLBaseStore):
room_data = by_room.setdefault(row["room_id"], {})
room_data[row["account_data_type"]] = json.loads(row["content"])
- return (global_account_data, by_room)
+ return global_account_data, by_room
return self.runInteraction(
"get_account_data_for_user", get_account_data_for_user_txn
@@ -205,7 +205,7 @@ class AccountDataWorkerStore(SQLBaseStore):
)
txn.execute(sql, (last_room_id, current_id, limit))
room_results = txn.fetchall()
- return (global_results, room_results)
+ return global_results, room_results
return self.runInteraction(
"get_all_updated_account_data_txn", get_updated_account_data_txn
@@ -244,13 +244,13 @@ class AccountDataWorkerStore(SQLBaseStore):
room_account_data = account_data_by_room.setdefault(row[0], {})
room_account_data[row[1]] = json.loads(row[2])
- return (global_account_data, account_data_by_room)
+ return global_account_data, account_data_by_room
changed = self._account_data_stream_cache.has_entity_changed(
user_id, int(stream_id)
)
if not changed:
- return ({}, {})
+ return {}, {}
return self.runInteraction(
"get_updated_account_data_for_user", get_updated_account_data_for_user_txn
diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py
index 05d9c05c3f..ec19ce5ca4 100644
--- a/synapse/storage/appservice.py
+++ b/synapse/storage/appservice.py
@@ -35,7 +35,7 @@ def _make_exclusive_regex(services_cache):
exclusive_user_regexes = [
regex.pattern
for service in services_cache
- for regex in service.get_exlusive_user_regexes()
+ for regex in service.get_exclusive_user_regexes()
]
if exclusive_user_regexes:
exclusive_user_regex = "|".join("(" + r + ")" for r in exclusive_user_regexes)
@@ -165,7 +165,6 @@ class ApplicationServiceTransactionWorkerStore(
)
if result:
return result.get("state")
- return
return None
def set_appservice_state(self, service, state):
@@ -358,7 +357,7 @@ class ApplicationServiceTransactionWorkerStore(
events = yield self.get_events_as_list(event_ids)
- return (upper_bound, events)
+ return upper_bound, events
class ApplicationServiceTransactionStore(ApplicationServiceTransactionWorkerStore):
diff --git a/synapse/storage/deviceinbox.py b/synapse/storage/deviceinbox.py
index 79bb0ea46d..6b7458304e 100644
--- a/synapse/storage/deviceinbox.py
+++ b/synapse/storage/deviceinbox.py
@@ -19,6 +19,7 @@ from canonicaljson import json
from twisted.internet import defer
+from synapse.logging.opentracing import log_kv, set_tag, trace
from synapse.storage._base import SQLBaseStore
from synapse.storage.background_updates import BackgroundUpdateStore
from synapse.util.caches.expiringcache import ExpiringCache
@@ -66,12 +67,13 @@ class DeviceInboxWorkerStore(SQLBaseStore):
messages.append(json.loads(row[1]))
if len(messages) < limit:
stream_pos = current_stream_id
- return (messages, stream_pos)
+ return messages, stream_pos
return self.runInteraction(
"get_new_messages_for_device", get_new_messages_for_device_txn
)
+ @trace
@defer.inlineCallbacks
def delete_messages_for_device(self, user_id, device_id, up_to_stream_id):
"""
@@ -87,11 +89,15 @@ class DeviceInboxWorkerStore(SQLBaseStore):
last_deleted_stream_id = self._last_device_delete_cache.get(
(user_id, device_id), None
)
+
+ set_tag("last_deleted_stream_id", last_deleted_stream_id)
+
if last_deleted_stream_id:
has_changed = self._device_inbox_stream_cache.has_entity_changed(
user_id, last_deleted_stream_id
)
if not has_changed:
+ log_kv({"message": "No changes in cache since last check"})
return 0
def delete_messages_for_device_txn(txn):
@@ -107,6 +113,10 @@ class DeviceInboxWorkerStore(SQLBaseStore):
"delete_messages_for_device", delete_messages_for_device_txn
)
+ log_kv(
+ {"message": "deleted {} messages for device".format(count), "count": count}
+ )
+
# Update the cache, ensuring that we only ever increase the value
last_deleted_stream_id = self._last_device_delete_cache.get(
(user_id, device_id), 0
@@ -117,6 +127,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):
return count
+ @trace
def get_new_device_msgs_for_remote(
self, destination, last_stream_id, current_stream_id, limit
):
@@ -132,16 +143,23 @@ class DeviceInboxWorkerStore(SQLBaseStore):
in the stream the messages got to.
"""
+ set_tag("destination", destination)
+ set_tag("last_stream_id", last_stream_id)
+ set_tag("current_stream_id", current_stream_id)
+ set_tag("limit", limit)
+
has_changed = self._device_federation_outbox_stream_cache.has_entity_changed(
destination, last_stream_id
)
if not has_changed or last_stream_id == current_stream_id:
+ log_kv({"message": "No new messages in stream"})
return defer.succeed(([], current_stream_id))
if limit <= 0:
# This can happen if we run out of room for EDUs in the transaction.
return defer.succeed(([], last_stream_id))
+ @trace
def get_new_messages_for_remote_destination_txn(txn):
sql = (
"SELECT stream_id, messages_json FROM device_federation_outbox"
@@ -156,14 +174,16 @@ class DeviceInboxWorkerStore(SQLBaseStore):
stream_pos = row[0]
messages.append(json.loads(row[1]))
if len(messages) < limit:
+ log_kv({"message": "Set stream position to current position"})
stream_pos = current_stream_id
- return (messages, stream_pos)
+ return messages, stream_pos
return self.runInteraction(
"get_new_device_msgs_for_remote",
get_new_messages_for_remote_destination_txn,
)
+ @trace
def delete_device_msgs_for_remote(self, destination, up_to_stream_id):
"""Used to delete messages when the remote destination acknowledges
their receipt.
@@ -214,6 +234,7 @@ class DeviceInboxStore(DeviceInboxWorkerStore, BackgroundUpdateStore):
expiry_ms=30 * 60 * 1000,
)
+ @trace
@defer.inlineCallbacks
def add_messages_to_device_inbox(
self, local_messages_by_user_then_device, remote_messages_by_destination
diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py
index 8f72d92895..99ecbd70d2 100644
--- a/synapse/storage/devices.py
+++ b/synapse/storage/devices.py
@@ -1,5 +1,7 @@
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
+# Copyright 2019 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,7 +22,13 @@ from canonicaljson import json
from twisted.internet import defer
-from synapse.api.errors import StoreError
+from synapse.api.errors import Codes, StoreError
+from synapse.logging.opentracing import (
+ get_active_span_text_map,
+ set_tag,
+ trace,
+ whitelisted_homeserver,
+)
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage._base import Cache, SQLBaseStore, db_to_json
from synapse.storage.background_updates import BackgroundUpdateStore
@@ -36,7 +44,8 @@ DROP_DEVICE_LIST_STREAMS_NON_UNIQUE_INDEXES = (
class DeviceWorkerStore(SQLBaseStore):
def get_device(self, user_id, device_id):
- """Retrieve a device.
+ """Retrieve a device. Only returns devices that are not marked as
+ hidden.
Args:
user_id (str): The ID of the user which owns the device
@@ -48,14 +57,15 @@ class DeviceWorkerStore(SQLBaseStore):
"""
return self._simple_select_one(
table="devices",
- keyvalues={"user_id": user_id, "device_id": device_id},
+ keyvalues={"user_id": user_id, "device_id": device_id, "hidden": False},
retcols=("user_id", "device_id", "display_name"),
desc="get_device",
)
@defer.inlineCallbacks
def get_devices_by_user(self, user_id):
- """Retrieve all of a user's registered devices.
+ """Retrieve all of a user's registered devices. Only returns devices
+ that are not marked as hidden.
Args:
user_id (str):
@@ -66,13 +76,14 @@ class DeviceWorkerStore(SQLBaseStore):
"""
devices = yield self._simple_select_list(
table="devices",
- keyvalues={"user_id": user_id},
+ keyvalues={"user_id": user_id, "hidden": False},
retcols=("user_id", "device_id", "display_name"),
desc="get_devices_by_user",
)
return {d["device_id"]: d for d in devices}
+ @trace
@defer.inlineCallbacks
def get_devices_by_remote(self, destination, from_stream_id, limit):
"""Get stream of updates to send to remote servers
@@ -88,7 +99,7 @@ class DeviceWorkerStore(SQLBaseStore):
destination, int(from_stream_id)
)
if not has_changed:
- return (now_stream_id, [])
+ return now_stream_id, []
# We retrieve n+1 devices from the list of outbound pokes where n is
# our outbound device update limit. We then check if the very last
@@ -111,7 +122,7 @@ class DeviceWorkerStore(SQLBaseStore):
# Return an empty list if there are no updates
if not updates:
- return (now_stream_id, [])
+ return now_stream_id, []
# if we have exceeded the limit, we need to exclude any results with the
# same stream_id as the last row.
@@ -127,8 +138,15 @@ class DeviceWorkerStore(SQLBaseStore):
# (user_id, device_id) entries into a map, with the value being
# the max stream_id across each set of duplicate entries
#
- # maps (user_id, device_id) -> stream_id
+ # maps (user_id, device_id) -> (stream_id, opentracing_context)
# as long as their stream_id does not match that of the last row
+ #
+ # opentracing_context contains the opentracing metadata for the request
+ # that created the poke
+ #
+ # The most recent request's opentracing_context is used as the
+ # context which created the EDU.
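+ #
+ # For example, two pokes for the same (user_id, device_id) with
+ # stream_ids 5 and 7 collapse to a single entry whose value is
+ # (7, <opentracing context of the stream_id 7 poke>).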
+
query_map = {}
for update in updates:
if stream_id_cutoff is not None and update[2] >= stream_id_cutoff:
@@ -136,7 +154,14 @@ class DeviceWorkerStore(SQLBaseStore):
break
key = (update[0], update[1])
- query_map[key] = max(query_map.get(key, 0), update[2])
+
+ update_context = update[3]
+ update_stream_id = update[2]
+
+ previous_update_stream_id, _ = query_map.get(key, (0, None))
+
+ if update_stream_id > previous_update_stream_id:
+ query_map[key] = (update_stream_id, update_context)
# If we didn't find any updates with a stream_id lower than the cutoff, it
# means that there are more than limit updates all of which have the same
@@ -147,13 +172,13 @@ class DeviceWorkerStore(SQLBaseStore):
# skip that stream_id and return an empty list, and continue with the next
# stream_id next time.
if not query_map:
- return (stream_id_cutoff, [])
+ return stream_id_cutoff, []
results = yield self._get_device_update_edus_by_remote(
destination, from_stream_id, query_map
)
- return (now_stream_id, results)
+ return now_stream_id, results
def _get_devices_by_remote_txn(
self, txn, destination, from_stream_id, now_stream_id, limit
@@ -171,7 +196,7 @@ class DeviceWorkerStore(SQLBaseStore):
List: List of device updates
"""
sql = """
- SELECT user_id, device_id, stream_id FROM device_lists_outbound_pokes
+ SELECT user_id, device_id, stream_id, opentracing_context FROM device_lists_outbound_pokes
WHERE destination = ? AND ? < stream_id AND stream_id <= ? AND sent = ?
ORDER BY stream_id
LIMIT ?
@@ -187,8 +212,9 @@ class DeviceWorkerStore(SQLBaseStore):
Args:
destination (str): The host the device updates are intended for
from_stream_id (int): The minimum stream_id to filter updates by, exclusive
- query_map (Dict[(str, str): int]): Dictionary mapping
- user_id/device_id to update stream_id
+ query_map (Dict[(str, str): (int, str|None)]): Dictionary mapping
+ user_id/device_id to update stream_id and the relevant json-encoded
+ opentracing context
Returns:
List[Dict]: List of objects representing a device update EDU
@@ -210,12 +236,13 @@ class DeviceWorkerStore(SQLBaseStore):
destination, user_id, from_stream_id
)
for device_id, device in iteritems(user_devices):
- stream_id = query_map[(user_id, device_id)]
+ stream_id, opentracing_context = query_map[(user_id, device_id)]
result = {
"user_id": user_id,
"device_id": device_id,
"prev_id": [prev_id] if prev_id else [],
"stream_id": stream_id,
+ "org.matrix.opentracing_context": opentracing_context,
}
prev_id = stream_id
@@ -299,6 +326,7 @@ class DeviceWorkerStore(SQLBaseStore):
def get_device_stream_token(self):
return self._device_list_id_gen.get_current_token()
+ @trace
@defer.inlineCallbacks
def get_user_devices_from_cache(self, query_list):
"""Get the devices (and keys if any) for remote users from the cache.
@@ -330,7 +358,10 @@ class DeviceWorkerStore(SQLBaseStore):
else:
results[user_id] = yield self._get_cached_devices_for_user(user_id)
- return (user_ids_not_in_cache, results)
+ set_tag("in_cache", results)
+ set_tag("not_in_cache", user_ids_not_in_cache)
+
+ return user_ids_not_in_cache, results
@cachedInlineCallbacks(num_args=2, tree=True)
def _get_cached_user_device(self, user_id, device_id):
@@ -540,6 +571,8 @@ class DeviceStore(DeviceWorkerStore, BackgroundUpdateStore):
Returns:
defer.Deferred: boolean whether the device was inserted or an
existing device existed with that ID.
+ Raises:
+ StoreError: if the device is already in use
"""
key = (user_id, device_id)
if self.device_id_exists_cache.get(key, None):
@@ -552,12 +585,25 @@ class DeviceStore(DeviceWorkerStore, BackgroundUpdateStore):
"user_id": user_id,
"device_id": device_id,
"display_name": initial_device_display_name,
+ "hidden": False,
},
desc="store_device",
or_ignore=True,
)
+ if not inserted:
+ # if the device already exists, check if it's a real device, or
+ # if the device ID is reserved by something else
+ hidden = yield self._simple_select_one_onecol(
+ "devices",
+ keyvalues={"user_id": user_id, "device_id": device_id},
+ retcol="hidden",
+ )
+ if hidden:
+ raise StoreError(400, "The device ID is in use", Codes.FORBIDDEN)
self.device_id_exists_cache.prefill(key, True)
return inserted
+ except StoreError:
+ raise
except Exception as e:
logger.error(
"store_device with device_id=%s(%r) user_id=%s(%r)"
@@ -584,7 +630,7 @@ class DeviceStore(DeviceWorkerStore, BackgroundUpdateStore):
"""
yield self._simple_delete_one(
table="devices",
- keyvalues={"user_id": user_id, "device_id": device_id},
+ keyvalues={"user_id": user_id, "device_id": device_id, "hidden": False},
desc="delete_device",
)
@@ -604,14 +650,15 @@ class DeviceStore(DeviceWorkerStore, BackgroundUpdateStore):
table="devices",
column="device_id",
iterable=device_ids,
- keyvalues={"user_id": user_id},
+ keyvalues={"user_id": user_id, "hidden": False},
desc="delete_devices",
)
for device_id in device_ids:
self.device_id_exists_cache.invalidate((user_id, device_id))
def update_device(self, user_id, device_id, new_display_name=None):
- """Update a device.
+ """Update a device. Only updates the device if it is not marked as
+ hidden.
Args:
user_id (str): The ID of the user which owns the device
@@ -630,7 +677,7 @@ class DeviceStore(DeviceWorkerStore, BackgroundUpdateStore):
return defer.succeed(None)
return self._simple_update_one(
table="devices",
- keyvalues={"user_id": user_id, "device_id": device_id},
+ keyvalues={"user_id": user_id, "device_id": device_id, "hidden": False},
updatevalues=updates,
desc="update_device",
)
@@ -814,6 +861,8 @@ class DeviceStore(DeviceWorkerStore, BackgroundUpdateStore):
],
)
+ context = get_active_span_text_map()
+
self._simple_insert_many_txn(
txn,
table="device_lists_outbound_pokes",
@@ -825,6 +874,9 @@ class DeviceStore(DeviceWorkerStore, BackgroundUpdateStore):
"device_id": device_id,
"sent": False,
"ts": now,
+ "opentracing_context": json.dumps(context)
+ if whitelisted_homeserver(destination)
+ else "{}",
}
for destination in hosts
for device_id in device_ids
diff --git a/synapse/storage/directory.py b/synapse/storage/directory.py
index e966a73f3d..eed7757ed5 100644
--- a/synapse/storage/directory.py
+++ b/synapse/storage/directory.py
@@ -47,7 +47,6 @@ class DirectoryWorkerStore(SQLBaseStore):
if not room_id:
return None
- return
servers = yield self._simple_select_onecol(
"room_alias_servers",
@@ -58,7 +57,6 @@ class DirectoryWorkerStore(SQLBaseStore):
if not servers:
return None
- return
return RoomAliasMapping(room_id, room_alias.to_string(), servers)
diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py
index 99128f2df7..be2fe2bab6 100644
--- a/synapse/storage/e2e_room_keys.py
+++ b/synapse/storage/e2e_room_keys.py
@@ -18,6 +18,7 @@ import json
from twisted.internet import defer
from synapse.api.errors import StoreError
+from synapse.logging.opentracing import log_kv, trace
from ._base import SQLBaseStore
@@ -82,11 +83,11 @@ class EndToEndRoomKeyStore(SQLBaseStore):
table="e2e_room_keys",
keyvalues={
"user_id": user_id,
+ "version": version,
"room_id": room_id,
"session_id": session_id,
},
values={
- "version": version,
"first_message_index": room_key["first_message_index"],
"forwarded_count": room_key["forwarded_count"],
"is_verified": room_key["is_verified"],
@@ -94,7 +95,16 @@ class EndToEndRoomKeyStore(SQLBaseStore):
},
lock=False,
)
+ log_kv(
+ {
+ "message": "Set room key",
+ "room_id": room_id,
+ "session_id": session_id,
+ "room_key": room_key,
+ }
+ )
+ @trace
@defer.inlineCallbacks
def get_e2e_room_keys(self, user_id, version, room_id=None, session_id=None):
"""Bulk get the E2E room keys for a given backup, optionally filtered to a given
@@ -153,6 +163,7 @@ class EndToEndRoomKeyStore(SQLBaseStore):
return sessions
+ @trace
@defer.inlineCallbacks
def delete_e2e_room_keys(self, user_id, version, room_id=None, session_id=None):
"""Bulk delete the E2E room keys for a given backup, optionally filtered to a given
@@ -236,6 +247,7 @@ class EndToEndRoomKeyStore(SQLBaseStore):
"get_e2e_room_keys_version_info", _get_e2e_room_keys_version_info_txn
)
+ @trace
def create_e2e_room_keys_version(self, user_id, info):
"""Atomically creates a new version of this user's e2e_room_keys store
with the given version info.
@@ -276,6 +288,7 @@ class EndToEndRoomKeyStore(SQLBaseStore):
"create_e2e_room_keys_version_txn", _create_e2e_room_keys_version_txn
)
+ @trace
def update_e2e_room_keys_version(self, user_id, version, info):
"""Update a given backup version
@@ -292,6 +305,7 @@ class EndToEndRoomKeyStore(SQLBaseStore):
desc="update_e2e_room_keys_version",
)
+ @trace
def delete_e2e_room_keys_version(self, user_id, version=None):
"""Delete a given backup version of the user's room keys.
Doesn't delete their actual key data.
diff --git a/synapse/storage/end_to_end_keys.py b/synapse/storage/end_to_end_keys.py
index 1e07474e70..b6f2538e84 100644
--- a/synapse/storage/end_to_end_keys.py
+++ b/synapse/storage/end_to_end_keys.py
@@ -18,12 +18,14 @@ from canonicaljson import encode_canonical_json
from twisted.internet import defer
+from synapse.logging.opentracing import log_kv, set_tag, trace
from synapse.util.caches.descriptors import cached
from ._base import SQLBaseStore, db_to_json
class EndToEndKeyWorkerStore(SQLBaseStore):
+ @trace
@defer.inlineCallbacks
def get_e2e_device_keys(
self, query_list, include_all_devices=False, include_deleted_devices=False
@@ -40,6 +42,7 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
Dict mapping from user-id to dict mapping from device_id to
dict containing "key_json", "device_display_name".
"""
+ set_tag("query_list", query_list)
if not query_list:
return {}
@@ -57,9 +60,13 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
return results
+ @trace
def _get_e2e_device_keys_txn(
self, txn, query_list, include_all_devices=False, include_deleted_devices=False
):
+ set_tag("include_all_devices", include_all_devices)
+ set_tag("include_deleted_devices", include_deleted_devices)
+
query_clauses = []
query_params = []
@@ -85,7 +92,7 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
" k.key_json"
" FROM devices d"
" %s JOIN e2e_device_keys_json k USING (user_id, device_id)"
- " WHERE %s"
+ " WHERE %s AND NOT d.hidden"
) % (
"LEFT" if include_all_devices else "INNER",
" OR ".join("(" + q + ")" for q in query_clauses),
@@ -104,6 +111,7 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
for user_id, device_id in deleted_devices:
result.setdefault(user_id, {})[device_id] = None
+ log_kv(result)
return result
@defer.inlineCallbacks
@@ -129,8 +137,9 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
keyvalues={"user_id": user_id, "device_id": device_id},
desc="add_e2e_one_time_keys_check",
)
-
- return {(row["algorithm"], row["key_id"]): row["key_json"] for row in rows}
+ result = {(row["algorithm"], row["key_id"]): row["key_json"] for row in rows}
+ log_kv({"message": "Fetched one time keys for user", "one_time_keys": result})
+ return result
@defer.inlineCallbacks
def add_e2e_one_time_keys(self, user_id, device_id, time_now, new_keys):
@@ -146,6 +155,9 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
"""
def _add_e2e_one_time_keys(txn):
+ set_tag("user_id", user_id)
+ set_tag("device_id", device_id)
+ set_tag("new_keys", new_keys)
# We are protected from race between lookup and insertion due to
# a unique constraint. If there is a race of two calls to
# `add_e2e_one_time_keys` then they'll conflict and we will only
@@ -202,6 +214,11 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
"""
def _set_e2e_device_keys_txn(txn):
+ set_tag("user_id", user_id)
+ set_tag("device_id", device_id)
+ set_tag("time_now", time_now)
+ set_tag("device_keys", device_keys)
+
old_key_json = self._simple_select_one_onecol_txn(
txn,
table="e2e_device_keys_json",
@@ -215,6 +232,7 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
new_key_json = encode_canonical_json(device_keys).decode("utf-8")
if old_key_json == new_key_json:
+ log_kv({"Message": "Device key already stored."})
return False
self._simple_upsert_txn(
@@ -223,7 +241,7 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
keyvalues={"user_id": user_id, "device_id": device_id},
values={"ts_added_ms": time_now, "key_json": new_key_json},
)
-
+ log_kv({"message": "Device keys stored."})
return True
return self.runInteraction("set_e2e_device_keys", _set_e2e_device_keys_txn)
@@ -231,6 +249,7 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
def claim_e2e_one_time_keys(self, query_list):
"""Take a list of one time keys out of the database"""
+ @trace
def _claim_e2e_one_time_keys(txn):
sql = (
"SELECT key_id, key_json FROM e2e_one_time_keys_json"
@@ -252,7 +271,13 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
" AND key_id = ?"
)
for user_id, device_id, algorithm, key_id in delete:
+ log_kv(
+ {
+ "message": "Executing claim e2e_one_time_keys transaction on database."
+ }
+ )
txn.execute(sql, (user_id, device_id, algorithm, key_id))
+ log_kv({"message": "finished executing and invalidating cache"})
self._invalidate_cache_and_stream(
txn, self.count_e2e_one_time_keys, (user_id, device_id)
)
@@ -262,6 +287,13 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
def delete_e2e_keys_by_device(self, user_id, device_id):
def delete_e2e_keys_by_device_txn(txn):
+ log_kv(
+ {
+ "message": "Deleting keys for device",
+ "device_id": device_id,
+ "user_id": user_id,
+ }
+ )
self._simple_delete_txn(
txn,
table="e2e_device_keys_json",
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index ac876287fc..6de125d2f9 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -23,7 +23,7 @@ from functools import wraps
from six import iteritems, text_type
from six.moves import range
-from canonicaljson import json
+from canonicaljson import encode_canonical_json, json
from prometheus_client import Counter, Histogram
from twisted.internet import defer
@@ -33,6 +33,7 @@ from synapse.api.constants import EventTypes
from synapse.api.errors import SynapseError
from synapse.events import EventBase # noqa: F401
from synapse.events.snapshot import EventContext # noqa: F401
+from synapse.events.utils import prune_event_dict
from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable
from synapse.logging.utils import log_function
from synapse.metrics import BucketCollector
@@ -262,6 +263,14 @@ class EventsStore(
hs.get_clock().looping_call(read_forward_extremities, 60 * 60 * 1000)
+ def _censor_redactions():
+ return run_as_background_process(
+ "_censor_redactions", self._censor_redactions
+ )
+
+ if self.hs.config.redaction_retention_period is not None:
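+ # censor in the background every 5 minutes; each pass handles at
+ # most 100 redactions, so any backlog is drained gradually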
+ hs.get_clock().looping_call(_censor_redactions, 5 * 60 * 1000)
+
@defer.inlineCallbacks
def _read_forward_extremities(self):
def fetch(txn):
@@ -810,7 +819,7 @@ class EventsStore(
# If the old and new groups are the same then we don't need to do
# anything.
if old_state_groups == new_state_groups:
- return (None, None)
+ return None, None
if len(new_state_groups) == 1 and len(old_state_groups) == 1:
# If we're going from one state group to another, lets check if
@@ -827,7 +836,7 @@ class EventsStore(
# the current state in memory then lets also return that,
# but it doesn't matter if we don't.
new_state = state_groups_map.get(new_state_group)
- return (new_state, delta_ids)
+ return new_state, delta_ids
# Now that we have calculated new_state_groups we need to get
# their state IDs so we can resolve to a single state set.
@@ -839,7 +848,7 @@ class EventsStore(
if len(new_state_groups) == 1:
# If there is only one state group, then we know what the current
# state is.
- return (state_groups_map[new_state_groups.pop()], None)
+ return state_groups_map[new_state_groups.pop()], None
# Ok, we need to defer to the state handler to resolve our state sets.
@@ -868,7 +877,7 @@ class EventsStore(
state_res_store=StateResolutionStore(self),
)
- return (res.state, None)
+ return res.state, None
@defer.inlineCallbacks
def _calculate_state_delta(self, room_id, current_state):
@@ -891,7 +900,7 @@ class EventsStore(
if ev_id != existing_state.get(key)
}
- return (to_delete, to_insert)
+ return to_delete, to_insert
@log_function
def _persist_events_txn(
@@ -1302,15 +1311,11 @@ class EventsStore(
"event_reference_hashes",
"event_search",
"event_to_state_groups",
- "guest_access",
- "history_visibility",
"local_invites",
- "room_names",
"state_events",
"rejections",
"redactions",
"room_memberships",
- "topics",
):
txn.executemany(
"DELETE FROM %s WHERE event_id = ?" % (table,),
@@ -1454,10 +1459,10 @@ class EventsStore(
for event, _ in events_and_contexts:
if event.type == EventTypes.Name:
- # Insert into the room_names and event_search tables.
+ # Insert into the event_search table.
self._store_room_name_txn(txn, event)
elif event.type == EventTypes.Topic:
- # Insert into the topics table and event_search table.
+ # Insert into the event_search table.
self._store_room_topic_txn(txn, event)
elif event.type == EventTypes.Message:
# Insert into the event_search table.
@@ -1465,12 +1470,9 @@ class EventsStore(
elif event.type == EventTypes.Redaction:
# Insert into the redactions table.
self._store_redaction(txn, event)
- elif event.type == EventTypes.RoomHistoryVisibility:
- # Insert into the event_search table.
- self._store_history_visibility_txn(txn, event)
- elif event.type == EventTypes.GuestAccess:
- # Insert into the event_search table.
- self._store_guest_access_txn(txn, event)
+ elif event.type == EventTypes.Retention:
+ # Update the room_retention table.
+ self._store_retention_policy_for_room_txn(txn, event)
self._handle_event_relations(txn, event)
@@ -1559,6 +1561,98 @@ class EventsStore(
)
@defer.inlineCallbacks
+ def _censor_redactions(self):
+ """Censors all redactions older than the configured period that haven't
+ been censored yet.
+
+ By "censor" we mean updating the event_json table with the redacted event.
+
+ Returns:
+ Deferred
+ """
+
+ if self.hs.config.redaction_retention_period is None:
+ return
+
+ max_pos = yield self.find_first_stream_ordering_after_ts(
+ self._clock.time_msec() - self.hs.config.redaction_retention_period
+ )
+
+ # We fetch all redactions that:
+ # 1. point to an event we have,
+ # 2. have a stream ordering from before the cutoff, and
+ # 3. that we haven't yet censored.
+ #
+ # This is limited to 100 events to ensure that we don't try to do too
+ # much at once. We'll get called again so this should eventually catch
+ # up.
+ #
+ # We use the range [-max_pos, max_pos] to handle backfilled events,
+ # which are given negative stream ordering.
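+ #
+ # e.g. with a one week redaction_retention_period, max_pos is the
+ # first stream ordering newer than a week ago, so only redactions
+ # that are at least a week old get picked up here.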
+ sql = """
+ SELECT redact_event.event_id, redacts FROM redactions
+ INNER JOIN events AS redact_event USING (event_id)
+ INNER JOIN events AS original_event ON (
+ redact_event.room_id = original_event.room_id
+ AND redacts = original_event.event_id
+ )
+ WHERE NOT have_censored
+ AND ? <= redact_event.stream_ordering AND redact_event.stream_ordering <= ?
+ ORDER BY redact_event.stream_ordering ASC
+ LIMIT ?
+ """
+
+ rows = yield self._execute(
+ "_censor_redactions_fetch", None, sql, -max_pos, max_pos, 100
+ )
+
+ updates = []
+
+ for redaction_id, event_id in rows:
+ redaction_event = yield self.get_event(redaction_id, allow_none=True)
+ original_event = yield self.get_event(
+ event_id, allow_rejected=True, allow_none=True
+ )
+
+ # The SQL above ensures that we have both the redaction and
+ # original event, so if the `get_event` calls return None it
+ # means that the redaction wasn't allowed. Either way we know that
+ # the result won't change so we mark the fact that we've checked.
+ if (
+ redaction_event
+ and original_event
+ and original_event.internal_metadata.is_redacted()
+ ):
+ # Redaction was allowed
+ pruned_json = encode_canonical_json(
+ prune_event_dict(original_event.get_dict())
+ )
+ else:
+ # Redaction wasn't allowed
+ pruned_json = None
+
+ updates.append((redaction_id, event_id, pruned_json))
+
+ def _update_censor_txn(txn):
+ for redaction_id, event_id, pruned_json in updates:
+ if pruned_json:
+ self._simple_update_one_txn(
+ txn,
+ table="event_json",
+ keyvalues={"event_id": event_id},
+ updatevalues={"json": pruned_json},
+ )
+
+ self._simple_update_one_txn(
+ txn,
+ table="redactions",
+ keyvalues={"event_id": redaction_id},
+ updatevalues={"have_censored": True},
+ )
+
+ yield self.runInteraction("_update_censor_txn", _update_censor_txn)
+
+ @defer.inlineCallbacks
def count_daily_messages(self):
"""
Returns an estimate of the number of messages sent in the last day.
@@ -2191,6 +2285,144 @@ class EventsStore(
return to_delete, to_dedelta
+ def purge_room(self, room_id):
+ """Deletes all record of a room
+
+ Args:
+ room_id (str):
+ """
+
+ return self.runInteraction("purge_room", self._purge_room_txn, room_id)
+
+ def _purge_room_txn(self, txn, room_id):
+ # first we have to delete the state groups states
+ logger.info("[purge] removing %s from state_groups_state", room_id)
+
+ txn.execute(
+ """
+ DELETE FROM state_groups_state WHERE state_group IN (
+ SELECT state_group FROM events JOIN event_to_state_groups USING(event_id)
+ WHERE events.room_id=?
+ )
+ """,
+ (room_id,),
+ )
+
+ # ... and the state group edges
+ logger.info("[purge] removing %s from state_group_edges", room_id)
+
+ txn.execute(
+ """
+ DELETE FROM state_group_edges WHERE state_group IN (
+ SELECT state_group FROM events JOIN event_to_state_groups USING(event_id)
+ WHERE events.room_id=?
+ )
+ """,
+ (room_id,),
+ )
+
+ # ... and the state groups
+ logger.info("[purge] removing %s from state_groups", room_id)
+
+ txn.execute(
+ """
+ DELETE FROM state_groups WHERE id IN (
+ SELECT state_group FROM events JOIN event_to_state_groups USING(event_id)
+ WHERE events.room_id=?
+ )
+ """,
+ (room_id,),
+ )
+
+ # and then tables which lack an index on room_id but have one on event_id
+ for table in (
+ "event_auth",
+ "event_edges",
+ "event_push_actions_staging",
+ "event_reference_hashes",
+ "event_relations",
+ "event_to_state_groups",
+ "redactions",
+ "rejections",
+ "state_events",
+ ):
+ logger.info("[purge] removing %s from %s", room_id, table)
+
+ txn.execute(
+ """
+ DELETE FROM %s WHERE event_id IN (
+ SELECT event_id FROM events WHERE room_id=?
+ )
+ """
+ % (table,),
+ (room_id,),
+ )
+
+ # and finally, the tables with an index on room_id (or no useful index)
+ for table in (
+ "current_state_events",
+ "event_backward_extremities",
+ "event_forward_extremities",
+ "event_json",
+ "event_push_actions",
+ "event_search",
+ "events",
+ "group_rooms",
+ "public_room_list_stream",
+ "receipts_graph",
+ "receipts_linearized",
+ "room_aliases",
+ "room_depth",
+ "room_memberships",
+ "room_stats_state",
+ "room_stats_current",
+ "room_stats_historical",
+ "room_stats_earliest_token",
+ "rooms",
+ "stream_ordering_to_exterm",
+ "topics",
+ "users_in_public_rooms",
+ "users_who_share_private_rooms",
+ # no useful index, but let's clear them anyway
+ "appservice_room_list",
+ "e2e_room_keys",
+ "event_push_summary",
+ "pusher_throttle",
+ "group_summary_rooms",
+ "local_invites",
+ "room_account_data",
+ "room_tags",
+ ):
+ logger.info("[purge] removing %s from %s", room_id, table)
+ txn.execute("DELETE FROM %s WHERE room_id=?" % (table,), (room_id,))
+
+ # Other tables we do NOT need to clear out:
+ #
+ # - blocked_rooms
+ # This is important, to make sure that we don't accidentally rejoin a blocked
+ # room after it was purged
+ #
+ # - user_directory
+ # This has a room_id column, but it is unused
+ #
+
+ # Other tables that we might want to consider clearing out include:
+ #
+ # - event_reports
+ # Given that these are intended for abuse management, my initial
+ # inclination is to leave them in place.
+ #
+ # - current_state_delta_stream
+ # - ex_outlier_stream
+ # - room_tags_revisions
+ # The problem with these is that they are largeish and there is no room_id
+ # index on them. In any case we should be clearing out 'stream' tables
+ # periodically anyway (#5888)
+
+ # TODO: we could probably usefully do a bunch of cache invalidation here
+
+ logger.info("[purge] done")
+
@defer.inlineCallbacks
def is_event_after(self, event_id1, event_id2):
"""Returns True if event_id1 is after event_id2 in the stream
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index d20eacda59..e96eed8a6d 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -238,6 +238,13 @@ def _upgrade_existing_database(
logger.debug("applied_delta_files: %s", applied_delta_files)
+ if isinstance(database_engine, PostgresEngine):
+ specific_engine_extension = ".postgres"
+ else:
+ specific_engine_extension = ".sqlite"
+
+ specific_engine_extensions = (".sqlite", ".postgres")
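+ # e.g. a delta named "foo.sql.postgres" is applied only when running
+ # against PostgreSQL, while "foo.sql.sqlite" is applied only on SQLite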
+
for v in range(start_ver, SCHEMA_VERSION + 1):
logger.info("Upgrading schema to v%d", v)
@@ -274,15 +281,22 @@ def _upgrade_existing_database(
# Sometimes .pyc files turn up anyway even though we've
# disabled their generation; e.g. from distribution package
# installers. Silently skip it
- pass
+ continue
elif ext == ".sql":
# A plain old .sql file, just read and execute it
logger.info("Applying schema %s", relative_path)
executescript(cur, absolute_path)
+ elif ext == specific_engine_extension and root_name.endswith(".sql"):
+ # A .sql file specific to our engine; just read and execute it
+ logger.info("Applying engine-specific schema %s", relative_path)
+ executescript(cur, absolute_path)
+ elif ext in specific_engine_extensions and root_name.endswith(".sql"):
+ # A .sql file for a different engine; skip it.
+ continue
else:
# Not a valid delta file.
- logger.warn(
- "Found directory entry that did not end in .py or" " .sql: %s",
+ logger.warning(
+ "Found directory entry that did not end in .py or .sql: %s",
relative_path,
)
continue
@@ -290,7 +304,7 @@ def _upgrade_existing_database(
# Mark as done.
cur.execute(
database_engine.convert_param_style(
- "INSERT INTO applied_schema_deltas (version, file)" " VALUES (?,?)"
+ "INSERT INTO applied_schema_deltas (version, file) VALUES (?,?)"
),
(v, relative_path),
)
@@ -298,7 +312,7 @@ def _upgrade_existing_database(
cur.execute("DELETE FROM schema_version")
cur.execute(
database_engine.convert_param_style(
- "INSERT INTO schema_version (version, upgraded)" " VALUES (?,?)"
+ "INSERT INTO schema_version (version, upgraded) VALUES (?,?)"
),
(v, True),
)
diff --git a/synapse/storage/presence.py b/synapse/storage/presence.py
index 1a0f2d5768..5db6f2d84a 100644
--- a/synapse/storage/presence.py
+++ b/synapse/storage/presence.py
@@ -90,7 +90,7 @@ class PresenceStore(SQLBaseStore):
presence_states,
)
- return (stream_orderings[-1], self._presence_id_gen.get_current_token())
+ return stream_orderings[-1], self._presence_id_gen.get_current_token()
def _update_presence_txn(self, txn, stream_orderings, presence_states):
for stream_id, state in zip(stream_orderings, presence_states):
diff --git a/synapse/storage/profile.py b/synapse/storage/profile.py
index 8a5d8e9b18..0a36c9cb34 100644
--- a/synapse/storage/profile.py
+++ b/synapse/storage/profile.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -18,8 +19,11 @@ from twisted.internet import defer
from synapse.api.errors import StoreError
from synapse.storage.roommember import ProfileInfo
+from . import background_updates
from ._base import SQLBaseStore
+BATCH_SIZE = 100
+
class ProfileWorkerStore(SQLBaseStore):
@defer.inlineCallbacks
@@ -35,7 +39,6 @@ class ProfileWorkerStore(SQLBaseStore):
if e.code == 404:
# no match
return ProfileInfo(None, None)
- return
else:
raise
@@ -59,6 +62,54 @@ class ProfileWorkerStore(SQLBaseStore):
desc="get_profile_avatar_url",
)
+ def get_latest_profile_replication_batch_number(self):
+ def f(txn):
+ txn.execute("SELECT MAX(batch) as maxbatch FROM profiles")
+ rows = self.cursor_to_dict(txn)
+ return rows[0]["maxbatch"]
+
+ return self.runInteraction("get_latest_profile_replication_batch_number", f)
+
+ def get_profile_batch(self, batchnum):
+ return self._simple_select_list(
+ table="profiles",
+ keyvalues={"batch": batchnum},
+ retcols=("user_id", "displayname", "avatar_url", "active"),
+ desc="get_profile_batch",
+ )
+
+ def assign_profile_batch(self):
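+ """Assigns the next batch number to up to BATCH_SIZE profile rows
+ that don't yet have one, marking them for replication.
+
+ Returns:
+ Deferred[int]: the number of rows updated
+ """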
+ def f(txn):
+ sql = (
+ "UPDATE profiles SET batch = "
+ "(SELECT COALESCE(MAX(batch), -1) + 1 FROM profiles) "
+ "WHERE user_id in ("
+ " SELECT user_id FROM profiles WHERE batch is NULL limit ?"
+ ")"
+ )
+ txn.execute(sql, (BATCH_SIZE,))
+ return txn.rowcount
+
+ return self.runInteraction("assign_profile_batch", f)
+
+ def get_replication_hosts(self):
+ def f(txn):
+ txn.execute(
+ "SELECT host, last_synced_batch FROM profile_replication_status"
+ )
+ rows = self.cursor_to_dict(txn)
+ return {r["host"]: r["last_synced_batch"] for r in rows}
+
+ return self.runInteraction("get_replication_hosts", f)
+
+ def update_replication_batch_for_host(self, host, last_synced_batch):
+ return self._simple_upsert(
+ table="profile_replication_status",
+ keyvalues={"host": host},
+ values={"last_synced_batch": last_synced_batch},
+ desc="update_replication_batch_for_host",
+ )
+
def get_from_remote_profile_cache(self, user_id):
return self._simple_select_one(
table="remote_profile_cache",
@@ -68,29 +119,53 @@ class ProfileWorkerStore(SQLBaseStore):
desc="get_from_remote_profile_cache",
)
- def create_profile(self, user_localpart):
- return self._simple_insert(
- table="profiles", values={"user_id": user_localpart}, desc="create_profile"
- )
-
- def set_profile_displayname(self, user_localpart, new_displayname):
- return self._simple_update_one(
+ def set_profile_displayname(self, user_localpart, new_displayname, batchnum):
+ return self._simple_upsert(
table="profiles",
keyvalues={"user_id": user_localpart},
- updatevalues={"displayname": new_displayname},
+ values={"displayname": new_displayname, "batch": batchnum},
desc="set_profile_displayname",
+ lock=False, # we can do this because user_id has a unique index
)
- def set_profile_avatar_url(self, user_localpart, new_avatar_url):
- return self._simple_update_one(
+ def set_profile_avatar_url(self, user_localpart, new_avatar_url, batchnum):
+ return self._simple_upsert(
table="profiles",
keyvalues={"user_id": user_localpart},
- updatevalues={"avatar_url": new_avatar_url},
+ values={"avatar_url": new_avatar_url, "batch": batchnum},
desc="set_profile_avatar_url",
+ lock=False, # we can do this because user_id has a unique index
+ )
+
+ def set_profile_active(self, user_localpart, active, hide, batchnum):
+ values = {"active": int(active), "batch": batchnum}
+ if not active and not hide:
+ # we are deactivating for real (not in hide mode)
+ # so clear the profile.
+ values["avatar_url"] = None
+ values["displayname"] = None
+ return self._simple_upsert(
+ table="profiles",
+ keyvalues={"user_id": user_localpart},
+ values=values,
+ desc="set_profile_active",
+ lock=False, # we can do this because user_id has a unique index
)
-class ProfileStore(ProfileWorkerStore):
+class ProfileStore(ProfileWorkerStore, background_updates.BackgroundUpdateStore):
+ def __init__(self, db_conn, hs):
+
+ super(ProfileStore, self).__init__(db_conn, hs)
+
+ self.register_background_index_update(
+ "profile_replication_status_host_index",
+ index_name="profile_replication_status_idx",
+ table="profile_replication_status",
+ columns=["host"],
+ unique=True,
+ )
+
def add_remote_profile_cache(self, user_id, displayname, avatar_url):
"""Ensure we are caching the remote user's profiles.
diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py
index b431d24b8a..3e0e834a62 100644
--- a/synapse/storage/pusher.py
+++ b/synapse/storage/pusher.py
@@ -133,7 +133,7 @@ class PusherWorkerStore(SQLBaseStore):
txn.execute(sql, (last_id, current_id, limit))
deleted = txn.fetchall()
- return (updated, deleted)
+ return updated, deleted
return self.runInteraction(
"get_all_updated_pushers", get_all_updated_pushers_txn
diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py
index 6aa6d98ebb..290ddb30e8 100644
--- a/synapse/storage/receipts.py
+++ b/synapse/storage/receipts.py
@@ -478,7 +478,7 @@ class ReceiptsStore(ReceiptsWorkerStore):
max_persisted_id = self._receipts_id_gen.get_current_token()
- return (stream_id, max_persisted_id)
+ return stream_id, max_persisted_id
def insert_graph_receipt(self, room_id, receipt_type, user_id, event_ids, data):
return self.runInteraction(
diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py
index 55e4e84d71..f0932d8317 100644
--- a/synapse/storage/registration.py
+++ b/synapse/storage/registration.py
@@ -56,6 +56,7 @@ class RegistrationWorkerStore(SQLBaseStore):
"consent_server_notice_sent",
"appservice_id",
"creation_ts",
+ "user_type",
],
allow_none=True,
desc="get_user_by_id",
@@ -155,6 +156,28 @@ class RegistrationWorkerStore(SQLBaseStore):
)
@defer.inlineCallbacks
+ def get_expired_users(self):
+ """Get IDs of all expired users
+
+ Returns:
+ Deferred[list[str]]: List of expired user IDs
+ """
+
+ def get_expired_users_txn(txn, now_ms):
+ sql = """
+ SELECT user_id from account_validity
+ WHERE expiration_ts_ms <= ?
+ """
+ txn.execute(sql, (now_ms,))
+ rows = txn.fetchall()
+ return [row[0] for row in rows]
+
+ res = yield self.runInteraction(
+ "get_expired_users", get_expired_users_txn, self.clock.time_msec()
+ )
+ defer.returnValue(res)
+
+ @defer.inlineCallbacks
def set_renewal_token_for_user(self, user_id, renewal_token):
"""Defines a renewal token for a given user.
@@ -272,6 +295,14 @@ class RegistrationWorkerStore(SQLBaseStore):
@defer.inlineCallbacks
def is_server_admin(self, user):
+ """Determines if a user is an admin of this homeserver.
+
+ Args:
+ user (UserID): user ID of the user to test
+
+ Returns (bool):
+ true iff the user is a server admin, false otherwise.
+ """
res = yield self._simple_select_one_onecol(
table="users",
keyvalues={"name": user.to_string()},
@@ -282,6 +313,21 @@ class RegistrationWorkerStore(SQLBaseStore):
return res if res else False
+ def set_server_admin(self, user, admin):
+ """Sets whether a user is an admin of this homeserver.
+
+ Args:
+ user (UserID): user ID of the user to test
+ admin (bool): true iff the user is to be a server admin,
+ false otherwise.
+ """
+ return self._simple_update_one(
+ table="users",
+ keyvalues={"name": user.to_string()},
+ updatevalues={"admin": 1 if admin else 0},
+ desc="set_server_admin",
+ )
+
def _query_for_auth(self, txn, token):
sql = (
"SELECT users.name, users.is_guest, access_tokens.id as token_id,"
@@ -299,6 +345,19 @@ class RegistrationWorkerStore(SQLBaseStore):
return None
@cachedInlineCallbacks()
+ def is_real_user(self, user_id):
+ """Determines if the user is a real user, ie does not have a 'user_type'.
+
+ Args:
+ user_id (str): user id to test
+
+ Returns:
+ Deferred[bool]: True if the user's 'user_type' is null
+ """
+ res = yield self.runInteraction("is_real_user", self.is_real_user_txn, user_id)
+ return res
+
+ @cachedInlineCallbacks()
def is_support_user(self, user_id):
"""Determines if the user is of type UserTypes.SUPPORT
@@ -313,6 +372,16 @@ class RegistrationWorkerStore(SQLBaseStore):
)
return res
+ def is_real_user_txn(self, txn, user_id):
+ res = self._simple_select_one_onecol_txn(
+ txn=txn,
+ table="users",
+ keyvalues={"name": user_id},
+ retcol="user_type",
+ allow_none=True,
+ )
+ return res is None
+
def is_support_user_txn(self, txn, user_id):
res = self._simple_select_one_onecol_txn(
txn=txn,
@@ -398,6 +467,20 @@ class RegistrationWorkerStore(SQLBaseStore):
return ret
@defer.inlineCallbacks
+ def count_real_users(self):
+ """Counts all users without a special user_type registered on the homeserver."""
+
+ def _count_users(txn):
+ txn.execute("SELECT COUNT(*) AS users FROM users where user_type is null")
+ rows = self.cursor_to_dict(txn)
+ if rows:
+ return rows[0]["users"]
+ return 0
+
+ ret = yield self.runInteraction("count_real_users", _count_users)
+ return ret
+
+ @defer.inlineCallbacks
def find_next_generated_user_id_localpart(self):
"""
Gets the localpart of the next generated user ID.
@@ -590,6 +673,85 @@ class RegistrationWorkerStore(SQLBaseStore):
# Convert the integer into a boolean.
return res == 1
+ def get_threepid_validation_session(
+ self, medium, client_secret, address=None, sid=None, validated=True
+ ):
+ """Gets a session_id and last_send_attempt (if available) for a
+ client_secret/medium/(address|session_id) combo
+
+ Args:
+ medium (str|None): The medium of the 3PID
+ address (str|None): The address of the 3PID
+ sid (str|None): The ID of the validation session
+ client_secret (str|None): A unique string provided by the client to
+ help identify this validation attempt
+ validated (bool|None): Whether sessions should be filtered by
+ whether they have been validated already or not. None to
+ perform no filtering
+
+ Returns:
+ Deferred[dict|None]: A dict containing the latest session_id and
+ send_attempt count for this 3PID, or None if there has been no
+ previous attempt
+ """
+ keyvalues = {"medium": medium, "client_secret": client_secret}
+ if address:
+ keyvalues["address"] = address
+ if sid:
+ keyvalues["session_id"] = sid
+
+ assert address or sid
+
+ def get_threepid_validation_session_txn(txn):
+ sql = """
+ SELECT address, session_id, medium, client_secret,
+ last_send_attempt, validated_at
+ FROM threepid_validation_session WHERE %s
+ """ % (
+ " AND ".join("%s = ?" % k for k in iterkeys(keyvalues)),
+ )
+
+ if validated is not None:
+ sql += " AND validated_at IS " + ("NOT NULL" if validated else "NULL")
+
+ sql += " LIMIT 1"
+
+ txn.execute(sql, list(keyvalues.values()))
+ rows = self.cursor_to_dict(txn)
+ if not rows:
+ return None
+
+ return rows[0]
+
+ return self.runInteraction(
+ "get_threepid_validation_session", get_threepid_validation_session_txn
+ )
+
+ def delete_threepid_session(self, session_id):
+ """Removes a threepid validation session from the database. This can
+ be done after validation has been performed and whatever action was
+ waiting on it has been carried out
+
+ Args:
+ session_id (str): The ID of the session to delete
+ """
+
+ def delete_threepid_session_txn(txn):
+ self._simple_delete_txn(
+ txn,
+ table="threepid_validation_token",
+ keyvalues={"session_id": session_id},
+ )
+ self._simple_delete_txn(
+ txn,
+ table="threepid_validation_session",
+ keyvalues={"session_id": session_id},
+ )
+
+ return self.runInteraction(
+ "delete_threepid_session", delete_threepid_session_txn
+ )
+
class RegistrationStore(
RegistrationWorkerStore, background_updates.BackgroundUpdateStore
@@ -845,6 +1007,17 @@ class RegistrationStore(
(user_id_obj.localpart, create_profile_with_displayname),
)
+ if self.hs.config.stats_enabled:
+ # we create a new completed user statistics row
+
+ # we don't strictly need current_token since this user really can't
+ # have any state deltas before now (as it is a new user), but still,
+ # we include it for completeness.
+ current_token = self._get_max_stream_id_in_current_state_deltas_txn(txn)
+ self._update_stats_delta_txn(
+ txn, now, "user", user_id, {}, complete_with_stream_id=current_token
+ )
+
self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,))
txn.call_after(self.is_guest.invalidate, (user_id,))
@@ -1047,60 +1220,6 @@ class RegistrationStore(
return 1
- def get_threepid_validation_session(
- self, medium, client_secret, address=None, sid=None, validated=True
- ):
- """Gets a session_id and last_send_attempt (if available) for a
- client_secret/medium/(address|session_id) combo
-
- Args:
- medium (str|None): The medium of the 3PID
- address (str|None): The address of the 3PID
- sid (str|None): The ID of the validation session
- client_secret (str|None): A unique string provided by the client to
- help identify this validation attempt
- validated (bool|None): Whether sessions should be filtered by
- whether they have been validated already or not. None to
- perform no filtering
-
- Returns:
- deferred {str, int}|None: A dict containing the
- latest session_id and send_attempt count for this 3PID.
- Otherwise None if there hasn't been a previous attempt
- """
- keyvalues = {"medium": medium, "client_secret": client_secret}
- if address:
- keyvalues["address"] = address
- if sid:
- keyvalues["session_id"] = sid
-
- assert address or sid
-
- def get_threepid_validation_session_txn(txn):
- sql = """
- SELECT address, session_id, medium, client_secret,
- last_send_attempt, validated_at
- FROM threepid_validation_session WHERE %s
- """ % (
- " AND ".join("%s = ?" % k for k in iterkeys(keyvalues)),
- )
-
- if validated is not None:
- sql += " AND validated_at IS " + ("NOT NULL" if validated else "NULL")
-
- sql += " LIMIT 1"
-
- txn.execute(sql, list(keyvalues.values()))
- rows = self.cursor_to_dict(txn)
- if not rows:
- return None
-
- return rows[0]
-
- return self.runInteraction(
- "get_threepid_validation_session", get_threepid_validation_session_txn
- )
-
def validate_threepid_session(self, session_id, client_secret, token, current_ts):
"""Attempt to validate a threepid session using a token
@@ -1116,6 +1235,7 @@ class RegistrationStore(
deferred str|None: A str representing a link to redirect the user
to if there is one.
"""
+
# Insert everything into a transaction in order to run atomically
def validate_threepid_session_txn(txn):
row = self._simple_select_one_txn(
@@ -1287,31 +1407,6 @@ class RegistrationStore(
self.clock.time_msec(),
)
- def delete_threepid_session(self, session_id):
- """Removes a threepid validation session from the database. This can
- be done after validation has been performed and whatever action was
- waiting on it has been carried out
-
- Args:
- session_id (str): The ID of the session to delete
- """
-
- def delete_threepid_session_txn(txn):
- self._simple_delete_txn(
- txn,
- table="threepid_validation_token",
- keyvalues={"session_id": session_id},
- )
- self._simple_delete_txn(
- txn,
- table="threepid_validation_session",
- keyvalues={"session_id": session_id},
- )
-
- return self.runInteraction(
- "delete_threepid_session", delete_threepid_session_txn
- )
-
def set_user_deactivated_status_txn(self, txn, user_id, deactivated):
self._simple_update_one_txn(
txn=txn,
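The `get_threepid_validation_session` method moved into `RegistrationWorkerStore` above builds its WHERE clause dynamically from whichever filters the caller supplied. A minimal standalone sketch of that pattern (the function name and selected columns are illustrative, not Synapse API):

```python
def build_threepid_lookup(medium, client_secret, address=None, sid=None):
    """Builds a parameterised WHERE clause from the supplied filters,
    mirroring get_threepid_validation_session_txn above: only keys that
    were actually provided end up in the SQL, and values are passed
    separately so nothing is interpolated into the query string."""
    keyvalues = {"medium": medium, "client_secret": client_secret}
    if address:
        keyvalues["address"] = address
    if sid:
        keyvalues["session_id"] = sid

    assert address or sid, "need at least one of address/sid"

    where = " AND ".join("%s = ?" % k for k in keyvalues)
    sql = (
        "SELECT session_id, last_send_attempt "
        "FROM threepid_validation_session WHERE " + where + " LIMIT 1"
    )
    return sql, list(keyvalues.values())


sql, args = build_threepid_lookup("email", "s3cr3t", address="a@example.com")
# sql  == "SELECT session_id, last_send_attempt FROM threepid_validation_session
#          WHERE medium = ? AND client_secret = ? AND address = ? LIMIT 1"
# args == ["email", "s3cr3t", "a@example.com"]
```

Since one dict supplies both the column names and, in matching iteration order, the bound values, the two can never drift out of sync.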
diff --git a/synapse/storage/room.py b/synapse/storage/room.py
index bc606292b8..89d6ecd23f 100644
--- a/synapse/storage/room.py
+++ b/synapse/storage/room.py
@@ -17,10 +17,13 @@ import collections
import logging
import re
+from six import integer_types
+
from canonicaljson import json
from twisted.internet import defer
+from synapse.api.constants import EventTypes
from synapse.api.errors import StoreError
from synapse.storage._base import SQLBaseStore
from synapse.storage.search import SearchStore
@@ -171,6 +174,24 @@ class RoomWorkerStore(SQLBaseStore):
desc="is_room_blocked",
)
+ @defer.inlineCallbacks
+ def is_room_published(self, room_id):
+ """Check whether a room has been published in the local public room
+ directory.
+
+ Args:
+ room_id (str)
+ Returns:
+ bool: Whether the room is currently published in the room directory
+ """
+ # Get room information
+ room_info = yield self.get_room(room_id)
+ if not room_info:
+ defer.returnValue(False)
+
+ # Check the is_public value
+ defer.returnValue(room_info.get("is_public", False))
+
@cachedInlineCallbacks(max_entries=10000)
def get_ratelimit_for_user(self, user_id):
"""Check if there are any overrides for ratelimiting for the given
@@ -200,8 +221,146 @@ class RoomWorkerStore(SQLBaseStore):
else:
return None
+ @cachedInlineCallbacks()
+ def get_retention_policy_for_room(self, room_id):
+ """Get the retention policy for a given room.
+
+ If no retention policy has been found for this room, returns a policy defined
+ by the configured default policy (which has None as both the 'min_lifetime' and
+ the 'max_lifetime' if no default policy has been defined in the server's
+ configuration).
+
+ Args:
+ room_id (str): The ID of the room to get the retention policy of.
+
+ Returns:
+ dict[str, int|None]: "min_lifetime" and "max_lifetime" for this room.
+ """
+ # If the room retention feature is disabled, return a policy with no minimum nor
+ # maximum, so that no events get filtered out on retention grounds when sending
+ # to the client.
+ if not self.config.retention_enabled:
+ defer.returnValue({"min_lifetime": None, "max_lifetime": None})
+
+ def get_retention_policy_for_room_txn(txn):
+ txn.execute(
+ """
+ SELECT min_lifetime, max_lifetime FROM room_retention
+ INNER JOIN current_state_events USING (event_id, room_id)
+ WHERE room_id = ?;
+ """,
+ (room_id,),
+ )
+
+ return self.cursor_to_dict(txn)
+
+ ret = yield self.runInteraction(
+ "get_retention_policy_for_room", get_retention_policy_for_room_txn
+ )
+
+ # If we don't know this room ID, ret will be an empty list; in this case return
+ # the default policy.
+ if not ret:
+ defer.returnValue(
+ {
+ "min_lifetime": self.config.retention_default_min_lifetime,
+ "max_lifetime": self.config.retention_default_max_lifetime,
+ }
+ )
+
+ row = ret[0]
+
+ # If one of the room's policy's attributes isn't defined, use the matching
+ # attribute from the default policy.
+ # The default values will be None if no default policy has been defined, or if one
+ # of the attributes is missing from the default policy.
+ if row["min_lifetime"] is None:
+ row["min_lifetime"] = self.config.retention_default_min_lifetime
+
+ if row["max_lifetime"] is None:
+ row["max_lifetime"] = self.config.retention_default_max_lifetime
+
+ defer.returnValue(row)
+
class RoomStore(RoomWorkerStore, SearchStore):
+ def __init__(self, db_conn, hs):
+ super(RoomStore, self).__init__(db_conn, hs)
+
+ self.config = hs.config
+
+ self.register_background_update_handler(
+ "insert_room_retention", self._background_insert_retention
+ )
+
+ @defer.inlineCallbacks
+ def _background_insert_retention(self, progress, batch_size):
+ """Retrieves a list of all rooms within a range and inserts an entry for each of
+ them into the room_retention table.
+ Sets a property's column to NULL if the property is missing from the retention
+ event in the room's state (or NULLs both columns if there's no retention event
+ in the room's state at all), so that we fall back to the server's retention
+ policy.
+ """
+
+ last_room = progress.get("room_id", "")
+
+ def _background_insert_retention_txn(txn):
+ txn.execute(
+ """
+ SELECT state.room_id, state.event_id, events.json
+ FROM current_state_events as state
+ LEFT JOIN event_json AS events ON (state.event_id = events.event_id)
+ WHERE state.room_id > ? AND state.type = '%s'
+ ORDER BY state.room_id ASC
+ LIMIT ?;
+ """
+ % EventTypes.Retention,
+ (last_room, batch_size),
+ )
+
+ rows = self.cursor_to_dict(txn)
+
+ if not rows:
+ return True
+
+ for row in rows:
+ if not row["json"]:
+ retention_policy = {}
+ else:
+ ev = json.loads(row["json"])
+ retention_policy = json.dumps(ev["content"])
+
+ self._simple_insert_txn(
+ txn=txn,
+ table="room_retention",
+ values={
+ "room_id": row["room_id"],
+ "event_id": row["event_id"],
+ "min_lifetime": retention_policy.get("min_lifetime"),
+ "max_lifetime": retention_policy.get("max_lifetime"),
+ },
+ )
+
+ logger.info("Inserted %d rows into room_retention", len(rows))
+
+ self._background_update_progress_txn(
+ txn, "insert_room_retention", {"room_id": rows[-1]["room_id"]}
+ )
+
+ # We're done once a batch comes back smaller than the batch size.
+ return batch_size > len(rows)
+
+ end = yield self.runInteraction(
+ "insert_room_retention", _background_insert_retention_txn
+ )
+
+ if end:
+ yield self._end_background_update("insert_room_retention")
+
+ defer.returnValue(batch_size)
+
@defer.inlineCallbacks
def store_room(self, room_id, room_creator_user_id, is_public):
"""Stores a room.
@@ -386,32 +545,12 @@ class RoomStore(RoomWorkerStore, SearchStore):
def _store_room_topic_txn(self, txn, event):
if hasattr(event, "content") and "topic" in event.content:
- self._simple_insert_txn(
- txn,
- "topics",
- {
- "event_id": event.event_id,
- "room_id": event.room_id,
- "topic": event.content["topic"],
- },
- )
-
self.store_event_search_txn(
txn, event, "content.topic", event.content["topic"]
)
def _store_room_name_txn(self, txn, event):
if hasattr(event, "content") and "name" in event.content:
- self._simple_insert_txn(
- txn,
- "room_names",
- {
- "event_id": event.event_id,
- "room_id": event.room_id,
- "name": event.content["name"],
- },
- )
-
self.store_event_search_txn(
txn, event, "content.name", event.content["name"]
)
@@ -422,20 +561,34 @@ class RoomStore(RoomWorkerStore, SearchStore):
txn, event, "content.body", event.content["body"]
)
- def _store_history_visibility_txn(self, txn, event):
- self._store_content_index_txn(txn, event, "history_visibility")
+ def _store_retention_policy_for_room_txn(self, txn, event):
+ if hasattr(event, "content") and (
+ "min_lifetime" in event.content or "max_lifetime" in event.content
+ ):
+ if (
+ "min_lifetime" in event.content
+ and not isinstance(event.content.get("min_lifetime"), integer_types)
+ ) or (
+ "max_lifetime" in event.content
+ and not isinstance(event.content.get("max_lifetime"), integer_types)
+ ):
+ # Ignore the event if one of the values isn't an integer.
+ return
- def _store_guest_access_txn(self, txn, event):
- self._store_content_index_txn(txn, event, "guest_access")
+ self._simple_insert_txn(
+ txn=txn,
+ table="room_retention",
+ values={
+ "room_id": event.room_id,
+ "event_id": event.event_id,
+ "min_lifetime": event.content.get("min_lifetime"),
+ "max_lifetime": event.content.get("max_lifetime"),
+ },
+ )
- def _store_content_index_txn(self, txn, event, key):
- if hasattr(event, "content") and key in event.content:
- sql = (
- "INSERT INTO %(key)s"
- " (event_id, room_id, %(key)s)"
- " VALUES (?, ?, ?)" % {"key": key}
+ self._invalidate_cache_and_stream(
+ txn, self.get_retention_policy_for_room, (event.room_id,)
)
- txn.execute(sql, (event.event_id, event.room_id, event.content[key]))
def add_event_report(
self, room_id, event_id, user_id, reason, content, received_ts
@@ -618,3 +771,89 @@ class RoomStore(RoomWorkerStore, SearchStore):
remote_media_mxcs.append((hostname, media_id))
return local_media_mxcs, remote_media_mxcs
+
+ @defer.inlineCallbacks
+ def get_rooms_for_retention_period_in_range(
+ self, min_ms, max_ms, include_null=False
+ ):
+ """Retrieves all of the rooms within the given retention range.
+
+ Optionally includes the rooms which don't have a retention policy.
+
+ Args:
+ min_ms (int|None): Duration in milliseconds that defines the lower limit of
+ the range to handle (exclusive). If None, doesn't set a lower limit.
+ max_ms (int|None): Duration in milliseconds that defines the upper limit of
+ the range to handle (inclusive). If None, doesn't set an upper limit.
+ include_null (bool): Whether to include rooms whose retention policy is NULL
+ in the returned set.
+
+ Returns:
+ dict[str, dict]: The rooms within this range, along with their retention
+ policy. The key is "room_id", and maps to a dict describing the retention
+ policy associated with this room ID. The keys for this nested dict are
+ "min_lifetime" (int|None), and "max_lifetime" (int|None).
+ """
+
+ def get_rooms_for_retention_period_in_range_txn(txn):
+ range_conditions = []
+ args = []
+
+ if min_ms is not None:
+ range_conditions.append("max_lifetime > ?")
+ args.append(min_ms)
+
+ if max_ms is not None:
+ range_conditions.append("max_lifetime <= ?")
+ args.append(max_ms)
+
+ # Do a first query which will retrieve the rooms that have a retention policy
+ # in their current state.
+ sql = """
+ SELECT room_id, min_lifetime, max_lifetime FROM room_retention
+ INNER JOIN current_state_events USING (event_id, room_id)
+ """
+
+ if range_conditions:
+ sql += " WHERE (" + " AND ".join(range_conditions) + ")"
+
+ if include_null:
+ sql += " OR max_lifetime IS NULL"
+
+ txn.execute(sql, args)
+
+ rows = self.cursor_to_dict(txn)
+ rooms_dict = {}
+
+ for row in rows:
+ rooms_dict[row["room_id"]] = {
+ "min_lifetime": row["min_lifetime"],
+ "max_lifetime": row["max_lifetime"],
+ }
+
+ if include_null:
+ # If required, do a second query that retrieves all of the rooms we know
+ # of so we can handle rooms with no retention policy.
+ sql = "SELECT DISTINCT room_id FROM current_state_events"
+
+ txn.execute(sql)
+
+ rows = self.cursor_to_dict(txn)
+
+ # If a room isn't already in the dict (i.e. it doesn't have a retention
+ # policy in its state), add it with a null policy.
+ for row in rows:
+ if row["room_id"] not in rooms_dict:
+ rooms_dict[row["room_id"]] = {
+ "min_lifetime": None,
+ "max_lifetime": None,
+ }
+
+ return rooms_dict
+
+ rooms = yield self.runInteraction(
+ "get_rooms_for_retention_period_in_range",
+ get_rooms_for_retention_period_in_range_txn,
+ )
+
+ defer.returnValue(rooms)
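`get_retention_policy_for_room` above falls back to the server's configured defaults field by field, rather than all or nothing. A pure-Python sketch of that merge (the default values here are hypothetical, not Synapse defaults):

```python
def merge_retention_policy(row, default_min=None, default_max=None):
    """Fills in any attribute the room's policy leaves as None with the
    server default, matching the fallback in get_retention_policy_for_room."""
    if row is None:
        # No retention state event in the room at all: use defaults wholesale.
        return {"min_lifetime": default_min, "max_lifetime": default_max}

    return {
        "min_lifetime": default_min if row["min_lifetime"] is None else row["min_lifetime"],
        "max_lifetime": default_max if row["max_lifetime"] is None else row["max_lifetime"],
    }


# A room that only sets max_lifetime keeps the server's default minimum:
policy = merge_retention_policy(
    {"min_lifetime": None, "max_lifetime": 86400000},  # one day, in ms
    default_min=3600000,  # one hour, in ms
)
assert policy == {"min_lifetime": 3600000, "max_lifetime": 86400000}
```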
diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py
index eecb276465..4df8ebdacd 100644
--- a/synapse/storage/roommember.py
+++ b/synapse/storage/roommember.py
@@ -24,8 +24,10 @@ from canonicaljson import json
from twisted.internet import defer
from synapse.api.constants import EventTypes, Membership
+from synapse.metrics import LaterGauge
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage._base import LoggingTransaction
+from synapse.storage.engines import Sqlite3Engine
from synapse.storage.events_worker import EventsWorkerStore
from synapse.types import get_domain_from_id
from synapse.util.async_helpers import Linearizer
@@ -74,6 +76,63 @@ class RoomMemberWorkerStore(EventsWorkerStore):
self._check_safe_current_state_events_membership_updated_txn(txn)
txn.close()
+ if self.hs.config.metrics_flags.known_servers:
+ self._known_servers_count = 1
+ self.hs.get_clock().looping_call(
+ run_as_background_process,
+ 60 * 1000,
+ "_count_known_servers",
+ self._count_known_servers,
+ )
+ self.hs.get_clock().call_later(
+ 1000,
+ run_as_background_process,
+ "_count_known_servers",
+ self._count_known_servers,
+ )
+ LaterGauge(
+ "synapse_federation_known_servers",
+ "",
+ [],
+ lambda: self._known_servers_count,
+ )
+
+ @defer.inlineCallbacks
+ def _count_known_servers(self):
+ """
+ Count the servers that this server knows about.
+
+ The statistic is stored on the class for the
+ `synapse_federation_known_servers` LaterGauge to collect.
+ """
+
+ def _transact(txn):
+ if isinstance(self.database_engine, Sqlite3Engine):
+ query = """
+ SELECT COUNT(DISTINCT substr(out.user_id, pos+1))
+ FROM (
+ SELECT rm.user_id as user_id, instr(rm.user_id, ':')
+ AS pos FROM room_memberships as rm
+ INNER JOIN current_state_events as c ON rm.event_id = c.event_id
+ WHERE c.type = 'm.room.member'
+ ) as out
+ """
+ else:
+ query = """
+ SELECT COUNT(DISTINCT split_part(state_key, ':', 2))
+ FROM current_state_events
+ WHERE type = 'm.room.member' AND membership = 'join';
+ """
+ txn.execute(query)
+ return list(txn)[0][0]
+
+ count = yield self.runInteraction("get_known_servers", _transact)
+
+ # We always know about ourselves, even if we have nothing in
+ # room_memberships (for example, the server is new).
+ self._known_servers_count = max([count, 1])
+ return self._known_servers_count
+
def _check_safe_current_state_events_membership_updated_txn(self, txn):
"""Checks if it is safe to assume the new current_state_events
membership column is up to date
@@ -112,29 +171,31 @@ class RoomMemberWorkerStore(EventsWorkerStore):
@cached(max_entries=100000, iterable=True)
def get_users_in_room(self, room_id):
- def f(txn):
- # If we can assume current_state_events.membership is up to date
- # then we can avoid a join, which is a Very Good Thing given how
- # frequently this function gets called.
- if self._current_state_events_membership_up_to_date:
- sql = """
- SELECT state_key FROM current_state_events
- WHERE type = 'm.room.member' AND room_id = ? AND membership = ?
- """
- else:
- sql = """
- SELECT state_key FROM room_memberships as m
- INNER JOIN current_state_events as c
- ON m.event_id = c.event_id
- AND m.room_id = c.room_id
- AND m.user_id = c.state_key
- WHERE c.type = 'm.room.member' AND c.room_id = ? AND m.membership = ?
- """
+ return self.runInteraction(
+ "get_users_in_room", self.get_users_in_room_txn, room_id
+ )
- txn.execute(sql, (room_id, Membership.JOIN))
- return [to_ascii(r[0]) for r in txn]
+ def get_users_in_room_txn(self, txn, room_id):
+ # If we can assume current_state_events.membership is up to date
+ # then we can avoid a join, which is a Very Good Thing given how
+ # frequently this function gets called.
+ if self._current_state_events_membership_up_to_date:
+ sql = """
+ SELECT state_key FROM current_state_events
+ WHERE type = 'm.room.member' AND room_id = ? AND membership = ?
+ """
+ else:
+ sql = """
+ SELECT state_key FROM room_memberships as m
+ INNER JOIN current_state_events as c
+ ON m.event_id = c.event_id
+ AND m.room_id = c.room_id
+ AND m.user_id = c.state_key
+ WHERE c.type = 'm.room.member' AND c.room_id = ? AND m.membership = ?
+ """
- return self.runInteraction("get_users_in_room", f)
+ txn.execute(sql, (room_id, Membership.JOIN))
+ return [to_ascii(r[0]) for r in txn]
@cached(max_entries=100000)
def get_room_summary(self, room_id):
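The two dialect-specific queries in `_count_known_servers` both extract the server name (everything after the first ':') from each member's user ID and count distinct values. The same logic in plain Python, handy for sanity-checking the SQL against a small sample:

```python
def count_known_servers(user_ids):
    """Counts distinct server names across Matrix user IDs.

    A user ID looks like '@localpart:server.name'; localparts cannot
    contain ':', so splitting once on the first ':' isolates the server
    name, mirroring the instr()/substr() variant above.
    """
    servers = {user_id.split(":", 1)[1] for user_id in user_ids}
    # The store keeps a floor of 1: we always know about ourselves,
    # even before any membership rows exist.
    return max(len(servers), 1)


assert count_known_servers(["@a:example.com", "@b:example.com", "@c:other.org"]) == 2
assert count_known_servers([]) == 1
```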
diff --git a/synapse/storage/schema/delta/48/profiles_batch.sql b/synapse/storage/schema/delta/48/profiles_batch.sql
new file mode 100644
index 0000000000..e744c02fe8
--- /dev/null
+++ b/synapse/storage/schema/delta/48/profiles_batch.sql
@@ -0,0 +1,36 @@
+/* Copyright 2018 New Vector Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Add a batch number to track changes to profiles and the
+ * order they're made in so we can replicate user profiles
+ * to other hosts as they change
+ */
+ALTER TABLE profiles ADD COLUMN batch BIGINT DEFAULT NULL;
+
+/*
+ * Index on the batch number so we can get profiles
+ * by their batch
+ */
+CREATE INDEX profiles_batch_idx ON profiles(batch);
+
+/*
+ * A table to track what batch of user profiles has been
+ * synced to what profile replication target.
+ */
+CREATE TABLE profile_replication_status (
+ host TEXT NOT NULL,
+ last_synced_batch BIGINT NOT NULL
+);
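The batch column and `profile_replication_status` table pair up so a sweep can fetch only the profiles changed since the last batch synced to a given host. A hypothetical sqlite3 sketch of that consumption pattern (none of this is Synapse code; it just exercises the two tables defined above):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE profiles (user_id TEXT NOT NULL, displayname TEXT,"
    " avatar_url TEXT, batch BIGINT DEFAULT NULL)"
)
conn.execute(
    "CREATE TABLE profile_replication_status"
    " (host TEXT NOT NULL, last_synced_batch BIGINT NOT NULL)"
)
conn.executemany(
    "INSERT INTO profiles (user_id, displayname, batch) VALUES (?, ?, ?)",
    [("alice", "Alice", 1), ("bob", "Bob", 2), ("carol", "Carol", 3)],
)
conn.execute(
    "INSERT INTO profile_replication_status VALUES (?, ?)", ("other.host", 1)
)

# Fetch only the profiles changed since the batch last synced to this host:
rows = conn.execute(
    """
    SELECT p.user_id, p.displayname, p.batch
    FROM profiles AS p, profile_replication_status AS s
    WHERE s.host = ? AND p.batch IS NOT NULL AND p.batch > s.last_synced_batch
    ORDER BY p.batch ASC
    """,
    ("other.host",),
).fetchall()
print(rows)  # [('bob', 'Bob', 2), ('carol', 'Carol', 3)]
```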
diff --git a/synapse/storage/schema/delta/50/profiles_deactivated_users.sql b/synapse/storage/schema/delta/50/profiles_deactivated_users.sql
new file mode 100644
index 0000000000..c8893ecbe8
--- /dev/null
+++ b/synapse/storage/schema/delta/50/profiles_deactivated_users.sql
@@ -0,0 +1,23 @@
+/* Copyright 2018 New Vector Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * A flag saying whether the user owning the profile has been deactivated
+ * This really belongs on the users table, not here, but the users table
+ * stores users by their full user_id and profiles stores them by localpart,
+ * so we can't easily join between the two tables. Plus, the batch number
+ * really ought to represent data in this table that has changed.
+ */
+ALTER TABLE profiles ADD COLUMN active SMALLINT DEFAULT 1 NOT NULL;
diff --git a/synapse/storage/schema/delta/55/profile_replication_status_index.sql b/synapse/storage/schema/delta/55/profile_replication_status_index.sql
new file mode 100644
index 0000000000..18a0f7e10c
--- /dev/null
+++ b/synapse/storage/schema/delta/55/profile_replication_status_index.sql
@@ -0,0 +1,17 @@
+/* Copyright 2019 New Vector Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+INSERT INTO background_updates (update_name, progress_json) VALUES
+ ('profile_replication_status_host_index', '{}');
diff --git a/synapse/storage/schema/delta/55/room_retention.sql b/synapse/storage/schema/delta/55/room_retention.sql
new file mode 100644
index 0000000000..ee6cdf7a14
--- /dev/null
+++ b/synapse/storage/schema/delta/55/room_retention.sql
@@ -0,0 +1,33 @@
+/* Copyright 2019 New Vector Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Tracks the retention policy of a room.
+-- A NULL max_lifetime or min_lifetime means that the matching property is not defined in
+-- the room's retention policy state event.
+-- If a room doesn't have a retention policy state event in its state, both max_lifetime
+-- and min_lifetime are NULL.
+CREATE TABLE IF NOT EXISTS room_retention(
+ room_id TEXT,
+ event_id TEXT,
+ min_lifetime BIGINT,
+ max_lifetime BIGINT,
+
+ PRIMARY KEY(room_id, event_id)
+);
+
+CREATE INDEX room_retention_max_lifetime_idx ON room_retention(max_lifetime);
+
+INSERT INTO background_updates (update_name, progress_json) VALUES
+ ('insert_room_retention', '{}');
diff --git a/synapse/storage/schema/delta/56/add_spans_to_device_lists.sql b/synapse/storage/schema/delta/56/add_spans_to_device_lists.sql
new file mode 100644
index 0000000000..41807eb1e7
--- /dev/null
+++ b/synapse/storage/schema/delta/56/add_spans_to_device_lists.sql
@@ -0,0 +1,20 @@
+/* Copyright 2019 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Opentracing context data for inclusion in the device_list_update EDUs, as a
+ * json-encoded dictionary. NULL if opentracing is disabled (or not enabled for this destination).
+ */
+ALTER TABLE device_lists_outbound_pokes ADD opentracing_context TEXT;
diff --git a/synapse/storage/schema/delta/56/drop_unused_event_tables.sql b/synapse/storage/schema/delta/56/drop_unused_event_tables.sql
new file mode 100644
index 0000000000..9f09922c67
--- /dev/null
+++ b/synapse/storage/schema/delta/56/drop_unused_event_tables.sql
@@ -0,0 +1,20 @@
+/* Copyright 2019 The Matrix.org Foundation C.I.C.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- these tables are never used.
+DROP TABLE IF EXISTS room_names;
+DROP TABLE IF EXISTS topics;
+DROP TABLE IF EXISTS history_visibility;
+DROP TABLE IF EXISTS guest_access;
diff --git a/synapse/storage/schema/delta/56/fix_room_keys_index.sql b/synapse/storage/schema/delta/56/fix_room_keys_index.sql
new file mode 100644
index 0000000000..014cb3b538
--- /dev/null
+++ b/synapse/storage/schema/delta/56/fix_room_keys_index.sql
@@ -0,0 +1,18 @@
+/* Copyright 2019 Matrix.org Foundation CIC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- version is supposed to be part of the room keys index
+CREATE UNIQUE INDEX e2e_room_keys_with_version_idx ON e2e_room_keys(user_id, version, room_id, session_id);
+DROP INDEX IF EXISTS e2e_room_keys_idx;
diff --git a/synapse/storage/schema/delta/56/hidden_devices.sql b/synapse/storage/schema/delta/56/hidden_devices.sql
new file mode 100644
index 0000000000..67f8b20297
--- /dev/null
+++ b/synapse/storage/schema/delta/56/hidden_devices.sql
@@ -0,0 +1,18 @@
+/* Copyright 2019 New Vector Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- device list needs to know which ones are "real" devices, and which ones are
+-- just used to avoid collisions
+ALTER TABLE devices ADD COLUMN hidden BOOLEAN DEFAULT FALSE;
diff --git a/synapse/storage/schema/delta/56/redaction_censor.sql b/synapse/storage/schema/delta/56/redaction_censor.sql
new file mode 100644
index 0000000000..fe51b02309
--- /dev/null
+++ b/synapse/storage/schema/delta/56/redaction_censor.sql
@@ -0,0 +1,17 @@
+/* Copyright 2019 The Matrix.org Foundation C.I.C.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ALTER TABLE redactions ADD COLUMN have_censored BOOL NOT NULL DEFAULT false;
+CREATE INDEX redactions_have_censored ON redactions(event_id) WHERE not have_censored;
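The index above is a partial index: it only covers redactions that have not yet been censored, so it stays small as the backlog drains. A quick sqlite3 sketch (SQLite 3.8.0+ supports partial indexes) showing the planner picking it up when the query's predicate implies the index's:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE redactions ("
    " event_id TEXT,"
    " have_censored BOOL NOT NULL DEFAULT 0)"
)
conn.execute(
    "CREATE INDEX redactions_have_censored ON redactions(event_id)"
    " WHERE NOT have_censored"
)

# Only queries whose WHERE clause implies the index predicate can use it:
plan = conn.execute(
    "EXPLAIN QUERY PLAN"
    " SELECT event_id FROM redactions WHERE NOT have_censored"
).fetchall()
print(plan)  # mentions redactions_have_censored on reasonably recent SQLite
```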
diff --git a/synapse/storage/schema/delta/56/stats_separated.sql b/synapse/storage/schema/delta/56/stats_separated.sql
new file mode 100644
index 0000000000..163529c071
--- /dev/null
+++ b/synapse/storage/schema/delta/56/stats_separated.sql
@@ -0,0 +1,152 @@
+/* Copyright 2018 New Vector Ltd
+ * Copyright 2019 The Matrix.org Foundation C.I.C.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+----- First clean up from previous versions of room stats.
+
+-- First remove old stats stuff
+DROP TABLE IF EXISTS room_stats;
+DROP TABLE IF EXISTS room_state;
+DROP TABLE IF EXISTS room_stats_state;
+DROP TABLE IF EXISTS user_stats;
+DROP TABLE IF EXISTS room_stats_earliest_tokens;
+DROP TABLE IF EXISTS _temp_populate_stats_position;
+DROP TABLE IF EXISTS _temp_populate_stats_rooms;
+DROP TABLE IF EXISTS stats_stream_pos;
+
+-- Unschedule old background updates if they're still scheduled
+DELETE FROM background_updates WHERE update_name IN (
+ 'populate_stats_createtables',
+ 'populate_stats_process_rooms',
+ 'populate_stats_process_users',
+ 'populate_stats_cleanup'
+);
+
+INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES
+ ('populate_stats_process_rooms', '{}', '');
+
+INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES
+ ('populate_stats_process_users', '{}', 'populate_stats_process_rooms');
+
+----- Create tables for our version of room stats.
+
+-- single-row table to track position of incremental updates
+DROP TABLE IF EXISTS stats_incremental_position;
+CREATE TABLE stats_incremental_position (
+ Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, -- Makes sure this table only has one row.
+ stream_id BIGINT NOT NULL,
+ CHECK (Lock='X')
+);
+
+-- insert the one and only row, seeded from the current maximum event stream ordering.
+INSERT INTO stats_incremental_position (
+ stream_id
+) SELECT COALESCE(MAX(stream_ordering), 0) FROM events;
+
+-- represents PRESENT room statistics for a room
+-- only holds absolute fields
+DROP TABLE IF EXISTS room_stats_current;
+CREATE TABLE room_stats_current (
+ room_id TEXT NOT NULL PRIMARY KEY,
+
+ -- These are absolute counts
+ current_state_events INT NOT NULL,
+ joined_members INT NOT NULL,
+ invited_members INT NOT NULL,
+ left_members INT NOT NULL,
+ banned_members INT NOT NULL,
+
+ local_users_in_room INT NOT NULL,
+
+ -- The maximum delta stream position that this row takes into account.
+ completed_delta_stream_id BIGINT NOT NULL
+);
+
+
+-- represents HISTORICAL room statistics for a room
+DROP TABLE IF EXISTS room_stats_historical;
+CREATE TABLE room_stats_historical (
+ room_id TEXT NOT NULL,
+ -- These stats cover the time from (end_ts - bucket_size)...end_ts (in ms).
+ -- Note that end_ts is quantised.
+ end_ts BIGINT NOT NULL,
+ bucket_size BIGINT NOT NULL,
+
+ -- These stats are absolute counts
+ current_state_events BIGINT NOT NULL,
+ joined_members BIGINT NOT NULL,
+ invited_members BIGINT NOT NULL,
+ left_members BIGINT NOT NULL,
+ banned_members BIGINT NOT NULL,
+ local_users_in_room BIGINT NOT NULL,
+
+ -- These stats are per time slice
+ total_events BIGINT NOT NULL,
+ total_event_bytes BIGINT NOT NULL,
+
+ PRIMARY KEY (room_id, end_ts)
+);
+
+-- We use this index to speed up deletion of ancient room stats.
+CREATE INDEX room_stats_historical_end_ts ON room_stats_historical (end_ts);
+
+-- represents PRESENT statistics for a user
+-- only holds absolute fields
+DROP TABLE IF EXISTS user_stats_current;
+CREATE TABLE user_stats_current (
+ user_id TEXT NOT NULL PRIMARY KEY,
+
+ joined_rooms BIGINT NOT NULL,
+
+ -- The maximum delta stream position that this row takes into account.
+ completed_delta_stream_id BIGINT NOT NULL
+);
+
+-- represents HISTORICAL statistics for a user
+DROP TABLE IF EXISTS user_stats_historical;
+CREATE TABLE user_stats_historical (
+ user_id TEXT NOT NULL,
+ end_ts BIGINT NOT NULL,
+ bucket_size BIGINT NOT NULL,
+
+ joined_rooms BIGINT NOT NULL,
+
+ invites_sent BIGINT NOT NULL,
+ rooms_created BIGINT NOT NULL,
+ total_events BIGINT NOT NULL,
+ total_event_bytes BIGINT NOT NULL,
+
+ PRIMARY KEY (user_id, end_ts)
+);
+
+-- We use this index to speed up deletion of ancient user stats.
+CREATE INDEX user_stats_historical_end_ts ON user_stats_historical (end_ts);
+
+
+CREATE TABLE room_stats_state (
+ room_id TEXT NOT NULL,
+ name TEXT,
+ canonical_alias TEXT,
+ join_rules TEXT,
+ history_visibility TEXT,
+ encryption TEXT,
+ avatar TEXT,
+ guest_access TEXT,
+ is_federatable BOOLEAN,
+ topic TEXT
+);
+
+CREATE UNIQUE INDEX room_stats_state_room ON room_stats_state(room_id);
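`stats_incremental_position`, defined earlier in this file, uses the single-row table trick: a constant `Lock` column that is both UNIQUE and CHECK-constrained to 'X' guarantees the table can never hold more than one row, so position updates need no WHERE clause. A small sqlite3 demonstration of the constraint (table definition copied from above; the rest is illustrative):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    """
    CREATE TABLE stats_incremental_position (
        Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE,  -- at most one row
        stream_id BIGINT NOT NULL,
        CHECK (Lock='X')
    )
    """
)
conn.execute("INSERT INTO stats_incremental_position (stream_id) VALUES (0)")

# A second row is rejected by the UNIQUE constraint on Lock:
try:
    conn.execute("INSERT INTO stats_incremental_position (stream_id) VALUES (5)")
except sqlite3.IntegrityError as e:
    print("rejected:", e)

# So the position can be updated unconditionally:
conn.execute("UPDATE stats_incremental_position SET stream_id = ?", (42,))
print(conn.execute("SELECT stream_id FROM stats_incremental_position").fetchone())
# (42,)
```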
diff --git a/synapse/storage/schema/delta/56/users_in_public_rooms_idx.sql b/synapse/storage/schema/delta/56/users_in_public_rooms_idx.sql
new file mode 100644
index 0000000000..149f8be8b6
--- /dev/null
+++ b/synapse/storage/schema/delta/56/users_in_public_rooms_idx.sql
@@ -0,0 +1,17 @@
+/* Copyright 2019 Matrix.org Foundation CIC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- this was apparently forgotten when the table was created back in delta 53.
+CREATE INDEX users_in_public_rooms_r_idx ON users_in_public_rooms(room_id);
diff --git a/synapse/storage/schema/full_schemas/54/full.sql.postgres b/synapse/storage/schema/full_schemas/54/full.sql.postgres
index 098434356f..01a2b0e024 100644
--- a/synapse/storage/schema/full_schemas/54/full.sql.postgres
+++ b/synapse/storage/schema/full_schemas/54/full.sql.postgres
@@ -667,10 +667,19 @@ CREATE TABLE presence_stream (
+CREATE TABLE profile_replication_status (
+ host text NOT NULL,
+ last_synced_batch bigint NOT NULL
+);
+
+
+
CREATE TABLE profiles (
user_id text NOT NULL,
displayname text,
- avatar_url text
+ avatar_url text,
+ batch bigint,
+ active smallint DEFAULT 1 NOT NULL
);
@@ -1842,6 +1851,10 @@ CREATE INDEX presence_stream_user_id ON presence_stream USING btree (user_id);
+CREATE INDEX profiles_batch_idx ON profiles USING btree (batch);
+
+
+
CREATE INDEX public_room_index ON rooms USING btree (is_public);
diff --git a/synapse/storage/schema/full_schemas/54/full.sql.sqlite b/synapse/storage/schema/full_schemas/54/full.sql.sqlite
index be9295e4c9..f1a71627f0 100644
--- a/synapse/storage/schema/full_schemas/54/full.sql.sqlite
+++ b/synapse/storage/schema/full_schemas/54/full.sql.sqlite
@@ -6,7 +6,7 @@ CREATE TABLE presence_allow_inbound( observed_user_id TEXT NOT NULL, observer_us
CREATE TABLE users( name TEXT, password_hash TEXT, creation_ts BIGINT, admin SMALLINT DEFAULT 0 NOT NULL, upgrade_ts BIGINT, is_guest SMALLINT DEFAULT 0 NOT NULL, appservice_id TEXT, consent_version TEXT, consent_server_notice_sent TEXT, user_type TEXT DEFAULT NULL, UNIQUE(name) );
CREATE TABLE access_tokens( id BIGINT PRIMARY KEY, user_id TEXT NOT NULL, device_id TEXT, token TEXT NOT NULL, last_used BIGINT, UNIQUE(token) );
CREATE TABLE user_ips ( user_id TEXT NOT NULL, access_token TEXT NOT NULL, device_id TEXT, ip TEXT NOT NULL, user_agent TEXT NOT NULL, last_seen BIGINT NOT NULL );
-CREATE TABLE profiles( user_id TEXT NOT NULL, displayname TEXT, avatar_url TEXT, UNIQUE(user_id) );
+CREATE TABLE profiles( user_id TEXT NOT NULL, displayname TEXT, avatar_url TEXT, batch BIGINT DEFAULT NULL, active SMALLINT DEFAULT 1 NOT NULL, UNIQUE(user_id) );
CREATE TABLE received_transactions( transaction_id TEXT, origin TEXT, ts BIGINT, response_code INTEGER, response_json bytea, has_been_referenced smallint default 0, UNIQUE (transaction_id, origin) );
CREATE TABLE destinations( destination TEXT PRIMARY KEY, retry_last_ts BIGINT, retry_interval INTEGER );
CREATE TABLE events( stream_ordering INTEGER PRIMARY KEY, topological_ordering BIGINT NOT NULL, event_id TEXT NOT NULL, type TEXT NOT NULL, room_id TEXT NOT NULL, content TEXT, unrecognized_keys TEXT, processed BOOL NOT NULL, outlier BOOL NOT NULL, depth BIGINT DEFAULT 0 NOT NULL, origin_server_ts BIGINT, received_ts BIGINT, sender TEXT, contains_url BOOLEAN, UNIQUE (event_id) );
@@ -208,6 +208,8 @@ CREATE INDEX group_users_u_idx ON group_users(user_id);
CREATE INDEX group_invites_u_idx ON group_invites(user_id);
CREATE UNIQUE INDEX group_rooms_g_idx ON group_rooms(group_id, room_id);
CREATE INDEX group_rooms_r_idx ON group_rooms(room_id);
+CREATE INDEX profiles_batch_idx ON profiles(batch);
+CREATE TABLE profile_replication_status ( host TEXT NOT NULL, last_synced_batch BIGINT NOT NULL );
CREATE TABLE user_daily_visits ( user_id TEXT NOT NULL, device_id TEXT, timestamp BIGINT NOT NULL );
CREATE INDEX user_daily_visits_uts_idx ON user_daily_visits(user_id, timestamp);
CREATE INDEX user_daily_visits_ts_idx ON user_daily_visits(timestamp);
diff --git a/synapse/storage/stats.py b/synapse/storage/stats.py
index e13efed417..09190d684e 100644
--- a/synapse/storage/stats.py
+++ b/synapse/storage/stats.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2018, 2019 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,17 +15,22 @@
# limitations under the License.
import logging
+from itertools import chain
from twisted.internet import defer
+from twisted.internet.defer import DeferredLock
from synapse.api.constants import EventTypes, Membership
-from synapse.storage.prepare_database import get_statements
+from synapse.storage import PostgresEngine
from synapse.storage.state_deltas import StateDeltasStore
from synapse.util.caches.descriptors import cached
logger = logging.getLogger(__name__)
# these fields track absolutes (e.g. total number of rooms on the server)
+# You can think of these as Prometheus Gauges.
+# You can draw these stats on a line graph.
+# Example: number of users in a room
ABSOLUTE_STATS_FIELDS = {
"room": (
"current_state_events",
@@ -32,14 +38,23 @@ ABSOLUTE_STATS_FIELDS = {
"invited_members",
"left_members",
"banned_members",
- "state_events",
+ "local_users_in_room",
),
- "user": ("public_rooms", "private_rooms"),
+ "user": ("joined_rooms",),
}
-TYPE_TO_ROOM = {"room": ("room_stats", "room_id"), "user": ("user_stats", "user_id")}
+# these fields are per-timeslice and so should be reset to 0 upon a new slice
+# You can draw these stats on a histogram.
+# Example: number of events sent locally during a time slice
+PER_SLICE_FIELDS = {
+ "room": ("total_events", "total_event_bytes"),
+ "user": ("invites_sent", "rooms_created", "total_events", "total_event_bytes"),
+}
+
+TYPE_TO_TABLE = {"room": ("room_stats", "room_id"), "user": ("user_stats", "user_id")}
-TEMP_TABLE = "_temp_populate_stats"
+# these are the tables (& ID columns) which contain our actual subjects
+TYPE_TO_ORIGIN_TABLE = {"room": ("rooms", "room_id"), "user": ("users", "name")}
class StatsStore(StateDeltasStore):
@@ -51,136 +66,102 @@ class StatsStore(StateDeltasStore):
self.stats_enabled = hs.config.stats_enabled
self.stats_bucket_size = hs.config.stats_bucket_size
- self.register_background_update_handler(
- "populate_stats_createtables", self._populate_stats_createtables
- )
+ self.stats_delta_processing_lock = DeferredLock()
+
self.register_background_update_handler(
"populate_stats_process_rooms", self._populate_stats_process_rooms
)
self.register_background_update_handler(
- "populate_stats_cleanup", self._populate_stats_cleanup
+ "populate_stats_process_users", self._populate_stats_process_users
)
+ # we no longer need to perform clean-up, but we will give ourselves
+ # the potential to reintroduce it in the future – so documentation
+ # will still encourage the use of this no-op handler.
+ self.register_noop_background_update("populate_stats_cleanup")
+ self.register_noop_background_update("populate_stats_prepare")
- @defer.inlineCallbacks
- def _populate_stats_createtables(self, progress, batch_size):
-
- if not self.stats_enabled:
- yield self._end_background_update("populate_stats_createtables")
- return 1
-
- # Get all the rooms that we want to process.
- def _make_staging_area(txn):
- # Create the temporary tables
- stmts = get_statements(
- """
- -- We just recreate the table, we'll be reinserting the
- -- correct entries again later anyway.
- DROP TABLE IF EXISTS {temp}_rooms;
-
- CREATE TABLE IF NOT EXISTS {temp}_rooms(
- room_id TEXT NOT NULL,
- events BIGINT NOT NULL
- );
-
- CREATE INDEX {temp}_rooms_events
- ON {temp}_rooms(events);
- CREATE INDEX {temp}_rooms_id
- ON {temp}_rooms(room_id);
- """.format(
- temp=TEMP_TABLE
- ).splitlines()
- )
-
- for statement in stmts:
- txn.execute(statement)
-
- sql = (
- "CREATE TABLE IF NOT EXISTS "
- + TEMP_TABLE
- + "_position(position TEXT NOT NULL)"
- )
- txn.execute(sql)
-
- # Get rooms we want to process from the database, only adding
- # those that we haven't (i.e. those not in room_stats_earliest_token)
- sql = """
- INSERT INTO %s_rooms (room_id, events)
- SELECT c.room_id, count(*) FROM current_state_events AS c
- LEFT JOIN room_stats_earliest_token AS t USING (room_id)
- WHERE t.room_id IS NULL
- GROUP BY c.room_id
- """ % (
- TEMP_TABLE,
- )
- txn.execute(sql)
+ def quantise_stats_time(self, ts):
+ """
+ Quantises a timestamp to be a multiple of the bucket size.
- new_pos = yield self.get_max_stream_id_in_current_state_deltas()
- yield self.runInteraction("populate_stats_temp_build", _make_staging_area)
- yield self._simple_insert(TEMP_TABLE + "_position", {"position": new_pos})
- self.get_earliest_token_for_room_stats.invalidate_all()
+ Args:
+ ts (int): the timestamp to quantise, in milliseconds since the Unix
+ Epoch
- yield self._end_background_update("populate_stats_createtables")
- return 1
+ Returns:
+ int: a timestamp which
+ - is divisible by the bucket size;
+ - is no later than `ts`; and
+ - is the largest such timestamp.
+ """
+ return (ts // self.stats_bucket_size) * self.stats_bucket_size
@defer.inlineCallbacks
- def _populate_stats_cleanup(self, progress, batch_size):
+ def _populate_stats_process_users(self, progress, batch_size):
"""
- Update the user directory stream position, then clean up the old tables.
+ This is a background update which regenerates statistics for users.
"""
if not self.stats_enabled:
- yield self._end_background_update("populate_stats_cleanup")
+ yield self._end_background_update("populate_stats_process_users")
return 1
- position = yield self._simple_select_one_onecol(
- TEMP_TABLE + "_position", None, "position"
+ last_user_id = progress.get("last_user_id", "")
+
+ def _get_next_batch(txn):
+ sql = """
+ SELECT DISTINCT name FROM users
+ WHERE name > ?
+ ORDER BY name ASC
+ LIMIT ?
+ """
+ txn.execute(sql, (last_user_id, batch_size))
+ return [r for r, in txn]
+
+ users_to_work_on = yield self.runInteraction(
+ "_populate_stats_process_users", _get_next_batch
)
- yield self.update_stats_stream_pos(position)
- def _delete_staging_area(txn):
- txn.execute("DROP TABLE IF EXISTS " + TEMP_TABLE + "_rooms")
- txn.execute("DROP TABLE IF EXISTS " + TEMP_TABLE + "_position")
+ # No more users -- complete the transaction.
+ if not users_to_work_on:
+ yield self._end_background_update("populate_stats_process_users")
+ return 1
- yield self.runInteraction("populate_stats_cleanup", _delete_staging_area)
+ for user_id in users_to_work_on:
+ yield self._calculate_and_set_initial_state_for_user(user_id)
+ progress["last_user_id"] = user_id
- yield self._end_background_update("populate_stats_cleanup")
- return 1
+ yield self.runInteraction(
+ "populate_stats_process_users",
+ self._background_update_progress_txn,
+ "populate_stats_process_users",
+ progress,
+ )
+
+ return len(users_to_work_on)
@defer.inlineCallbacks
def _populate_stats_process_rooms(self, progress, batch_size):
-
+ """
+ This is a background update which regenerates statistics for rooms.
+ """
if not self.stats_enabled:
yield self._end_background_update("populate_stats_process_rooms")
return 1
- # If we don't have progress filed, delete everything.
- if not progress:
- yield self.delete_all_stats()
+ last_room_id = progress.get("last_room_id", "")
def _get_next_batch(txn):
- # Only fetch 250 rooms, so we don't fetch too many at once, even
- # if those 250 rooms have less than batch_size state events.
sql = """
- SELECT room_id, events FROM %s_rooms
- ORDER BY events DESC
- LIMIT 250
- """ % (
- TEMP_TABLE,
- )
- txn.execute(sql)
- rooms_to_work_on = txn.fetchall()
-
- if not rooms_to_work_on:
- return None
-
- # Get how many are left to process, so we can give status on how
- # far we are in processing
- txn.execute("SELECT COUNT(*) FROM " + TEMP_TABLE + "_rooms")
- progress["remaining"] = txn.fetchone()[0]
-
- return rooms_to_work_on
+ SELECT DISTINCT room_id FROM current_state_events
+ WHERE room_id > ?
+ ORDER BY room_id ASC
+ LIMIT ?
+ """
+ txn.execute(sql, (last_room_id, batch_size))
+ return [r for r, in txn]
rooms_to_work_on = yield self.runInteraction(
- "populate_stats_temp_read", _get_next_batch
+ "populate_stats_rooms_get_batch", _get_next_batch
)
# No more rooms -- complete the transaction.
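The `quantise_stats_time` helper added earlier in this hunk is plain floor-division rounding: buckets are aligned to multiples of the bucket size, not to any first-seen timestamp. A worked example, assuming a one-day bucket size in milliseconds:

```python
def quantise_stats_time(ts, bucket_size):
    """Rounds ts down to the nearest multiple of bucket_size."""
    return (ts // bucket_size) * bucket_size


DAY_MS = 24 * 60 * 60 * 1000

# 2019-09-01T13:37:00Z in milliseconds since the Unix epoch:
ts = 1567345020000
assert quantise_stats_time(ts, DAY_MS) == 1567296000000  # 2019-09-01T00:00:00Z
```

Every timestamp in the same UTC day therefore lands in the same quantised `end_ts` bucket of the `*_historical` tables.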
@@ -188,154 +169,28 @@ class StatsStore(StateDeltasStore):
yield self._end_background_update("populate_stats_process_rooms")
return 1
- logger.info(
- "Processing the next %d rooms of %d remaining",
- len(rooms_to_work_on),
- progress["remaining"],
- )
-
- # Number of state events we've processed by going through each room
- processed_event_count = 0
-
- for room_id, event_count in rooms_to_work_on:
-
- current_state_ids = yield self.get_current_state_ids(room_id)
-
- join_rules_id = current_state_ids.get((EventTypes.JoinRules, ""))
- history_visibility_id = current_state_ids.get(
- (EventTypes.RoomHistoryVisibility, "")
- )
- encryption_id = current_state_ids.get((EventTypes.RoomEncryption, ""))
- name_id = current_state_ids.get((EventTypes.Name, ""))
- topic_id = current_state_ids.get((EventTypes.Topic, ""))
- avatar_id = current_state_ids.get((EventTypes.RoomAvatar, ""))
- canonical_alias_id = current_state_ids.get((EventTypes.CanonicalAlias, ""))
-
- event_ids = [
- join_rules_id,
- history_visibility_id,
- encryption_id,
- name_id,
- topic_id,
- avatar_id,
- canonical_alias_id,
- ]
-
- state_events = yield self.get_events(
- [ev for ev in event_ids if ev is not None]
- )
-
- def _get_or_none(event_id, arg):
- event = state_events.get(event_id)
- if event:
- return event.content.get(arg)
- return None
-
- yield self.update_room_state(
- room_id,
- {
- "join_rules": _get_or_none(join_rules_id, "join_rule"),
- "history_visibility": _get_or_none(
- history_visibility_id, "history_visibility"
- ),
- "encryption": _get_or_none(encryption_id, "algorithm"),
- "name": _get_or_none(name_id, "name"),
- "topic": _get_or_none(topic_id, "topic"),
- "avatar": _get_or_none(avatar_id, "url"),
- "canonical_alias": _get_or_none(canonical_alias_id, "alias"),
- },
- )
-
- now = self.hs.get_reactor().seconds()
-
- # quantise time to the nearest bucket
- now = (now // self.stats_bucket_size) * self.stats_bucket_size
-
- def _fetch_data(txn):
-
- # Get the current token of the room
- current_token = self._get_max_stream_id_in_current_state_deltas_txn(txn)
-
- current_state_events = len(current_state_ids)
-
- membership_counts = self._get_user_counts_in_room_txn(txn, room_id)
-
- total_state_events = self._get_total_state_event_counts_txn(
- txn, room_id
- )
-
- self._update_stats_txn(
- txn,
- "room",
- room_id,
- now,
- {
- "bucket_size": self.stats_bucket_size,
- "current_state_events": current_state_events,
- "joined_members": membership_counts.get(Membership.JOIN, 0),
- "invited_members": membership_counts.get(Membership.INVITE, 0),
- "left_members": membership_counts.get(Membership.LEAVE, 0),
- "banned_members": membership_counts.get(Membership.BAN, 0),
- "state_events": total_state_events,
- },
- )
- self._simple_insert_txn(
- txn,
- "room_stats_earliest_token",
- {"room_id": room_id, "token": current_token},
- )
-
- # We've finished a room. Delete it from the table.
- self._simple_delete_one_txn(
- txn, TEMP_TABLE + "_rooms", {"room_id": room_id}
- )
+ for room_id in rooms_to_work_on:
+ yield self._calculate_and_set_initial_state_for_room(room_id)
+ progress["last_room_id"] = room_id
- yield self.runInteraction("update_room_stats", _fetch_data)
-
- # Update the remaining counter.
- progress["remaining"] -= 1
- yield self.runInteraction(
- "populate_stats",
- self._background_update_progress_txn,
- "populate_stats_process_rooms",
- progress,
- )
-
- processed_event_count += event_count
-
- if processed_event_count > batch_size:
- # Don't process any more rooms, we've hit our batch size.
- return processed_event_count
+ yield self.runInteraction(
+ "_populate_stats_process_rooms",
+ self._background_update_progress_txn,
+ "populate_stats_process_rooms",
+ progress,
+ )
- return processed_event_count
+ return len(rooms_to_work_on)
- def delete_all_stats(self):
+ def get_stats_positions(self):
"""
- Delete all statistics records.
+ Returns the stats processor positions.
"""
-
- def _delete_all_stats_txn(txn):
- txn.execute("DELETE FROM room_state")
- txn.execute("DELETE FROM room_stats")
- txn.execute("DELETE FROM room_stats_earliest_token")
- txn.execute("DELETE FROM user_stats")
-
- return self.runInteraction("delete_all_stats", _delete_all_stats_txn)
-
- def get_stats_stream_pos(self):
return self._simple_select_one_onecol(
- table="stats_stream_pos",
+ table="stats_incremental_position",
keyvalues={},
retcol="stream_id",
- desc="stats_stream_pos",
- )
-
- def update_stats_stream_pos(self, stream_id):
- return self._simple_update_one(
- table="stats_stream_pos",
- keyvalues={},
- updatevalues={"stream_id": stream_id},
- desc="update_stats_stream_pos",
+ desc="stats_incremental_position",
)
def update_room_state(self, room_id, fields):
@@ -361,42 +216,87 @@ class StatsStore(StateDeltasStore):
fields[col] = None
return self._simple_upsert(
- table="room_state",
+ table="room_stats_state",
keyvalues={"room_id": room_id},
values=fields,
desc="update_room_state",
)
- def get_deltas_for_room(self, room_id, start, size=100):
+ def get_statistics_for_subject(self, stats_type, stats_id, start, size=100):
"""
- Get statistics deltas for a given room.
+ Get statistics for a given subject.
Args:
- room_id (str)
+ stats_type (str): The type of subject
+ stats_id (str): The ID of the subject (e.g. room_id or user_id)
start (int): Pagination start. Number of entries, not timestamp.
size (int): How many entries to return.
Returns:
Deferred[list[dict]], where the dict has the keys of
- ABSOLUTE_STATS_FIELDS["room"] and "ts".
+ ABSOLUTE_STATS_FIELDS[stats_type], and "bucket_size" and "end_ts".
"""
- return self._simple_select_list_paginate(
- "room_stats",
- {"room_id": room_id},
- "ts",
+ return self.runInteraction(
+ "get_statistics_for_subject",
+ self._get_statistics_for_subject_txn,
+ stats_type,
+ stats_id,
start,
size,
- retcols=(list(ABSOLUTE_STATS_FIELDS["room"]) + ["ts"]),
+ )
+
+ def _get_statistics_for_subject_txn(
+ self, txn, stats_type, stats_id, start, size=100
+ ):
+ """
+ Transaction-bound version of L{get_statistics_for_subject}.
+ """
+
+ table, id_col = TYPE_TO_TABLE[stats_type]
+ selected_columns = list(
+ ABSOLUTE_STATS_FIELDS[stats_type] + PER_SLICE_FIELDS[stats_type]
+ )
+
+ slice_list = self._simple_select_list_paginate_txn(
+ txn,
+ table + "_historical",
+ {id_col: stats_id},
+ "end_ts",
+ start,
+ size,
+ retcols=selected_columns + ["bucket_size", "end_ts"],
order_direction="DESC",
)
- def get_all_room_state(self):
- return self._simple_select_list(
- "room_state", None, retcols=("name", "topic", "canonical_alias")
+ return slice_list
+
+ def get_room_stats_state(self, room_id):
+ """
+ Returns the current room_stats_state for a room.
+
+ Args:
+ room_id (str): The ID of the room to return state for.
+
+ Returns (dict):
+ Dictionary containing these keys:
+ "name", "topic", "canonical_alias", "avatar", "join_rules",
+ "history_visibility"
+ """
+ return self._simple_select_one(
+ "room_stats_state",
+ {"room_id": room_id},
+ retcols=(
+ "name",
+ "topic",
+ "canonical_alias",
+ "avatar",
+ "join_rules",
+ "history_visibility",
+ ),
)
@cached()
- def get_earliest_token_for_room_stats(self, room_id):
+ def get_earliest_token_for_stats(self, stats_type, id):
"""
Fetch the "earliest token". This is used by the room stats delta
processor to ignore deltas that have been processed between the
@@ -406,79 +306,573 @@ class StatsStore(StateDeltasStore):
Returns:
Deferred[int]
"""
+ table, id_col = TYPE_TO_TABLE[stats_type]
+
return self._simple_select_one_onecol(
- "room_stats_earliest_token",
- {"room_id": room_id},
- retcol="token",
+ "%s_current" % (table,),
+ keyvalues={id_col: id},
+ retcol="completed_delta_stream_id",
allow_none=True,
)
- def update_stats(self, stats_type, stats_id, ts, fields):
- table, id_col = TYPE_TO_ROOM[stats_type]
- return self._simple_upsert(
- table=table,
- keyvalues={id_col: stats_id, "ts": ts},
- values=fields,
- desc="update_stats",
+ def bulk_update_stats_delta(self, ts, updates, stream_id):
+ """Bulk update stats tables for a given stream_id and updates the stats
+ incremental position.
+
+ Args:
+ ts (int): Current timestamp in ms
+ updates (dict[str, dict[str, dict[str, Counter]]]): The updates to
+ commit as a mapping stats_type -> stats_id -> field -> delta.
+ stream_id (int): Current position.
+
+ Returns:
+ Deferred
+ """
+
+ def _bulk_update_stats_delta_txn(txn):
+ for stats_type, stats_updates in updates.items():
+ for stats_id, fields in stats_updates.items():
+ self._update_stats_delta_txn(
+ txn,
+ ts=ts,
+ stats_type=stats_type,
+ stats_id=stats_id,
+ fields=fields,
+ complete_with_stream_id=stream_id,
+ )
+
+ self._simple_update_one_txn(
+ txn,
+ table="stats_incremental_position",
+ keyvalues={},
+ updatevalues={"stream_id": stream_id},
+ )
+
+ return self.runInteraction(
+ "bulk_update_stats_delta", _bulk_update_stats_delta_txn
)
- def _update_stats_txn(self, txn, stats_type, stats_id, ts, fields):
- table, id_col = TYPE_TO_ROOM[stats_type]
- return self._simple_upsert_txn(
- txn, table=table, keyvalues={id_col: stats_id, "ts": ts}, values=fields
+ def update_stats_delta(
+ self,
+ ts,
+ stats_type,
+ stats_id,
+ fields,
+ complete_with_stream_id,
+ absolute_field_overrides=None,
+ ):
+ """
+ Updates the statistics for a subject, with a delta (difference/relative
+ change).
+
+ Args:
+ ts (int): timestamp of the change
+ stats_type (str): "room" or "user" – the kind of subject
+ stats_id (str): the subject's ID (room ID or user ID)
+ fields (dict[str, int]): Deltas of stats values.
+ complete_with_stream_id (int, optional):
+ If supplied, converts an incomplete row into a complete row,
+ with the supplied stream_id marked as the stream_id where the
+ row was completed.
+ absolute_field_overrides (dict[str, int]): Current stats values
+ (i.e. not deltas) of absolute fields.
+ Does not work with per-slice fields.
+ """
+
+ return self.runInteraction(
+ "update_stats_delta",
+ self._update_stats_delta_txn,
+ ts,
+ stats_type,
+ stats_id,
+ fields,
+ complete_with_stream_id=complete_with_stream_id,
+ absolute_field_overrides=absolute_field_overrides,
)
- def update_stats_delta(self, ts, stats_type, stats_id, field, value):
- def _update_stats_delta(txn):
- table, id_col = TYPE_TO_ROOM[stats_type]
-
- sql = (
- "SELECT * FROM %s"
- " WHERE %s=? and ts=("
- " SELECT MAX(ts) FROM %s"
- " WHERE %s=?"
- ")"
- ) % (table, id_col, table, id_col)
- txn.execute(sql, (stats_id, stats_id))
- rows = self.cursor_to_dict(txn)
- if len(rows) == 0:
- # silently skip as we don't have anything to apply a delta to yet.
- # this tries to minimise any race between the initial sync and
- # subsequent deltas arriving.
- return
-
- current_ts = ts
- latest_ts = rows[0]["ts"]
- if current_ts < latest_ts:
- # This one is in the past, but we're just encountering it now.
- # Mark it as part of the current bucket.
- current_ts = latest_ts
- elif ts != latest_ts:
- # we have to copy our absolute counters over to the new entry.
- values = {
- key: rows[0][key] for key in ABSOLUTE_STATS_FIELDS[stats_type]
- }
- values[id_col] = stats_id
- values["ts"] = ts
- values["bucket_size"] = self.stats_bucket_size
-
- self._simple_insert_txn(txn, table=table, values=values)
-
- # actually update the new value
- if stats_type in ABSOLUTE_STATS_FIELDS[stats_type]:
- self._simple_update_txn(
- txn,
- table=table,
- keyvalues={id_col: stats_id, "ts": current_ts},
- updatevalues={field: value},
+ def _update_stats_delta_txn(
+ self,
+ txn,
+ ts,
+ stats_type,
+ stats_id,
+ fields,
+ complete_with_stream_id,
+ absolute_field_overrides=None,
+ ):
+ if absolute_field_overrides is None:
+ absolute_field_overrides = {}
+
+ table, id_col = TYPE_TO_TABLE[stats_type]
+
+ quantised_ts = self.quantise_stats_time(int(ts))
+ end_ts = quantised_ts + self.stats_bucket_size
+
+ # Let's be paranoid and check that all the given field names are known
+ abs_field_names = ABSOLUTE_STATS_FIELDS[stats_type]
+ slice_field_names = PER_SLICE_FIELDS[stats_type]
+ for field in chain(fields.keys(), absolute_field_overrides.keys()):
+ if field not in abs_field_names and field not in slice_field_names:
+ # guard against potential SQL injection dodginess
+ raise ValueError(
+ "%s is not a recognised field"
+ " for stats type %s" % (field, stats_type)
)
+
+ # Per slice fields do not get added to the _current table
+
+ # This calculates the deltas (`field = field + ?` values)
+ # for absolute fields,
+ # * defaulting to 0 if not specified
+ # (required for the INSERT part of upserting to work)
+ # * omitting overrides specified in `absolute_field_overrides`
+ deltas_of_absolute_fields = {
+ key: fields.get(key, 0)
+ for key in abs_field_names
+ if key not in absolute_field_overrides
+ }
+
+ # Keep the delta stream ID field up to date
+ absolute_field_overrides = absolute_field_overrides.copy()
+ absolute_field_overrides["completed_delta_stream_id"] = complete_with_stream_id
+
+ # first upsert the `_current` table
+ self._upsert_with_additive_relatives_txn(
+ txn=txn,
+ table=table + "_current",
+ keyvalues={id_col: stats_id},
+ absolutes=absolute_field_overrides,
+ additive_relatives=deltas_of_absolute_fields,
+ )
+
+ per_slice_additive_relatives = {
+ key: fields.get(key, 0) for key in slice_field_names
+ }
+ self._upsert_copy_from_table_with_additive_relatives_txn(
+ txn=txn,
+ into_table=table + "_historical",
+ keyvalues={id_col: stats_id},
+ extra_dst_insvalues={"bucket_size": self.stats_bucket_size},
+ extra_dst_keyvalues={"end_ts": end_ts},
+ additive_relatives=per_slice_additive_relatives,
+ src_table=table + "_current",
+ copy_columns=abs_field_names,
+ )
+
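
# A hedged, standalone sketch of the delta application logic in
# _update_stats_delta_txn above (not the real storage layer): plain fields
# are added onto whatever is already stored, while absolute overrides
# replace the stored value outright.
def apply_stats_delta(current, fields, absolute_field_overrides=None):
    absolute_field_overrides = absolute_field_overrides or {}
    updated = dict(current)
    for key, delta in fields.items():
        updated[key] = updated.get(key, 0) + delta
    updated.update(absolute_field_overrides)
    return updated

assert apply_stats_delta({"joined_members": 3}, {"joined_members": 1}) == {
    "joined_members": 4
}
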
+ def _upsert_with_additive_relatives_txn(
+ self, txn, table, keyvalues, absolutes, additive_relatives
+ ):
+ """Used to update values in the stats tables.
+
+ This is basically a slightly convoluted upsert that *adds* to any
+ existing rows.
+
+ Args:
+ txn
+ table (str): Table name
+ keyvalues (dict[str, any]): Row-identifying key values
+ absolutes (dict[str, any]): Absolute (set) fields
+ additive_relatives (dict[str, int]): Fields that will be added onto
+ if existing row present.
+ """
+ if self.database_engine.can_native_upsert:
+ absolute_updates = [
+ "%(field)s = EXCLUDED.%(field)s" % {"field": field}
+ for field in absolutes.keys()
+ ]
+
+ relative_updates = [
+ "%(field)s = EXCLUDED.%(field)s + %(table)s.%(field)s"
+ % {"table": table, "field": field}
+ for field in additive_relatives.keys()
+ ]
+
+ insert_cols = []
+ qargs = []
+
+ for (key, val) in chain(
+ keyvalues.items(), absolutes.items(), additive_relatives.items()
+ ):
+ insert_cols.append(key)
+ qargs.append(val)
+
+ sql = """
+ INSERT INTO %(table)s (%(insert_cols_cs)s)
+ VALUES (%(insert_vals_qs)s)
+ ON CONFLICT (%(key_columns)s) DO UPDATE SET %(updates)s
+ """ % {
+ "table": table,
+ "insert_cols_cs": ", ".join(insert_cols),
+ "insert_vals_qs": ", ".join(
+ ["?"] * (len(keyvalues) + len(absolutes) + len(additive_relatives))
+ ),
+ "key_columns": ", ".join(keyvalues),
+ "updates": ", ".join(chain(absolute_updates, relative_updates)),
+ }
+
+ txn.execute(sql, qargs)
+ else:
+ self.database_engine.lock_table(txn, table)
+ retcols = list(chain(absolutes.keys(), additive_relatives.keys()))
+ current_row = self._simple_select_one_txn(
+ txn, table, keyvalues, retcols, allow_none=True
+ )
+ if current_row is None:
+ merged_dict = {**keyvalues, **absolutes, **additive_relatives}
+ self._simple_insert_txn(txn, table, merged_dict)
else:
- sql = ("UPDATE %s SET %s=%s+? WHERE %s=? AND ts=?") % (
- table,
- field,
- field,
- id_col,
+ for (key, val) in additive_relatives.items():
+ current_row[key] += val
+ current_row.update(absolutes)
+ self._simple_update_one_txn(txn, table, keyvalues, current_row)
+
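
# Illustrative only: the "insert or add" semantics of the upsert above,
# modelled on a plain dict keyed by the row's key values. Table and field
# names here are placeholders, not the real schema.
def additive_upsert(table, key, absolutes, additive_relatives):
    row = table.get(key)
    if row is None:
        table[key] = {**absolutes, **additive_relatives}
    else:
        for field, delta in additive_relatives.items():
            row[field] = row.get(field, 0) + delta
        row.update(absolutes)

stats = {}
additive_upsert(stats, "!room:hs", {"completed_delta_stream_id": 7}, {"total_events": 2})
additive_upsert(stats, "!room:hs", {"completed_delta_stream_id": 8}, {"total_events": 3})
assert stats["!room:hs"] == {"completed_delta_stream_id": 8, "total_events": 5}
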
+ def _upsert_copy_from_table_with_additive_relatives_txn(
+ self,
+ txn,
+ into_table,
+ keyvalues,
+ extra_dst_keyvalues,
+ extra_dst_insvalues,
+ additive_relatives,
+ src_table,
+ copy_columns,
+ ):
+ """Updates the historic stats table with latest updates.
+
+ This involves copying "absolute" fields from the `_current` table, and
+ adding relative fields to any existing values.
+
+ Args:
+ txn: Transaction
+ into_table (str): The destination table to UPSERT the row into
+ keyvalues (dict[str, any]): Row-identifying key values
+ extra_dst_keyvalues (dict[str, any]): Additional keyvalues
+ for `into_table`.
+ extra_dst_insvalues (dict[str, any]): Additional values to insert
+ on new row creation for `into_table`.
+ additive_relatives (dict[str, any]): Fields that will be added onto
+ if existing row present. (Must be disjoint from copy_columns.)
+ src_table (str): The source table to copy from
+ copy_columns (iterable[str]): The list of columns to copy
+ """
+ if self.database_engine.can_native_upsert:
+ ins_columns = chain(
+ keyvalues,
+ copy_columns,
+ additive_relatives,
+ extra_dst_keyvalues,
+ extra_dst_insvalues,
+ )
+ sel_exprs = chain(
+ keyvalues,
+ copy_columns,
+ (
+ "?"
+ for _ in chain(
+ additive_relatives, extra_dst_keyvalues, extra_dst_insvalues
+ )
+ ),
+ )
+ keyvalues_where = ("%s = ?" % f for f in keyvalues)
+
+ sets_cc = ("%s = EXCLUDED.%s" % (f, f) for f in copy_columns)
+ sets_ar = (
+ "%s = EXCLUDED.%s + %s.%s" % (f, f, into_table, f)
+ for f in additive_relatives
+ )
+
+ sql = """
+ INSERT INTO %(into_table)s (%(ins_columns)s)
+ SELECT %(sel_exprs)s
+ FROM %(src_table)s
+ WHERE %(keyvalues_where)s
+ ON CONFLICT (%(keyvalues)s)
+ DO UPDATE SET %(sets)s
+ """ % {
+ "into_table": into_table,
+ "ins_columns": ", ".join(ins_columns),
+ "sel_exprs": ", ".join(sel_exprs),
+ "keyvalues_where": " AND ".join(keyvalues_where),
+ "src_table": src_table,
+ "keyvalues": ", ".join(
+ chain(keyvalues.keys(), extra_dst_keyvalues.keys())
+ ),
+ "sets": ", ".join(chain(sets_cc, sets_ar)),
+ }
+
+ qargs = list(
+ chain(
+ additive_relatives.values(),
+ extra_dst_keyvalues.values(),
+ extra_dst_insvalues.values(),
+ keyvalues.values(),
)
- txn.execute(sql, (value, stats_id, current_ts))
+ )
+ txn.execute(sql, qargs)
+ else:
+ self.database_engine.lock_table(txn, into_table)
+ src_row = self._simple_select_one_txn(
+ txn, src_table, keyvalues, copy_columns
+ )
+ all_dest_keyvalues = {**keyvalues, **extra_dst_keyvalues}
+ dest_current_row = self._simple_select_one_txn(
+ txn,
+ into_table,
+ keyvalues=all_dest_keyvalues,
+ retcols=list(chain(additive_relatives.keys(), copy_columns)),
+ allow_none=True,
+ )
+
+ if dest_current_row is None:
+ merged_dict = {
+ **keyvalues,
+ **extra_dst_keyvalues,
+ **extra_dst_insvalues,
+ **src_row,
+ **additive_relatives,
+ }
+ self._simple_insert_txn(txn, into_table, merged_dict)
+ else:
+ for (key, val) in additive_relatives.items():
+ src_row[key] = dest_current_row[key] + val
+ self._simple_update_txn(txn, into_table, all_dest_keyvalues, src_row)
+
+ def get_changes_room_total_events_and_bytes(self, min_pos, max_pos):
+ """Fetches the counts of events in the given range of stream IDs.
+
+ Args:
+ min_pos (int)
+ max_pos (int)
+
+ Returns:
+ Deferred[dict[str, dict[str, int]]]: Mapping of room ID to field
+ changes.
+ """
+
+ return self.runInteraction(
+ "stats_incremental_total_events_and_bytes",
+ self.get_changes_room_total_events_and_bytes_txn,
+ min_pos,
+ max_pos,
+ )
- return self.runInteraction("update_stats_delta", _update_stats_delta)
+ def get_changes_room_total_events_and_bytes_txn(self, txn, low_pos, high_pos):
+ """Gets the total_events and total_event_bytes counts for rooms and
+ senders, in a range of stream_orderings (including backfilled events).
+
+ Args:
+ txn
+ low_pos (int): Low stream ordering
+ high_pos (int): High stream ordering
+
+ Returns:
+ tuple[dict[str, dict[str, int]], dict[str, dict[str, int]]]: The
+ room and user deltas for total_events/total_event_bytes in the
+ format of `stats_id` -> fields
+ """
+
+ if low_pos >= high_pos:
+ # nothing to do here.
+ return {}, {}
+
+ if isinstance(self.database_engine, PostgresEngine):
+ new_bytes_expression = "OCTET_LENGTH(json)"
+ else:
+ new_bytes_expression = "LENGTH(CAST(json AS BLOB))"
+
+ sql = """
+ SELECT events.room_id, COUNT(*) AS new_events, SUM(%s) AS new_bytes
+ FROM events INNER JOIN event_json USING (event_id)
+ WHERE (? < stream_ordering AND stream_ordering <= ?)
+ OR (? <= stream_ordering AND stream_ordering <= ?)
+ GROUP BY events.room_id
+ """ % (
+ new_bytes_expression,
+ )
+
+ txn.execute(sql, (low_pos, high_pos, -high_pos, -low_pos))
+
+ room_deltas = {
+ room_id: {"total_events": new_events, "total_event_bytes": new_bytes}
+ for room_id, new_events, new_bytes in txn
+ }
+
+ sql = """
+ SELECT events.sender, COUNT(*) AS new_events, SUM(%s) AS new_bytes
+ FROM events INNER JOIN event_json USING (event_id)
+ WHERE (? < stream_ordering AND stream_ordering <= ?)
+ OR (? <= stream_ordering AND stream_ordering <= ?)
+ GROUP BY events.sender
+ """ % (
+ new_bytes_expression,
+ )
+
+ txn.execute(sql, (low_pos, high_pos, -high_pos, -low_pos))
+
+ user_deltas = {
+ user_id: {"total_events": new_events, "total_event_bytes": new_bytes}
+ for user_id, new_events, new_bytes in txn
+ if self.hs.is_mine_id(user_id)
+ }
+
+ return room_deltas, user_deltas
+
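
# Why the WHERE clause above checks two ranges: backfilled events are stored
# with negative stream orderings, so the live window (low, high] also has to
# cover its mirrored backfill window. A minimal sketch of the predicate:
def in_window(stream_ordering, low_pos, high_pos):
    return (low_pos < stream_ordering <= high_pos) or (
        -high_pos <= stream_ordering <= -low_pos
    )

assert in_window(-7, 5, 10)       # backfilled event inside the window
assert not in_window(3, 5, 10)    # live event below the window
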
+ @defer.inlineCallbacks
+ def _calculate_and_set_initial_state_for_room(self, room_id):
+ """Calculate and insert an entry into room_stats_current.
+
+ Args:
+ room_id (str)
+
+ Returns:
+ Deferred[tuple[dict, dict, int]]: A tuple of room state, membership
+ counts and stream position.
+ """
+
+ def _fetch_current_state_stats(txn):
+ pos = self.get_room_max_stream_ordering()
+
+ rows = self._simple_select_many_txn(
+ txn,
+ table="current_state_events",
+ column="type",
+ iterable=[
+ EventTypes.Create,
+ EventTypes.JoinRules,
+ EventTypes.RoomHistoryVisibility,
+ EventTypes.Encryption,
+ EventTypes.Name,
+ EventTypes.Topic,
+ EventTypes.RoomAvatar,
+ EventTypes.CanonicalAlias,
+ ],
+ keyvalues={"room_id": room_id, "state_key": ""},
+ retcols=["event_id"],
+ )
+
+ event_ids = [row["event_id"] for row in rows]
+
+ txn.execute(
+ """
+ SELECT membership, count(*) FROM current_state_events
+ WHERE room_id = ? AND type = 'm.room.member'
+ GROUP BY membership
+ """,
+ (room_id,),
+ )
+ membership_counts = {membership: cnt for membership, cnt in txn}
+
+ txn.execute(
+ """
+ SELECT COALESCE(count(*), 0) FROM current_state_events
+ WHERE room_id = ?
+ """,
+ (room_id,),
+ )
+
+ current_state_events_count, = txn.fetchone()
+
+ users_in_room = self.get_users_in_room_txn(txn, room_id)
+
+ return (
+ event_ids,
+ membership_counts,
+ current_state_events_count,
+ users_in_room,
+ pos,
+ )
+
+ (
+ event_ids,
+ membership_counts,
+ current_state_events_count,
+ users_in_room,
+ pos,
+ ) = yield self.runInteraction(
+ "get_initial_state_for_room", _fetch_current_state_stats
+ )
+
+ state_event_map = yield self.get_events(event_ids, get_prev_content=False)
+
+ room_state = {
+ "join_rules": None,
+ "history_visibility": None,
+ "encryption": None,
+ "name": None,
+ "topic": None,
+ "avatar": None,
+ "canonical_alias": None,
+ "is_federatable": True,
+ }
+
+ for event in state_event_map.values():
+ if event.type == EventTypes.JoinRules:
+ room_state["join_rules"] = event.content.get("join_rule")
+ elif event.type == EventTypes.RoomHistoryVisibility:
+ room_state["history_visibility"] = event.content.get(
+ "history_visibility"
+ )
+ elif event.type == EventTypes.Encryption:
+ room_state["encryption"] = event.content.get("algorithm")
+ elif event.type == EventTypes.Name:
+ room_state["name"] = event.content.get("name")
+ elif event.type == EventTypes.Topic:
+ room_state["topic"] = event.content.get("topic")
+ elif event.type == EventTypes.RoomAvatar:
+ room_state["avatar"] = event.content.get("url")
+ elif event.type == EventTypes.CanonicalAlias:
+ room_state["canonical_alias"] = event.content.get("alias")
+ elif event.type == EventTypes.Create:
+ room_state["is_federatable"] = (
+ event.content.get("m.federate", True) is True
+ )
+
+ yield self.update_room_state(room_id, room_state)
+
+ local_users_in_room = [u for u in users_in_room if self.hs.is_mine_id(u)]
+
+ yield self.update_stats_delta(
+ ts=self.clock.time_msec(),
+ stats_type="room",
+ stats_id=room_id,
+ fields={},
+ complete_with_stream_id=pos,
+ absolute_field_overrides={
+ "current_state_events": current_state_events_count,
+ "joined_members": membership_counts.get(Membership.JOIN, 0),
+ "invited_members": membership_counts.get(Membership.INVITE, 0),
+ "left_members": membership_counts.get(Membership.LEAVE, 0),
+ "banned_members": membership_counts.get(Membership.BAN, 0),
+ "local_users_in_room": len(local_users_in_room),
+ },
+ )
+
+ @defer.inlineCallbacks
+ def _calculate_and_set_initial_state_for_user(self, user_id):
+ def _calculate_and_set_initial_state_for_user_txn(txn):
+ pos = self._get_max_stream_id_in_current_state_deltas_txn(txn)
+
+ txn.execute(
+ """
+ SELECT COUNT(distinct room_id) FROM current_state_events
+ WHERE type = 'm.room.member' AND state_key = ?
+ AND membership = 'join'
+ """,
+ (user_id,),
+ )
+ count, = txn.fetchone()
+ return count, pos
+
+ joined_rooms, pos = yield self.runInteraction(
+ "calculate_and_set_initial_state_for_user",
+ _calculate_and_set_initial_state_for_user_txn,
+ )
+
+ yield self.update_stats_delta(
+ ts=self.clock.time_msec(),
+ stats_type="user",
+ stats_id=user_id,
+ fields={},
+ complete_with_stream_id=pos,
+ absolute_field_overrides={"joined_rooms": joined_rooms},
+ )
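
# For illustration, the membership-count query in
# _calculate_and_set_initial_state_for_room reduces to a Counter over the
# `membership` column of the room's current state; the rows below are
# simplified stand-ins for current_state_events.
from collections import Counter

current_state = [
    {"type": "m.room.member", "membership": "join"},
    {"type": "m.room.member", "membership": "join"},
    {"type": "m.room.member", "membership": "invite"},
    {"type": "m.room.name"},
]
membership_counts = Counter(
    row["membership"] for row in current_state if row["type"] == "m.room.member"
)
# Counter({'join': 2, 'invite': 1}); .get(membership, 0) then fills the
# joined/invited/left/banned fields as in the code above.
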
diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py
index 856c2ee8d8..490454f19a 100644
--- a/synapse/storage/stream.py
+++ b/synapse/storage/stream.py
@@ -364,7 +364,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
the chunk of events returned.
"""
if from_key == to_key:
- return ([], from_key)
+ return [], from_key
from_id = RoomStreamToken.parse_stream_token(from_key).stream
to_id = RoomStreamToken.parse_stream_token(to_key).stream
@@ -374,7 +374,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
)
if not has_changed:
- return ([], from_key)
+ return [], from_key
def f(txn):
sql = (
@@ -407,7 +407,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
# get.
key = from_key
- return (ret, key)
+ return ret, key
@defer.inlineCallbacks
def get_membership_changes_for_user(self, user_id, from_key, to_key):
@@ -496,7 +496,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
"""
# Allow a zero limit here, and no-op.
if limit == 0:
- return ([], end_token)
+ return [], end_token
end_token = RoomStreamToken.parse(end_token)
@@ -511,7 +511,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
# We want to return the results in ascending order.
rows.reverse()
- return (rows, token)
+ return rows, token
def get_room_event_after_stream_ordering(self, room_id, stream_ordering):
"""Gets details of the first event in a room at or after a stream ordering
@@ -783,7 +783,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
events = yield self.get_events_as_list(event_ids)
- return (upper_bound, events)
+ return upper_bound, events
def get_federation_out_pos(self, typ):
return self._simple_select_one_onecol(
diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py
index b3c3bf55bc..d81ace0ece 100644
--- a/synapse/storage/transactions.py
+++ b/synapse/storage/transactions.py
@@ -250,26 +250,6 @@ class TransactionStore(SQLBaseStore):
},
)
- def get_destinations_needing_retry(self):
- """Get all destinations which are due a retry for sending a transaction.
-
- Returns:
- list: A list of dicts
- """
-
- return self.runInteraction(
- "get_destinations_needing_retry", self._get_destinations_needing_retry
- )
-
- def _get_destinations_needing_retry(self, txn):
- query = (
- "SELECT * FROM destinations"
- " WHERE retry_last_ts > 0 and retry_next_ts < ?"
- )
-
- txn.execute(query, (self._clock.time_msec(),))
- return self.cursor_to_dict(txn)
-
def _start_cleanup_transactions(self):
return run_as_background_process(
"cleanup_transactions", self._cleanup_transactions
diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py
index f1c8d99419..cbb0a4810a 100644
--- a/synapse/storage/util/id_generators.py
+++ b/synapse/storage/util/id_generators.py
@@ -195,6 +195,6 @@ class ChainedIdGenerator(object):
with self._lock:
if self._unfinished_ids:
stream_id, chained_id = self._unfinished_ids[0]
- return (stream_id - 1, chained_id)
+ return stream_id - 1, chained_id
- return (self._current_max, self.chained_generator.get_current_token())
+ return self._current_max, self.chained_generator.get_current_token()
diff --git a/synapse/streams/config.py b/synapse/streams/config.py
index f7f5906a99..02994ab2a5 100644
--- a/synapse/streams/config.py
+++ b/synapse/streams/config.py
@@ -37,7 +37,7 @@ class SourcePaginationConfig(object):
self.limit = min(int(limit), MAX_LIMIT) if limit is not None else None
def __repr__(self):
- return ("StreamConfig(from_key=%r, to_key=%r, direction=%r, limit=%r)") % (
+ return "StreamConfig(from_key=%r, to_key=%r, direction=%r, limit=%r)" % (
self.from_key,
self.to_key,
self.direction,
diff --git a/synapse/third_party_rules/access_rules.py b/synapse/third_party_rules/access_rules.py
new file mode 100644
index 0000000000..253bba664b
--- /dev/null
+++ b/synapse/third_party_rules/access_rules.py
@@ -0,0 +1,586 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import email.utils
+
+from twisted.internet import defer
+
+from synapse.api.constants import EventTypes, JoinRules, Membership, RoomCreationPreset
+from synapse.api.errors import SynapseError
+from synapse.config._base import ConfigError
+from synapse.types import get_domain_from_id
+
+ACCESS_RULES_TYPE = "im.vector.room.access_rules"
+ACCESS_RULE_RESTRICTED = "restricted"
+ACCESS_RULE_UNRESTRICTED = "unrestricted"
+ACCESS_RULE_DIRECT = "direct"
+
+VALID_ACCESS_RULES = (
+ ACCESS_RULE_DIRECT,
+ ACCESS_RULE_RESTRICTED,
+ ACCESS_RULE_UNRESTRICTED,
+)
+
+# Rules to which we need to apply the power levels restrictions.
+#
+# These are all of the rules that neither:
+# * forbid users from joining based on a server blacklist (which means that there
+# is no need to apply power level restrictions), nor
+# * target direct chats (since we allow both users to be room admins in this case).
+#
+# The power-level restrictions, when they are applied, prevent the following:
+# * the default power level for users (users_default) being set to anything other than 0.
+# * a non-default power level being assigned to any user which would be forbidden from
+# joining a restricted room.
+RULES_WITH_RESTRICTED_POWER_LEVELS = (ACCESS_RULE_UNRESTRICTED,)
+
+
+class RoomAccessRules(object):
+ """Implementation of the ThirdPartyEventRules module API that allows federation admins
+ to define custom rules for specific events and actions.
+ Implements the custom behaviour for the "im.vector.room.access_rules" state event.
+
+ Takes a config in the format:
+
+ third_party_event_rules:
+ module: third_party_rules.RoomAccessRules
+ config:
+ # List of domains (server names) that can't be invited to rooms if the
+ # "restricted" rule is set. Defaults to an empty list.
+ domains_forbidden_when_restricted: []
+
+ # Identity server to use when checking the HS an email address belongs to
+ # using the /info endpoint. Required.
+ id_server: "vector.im"
+
+ Don't forget to consider if you can invite users from your own domain.
+ """
+
+ def __init__(self, config, http_client):
+ self.http_client = http_client
+
+ self.id_server = config["id_server"]
+
+ self.domains_forbidden_when_restricted = config.get(
+ "domains_forbidden_when_restricted", []
+ )
+
+ @staticmethod
+ def parse_config(config):
+ if "id_server" in config:
+ return config
+ else:
+ raise ConfigError("No IS for event rules TchapEventRules")
+
+ def on_create_room(self, requester, config, is_requester_admin):
+ """Implements synapse.events.ThirdPartyEventRules.on_create_room
+
+ Checks if a im.vector.room.access_rules event is being set during room creation.
+ If yes, make sure the event is correct. Otherwise, append an event with the
+ default rule to the initial state.
+ """
+ is_direct = config.get("is_direct")
+ preset = config.get("preset")
+ access_rule = None
+ join_rule = None
+
+ # If there's a rules event in the initial state, check if it complies with the
+ # spec for im.vector.room.access_rules and deny the request if not.
+ for event in config.get("initial_state", []):
+ if event["type"] == ACCESS_RULES_TYPE:
+ access_rule = event["content"].get("rule")
+
+ # Make sure the event has a valid content.
+ if access_rule is None:
+ raise SynapseError(400, "Invalid access rule")
+
+ # Make sure the rule name is valid.
+ if access_rule not in VALID_ACCESS_RULES:
+ raise SynapseError(400, "Invalid access rule")
+
+ # Make sure the rule is "direct" if the room is a direct chat.
+ if (is_direct and access_rule != ACCESS_RULE_DIRECT) or (
+ access_rule == ACCESS_RULE_DIRECT and not is_direct
+ ):
+ raise SynapseError(400, "Invalid access rule")
+
+ if event["type"] == EventTypes.JoinRules:
+ join_rule = event["content"].get("join_rule")
+
+ if access_rule is None:
+ # If there's no access rules event in the initial state, create one with the
+ # default setting.
+ if is_direct:
+ default_rule = ACCESS_RULE_DIRECT
+ else:
+ # If the default value for non-direct chat changes, we should make another
+ # case here for rooms created with either a "public" join_rule or the
+ # "public_chat" preset to make sure those keep defaulting to "restricted"
+ default_rule = ACCESS_RULE_RESTRICTED
+
+ if not config.get("initial_state"):
+ config["initial_state"] = []
+
+ config["initial_state"].append(
+ {
+ "type": ACCESS_RULES_TYPE,
+ "state_key": "",
+ "content": {"rule": default_rule},
+ }
+ )
+
+ access_rule = default_rule
+
+ # Check that the preset or the join rule in use is compatible with the access
+ # rule, whether it's a user-defined one or the default one (i.e. if it involves
+ # a "public" join rule, the access rule must be "restricted").
+ if (
+ join_rule == JoinRules.PUBLIC or preset == RoomCreationPreset.PUBLIC_CHAT
+ ) and access_rule != ACCESS_RULE_RESTRICTED:
+ raise SynapseError(400, "Invalid access rule")
+
+ # Check if the creator can override values for the power levels.
+ allowed = self._is_power_level_content_allowed(
+ config.get("power_level_content_override", {}), access_rule
+ )
+ if not allowed:
+ raise SynapseError(400, "Invalid power levels content override")
+
+ # Second loop for events we need to know the current rule to process.
+ for event in config.get("initial_state", []):
+ if event["type"] == EventTypes.PowerLevels:
+ allowed = self._is_power_level_content_allowed(
+ event["content"], access_rule
+ )
+ if not allowed:
+ raise SynapseError(400, "Invalid power levels content")
+
+ @defer.inlineCallbacks
+ def check_threepid_can_be_invited(self, medium, address, state_events):
+ """Implements synapse.events.ThirdPartyEventRules.check_threepid_can_be_invited
+
+ Check if a threepid can be invited to the room via a 3PID invite given the current
+ rules and the threepid's address, by retrieving the HS it's mapped to from the
+ configured identity server, and checking if we can invite users from it.
+ """
+ rule = self._get_rule_from_state(state_events)
+
+ if medium != "email":
+ defer.returnValue(False)
+
+ if rule != ACCESS_RULE_RESTRICTED:
+ # Only "restricted" requires filtering 3PID invites. We don't need to do
+ # anything for "direct" here, because only "restricted" requires filtering
+ # based on the HS the address is mapped to.
+ defer.returnValue(True)
+
+ parsed_address = email.utils.parseaddr(address)[1]
+ if parsed_address != address:
+ # Avoid reproducing the security issue described here:
+ # https://matrix.org/blog/2019/04/18/security-update-sydent-1-0-2
+ # It's probably not worth it but let's just be overly safe here.
+ defer.returnValue(False)
+
+ # Get the HS this address belongs to from the identity server.
+ res = yield self.http_client.get_json(
+ "https://%s/_matrix/identity/api/v1/info" % (self.id_server,),
+ {"medium": medium, "address": address},
+ )
+
+ # Look for a domain that's not forbidden from being invited.
+ if not res.get("hs"):
+ defer.returnValue(False)
+ if res.get("hs") in self.domains_forbidden_when_restricted:
+ defer.returnValue(False)
+
+ defer.returnValue(True)
+
+ def check_event_allowed(self, event, state_events):
+ """Implements synapse.events.ThirdPartyEventRules.check_event_allowed
+
+ Checks the event's type and the current rule and calls the right function to
+ determine whether the event can be allowed.
+ """
+ if event.type == ACCESS_RULES_TYPE:
+ return self._on_rules_change(event, state_events)
+
+ # We need to know the rule to apply when processing the event types below.
+ rule = self._get_rule_from_state(state_events)
+
+ if event.type == EventTypes.PowerLevels:
+ return self._is_power_level_content_allowed(event.content, rule)
+
+ if event.type == EventTypes.Member or event.type == EventTypes.ThirdPartyInvite:
+ return self._on_membership_or_invite(event, rule, state_events)
+
+ if event.type == EventTypes.JoinRules:
+ return self._on_join_rule_change(event, rule)
+
+ if event.type == EventTypes.RoomAvatar:
+ return self._on_room_avatar_change(event, rule)
+
+ if event.type == EventTypes.Name:
+ return self._on_room_name_change(event, rule)
+
+ if event.type == EventTypes.Topic:
+ return self._on_room_topic_change(event, rule)
+
+ return True
+
+ def _on_rules_change(self, event, state_events):
+ """Implement the checks and behaviour specified on allowing or forbidding a new
+ im.vector.room.access_rules event.
+
+ Args:
+ event (synapse.events.EventBase): The event to check.
+ state_events (dict[tuple[event type, state key], EventBase]): The state of the
+ room before the event was sent.
+ Returns:
+ bool, True if the event can be allowed, False otherwise.
+ """
+ new_rule = event.content.get("rule")
+
+ # Check for invalid values.
+ if new_rule not in VALID_ACCESS_RULES:
+ return False
+
+ # We must not allow rooms with the "public" join rule to be given any other access
+ # rule than "restricted".
+ join_rule = self._get_join_rule_from_state(state_events)
+ if join_rule == JoinRules.PUBLIC and new_rule != ACCESS_RULE_RESTRICTED:
+ return False
+
+ # Make sure we don't apply "direct" if the room has more than two members.
+ if new_rule == ACCESS_RULE_DIRECT:
+ existing_members, threepid_tokens = self._get_members_and_tokens_from_state(
+ state_events
+ )
+
+ if len(existing_members) > 2 or len(threepid_tokens) > 1:
+ return False
+
+ prev_rules_event = state_events.get((ACCESS_RULES_TYPE, ""))
+
+ # Now that we know the new rule doesn't break the "direct" case, we can allow any
+ # new rule in rooms that had none before.
+ if prev_rules_event is None:
+ return True
+
+ prev_rule = prev_rules_event.content.get("rule")
+
+ # Currently, we can only go from "restricted" to "unrestricted".
+ if prev_rule == ACCESS_RULE_RESTRICTED and new_rule == ACCESS_RULE_UNRESTRICTED:
+ return True
+
+ return False
+
+ def _on_membership_or_invite(self, event, rule, state_events):
+ """Applies the correct rule for incoming m.room.member and
+ m.room.third_party_invite events.
+
+ Args:
+ event (synapse.events.EventBase): The event to check.
+ rule (str): The name of the rule to apply.
+ state_events (dict[tuple[event type, state key], EventBase]): The state of the
+ room before the event was sent.
+ Returns:
+ bool, True if the event can be allowed, False otherwise.
+ """
+ if rule == ACCESS_RULE_RESTRICTED:
+ ret = self._on_membership_or_invite_restricted(event)
+ elif rule == ACCESS_RULE_UNRESTRICTED:
+ ret = self._on_membership_or_invite_unrestricted()
+ elif rule == ACCESS_RULE_DIRECT:
+ ret = self._on_membership_or_invite_direct(event, state_events)
+ else:
+ # We currently apply the default (restricted) if we don't know the rule, we
+ # might want to change that in the future.
+ ret = self._on_membership_or_invite_restricted(event)
+
+ return ret
+
+ def _on_membership_or_invite_restricted(self, event):
+ """Implements the checks and behaviour specified for the "restricted" rule.
+
+ "restricted" currently means that users can only invite users if their server is
+ included in a limited list of domains.
+
+ Args:
+ event (synapse.events.EventBase): The event to check.
+ Returns:
+ bool, True if the event can be allowed, False otherwise.
+ """
+ # We're not applying the rules on m.room.third_party_invite events here because
+ # the filtering on threepids is done in check_threepid_can_be_invited, which is
+ # called before check_event_allowed.
+ if event.type == EventTypes.ThirdPartyInvite:
+ return True
+
+ # We only need to process "join" and "invite" memberships, in order to be backward
+ # compatible, e.g. if a user from a blacklisted server joined a restricted room
+ # before the rules started being enforced on the server, that user must be able to
+ # leave it.
+ if event.membership not in [Membership.JOIN, Membership.INVITE]:
+ return True
+
+ invitee_domain = get_domain_from_id(event.state_key)
+ return invitee_domain not in self.domains_forbidden_when_restricted
+
+ def _on_membership_or_invite_unrestricted(self):
+ """Implements the checks and behaviour specified for the "unrestricted" rule.
+
+ "unrestricted" currently means that every event is allowed.
+
+ Returns:
+ bool, True if the event can be allowed, False otherwise.
+ """
+ return True
+
+ def _on_membership_or_invite_direct(self, event, state_events):
+ """Implements the checks and behaviour specified for the "direct" rule.
+
+ "direct" currently means that no member is allowed apart from the two initial
+ members the room was created for (i.e. the room's creator and their first
+ invitee).
+
+ Args:
+ event (synapse.events.EventBase): The event to check.
+ state_events (dict[tuple[event type, state key], EventBase]): The state of the
+ room before the event was sent.
+ Returns:
+ bool, True if the event can be allowed, False otherwise.
+ """
+ # Get the room memberships and 3PID invite tokens from the room's state.
+ existing_members, threepid_tokens = self._get_members_and_tokens_from_state(
+ state_events
+ )
+
+ # There should never be more than one 3PID invite in the room state: if the second
+ # original user came and left, and we're inviting them using their email address,
+ # given we know they have a Matrix account bound to the address (so they could
+ # join the first time), Synapse will successfully look it up before attempting to
+ # store an invite on the IS.
+ if len(threepid_tokens) == 1 and event.type == EventTypes.ThirdPartyInvite:
+ # If we already have a 3PID invite in flight, don't accept another one, unless
+ # the new one has the same invite token as its state key. This is because 3PID
+ # invite revocations must be allowed, and a revocation is basically a new 3PID
+ # invite event with an empty content and the same token as the invite it
+ # revokes.
+ return event.state_key in threepid_tokens
+
+ if len(existing_members) == 2:
+ # If the user is one of the two initial users of the room, Synapse would have
+ # looked them up successfully and thus sent an m.room.member event here instead
+ # of m.room.third_party_invite.
+ if event.type == EventTypes.ThirdPartyInvite:
+ return False
+
+ # We can only have m.room.member events here. The rule in this case is to only
+ # allow the event if its target is one of the initial two members in the room,
+ # i.e. the state key of one of the two m.room.member states in the room.
+ return event.state_key in existing_members
+
+ # We're alone in the room (and always have been) and there's one 3PID invite in
+ # flight.
+ if len(existing_members) == 1 and len(threepid_tokens) == 1:
+ # We can only have m.room.member events here. In this case, we can only allow
+ # the event if it's either an m.room.member from the joined user (we can
+ # assume that the only m.room.member event is a join, otherwise we wouldn't be
+ # able to send an event to the room) or an invite event whose target is the
+ # invited user.
+ target = event.state_key
+ is_from_threepid_invite = self._is_invite_from_threepid(
+ event, threepid_tokens[0]
+ )
+ if is_from_threepid_invite or target == existing_members[0]:
+ return True
+
+ return False
+
+ return True
+
+ def _is_power_level_content_allowed(self, content, access_rule):
+ """Check if a given power levels event is permitted under the given access rule.
+
+ It shouldn't be allowed if it either changes the default PL to a non-0 value or
+ gives a non-0 PL to a user that would have been forbidden from joining the room
+ under a more restrictive access rule.
+
+ Args:
+ content (dict): The content of the m.room.power_levels event to check.
+ access_rule (str): The access rule in place in this room.
+ Returns:
+ bool, True if the event can be allowed, False otherwise.
+ """
+ # Check if we need to apply the restrictions with the current rule.
+ if access_rule not in RULES_WITH_RESTRICTED_POWER_LEVELS:
+ return True
+
+ # If users_default is explicitly set to a non-0 value, deny the event.
+ users_default = content.get("users_default", 0)
+ if users_default:
+ return False
+
+ users = content.get("users", {})
+ for user_id, power_level in users.items():
+ server_name = get_domain_from_id(user_id)
+ # Check the domain against the blacklist. If found, and the PL isn't 0, deny
+ # the event.
+ if (
+ server_name in self.domains_forbidden_when_restricted
+ and power_level != 0
+ ):
+ return False
+
+ return True
+
+ def _on_join_rule_change(self, event, rule):
+ """Check whether a join rule change is allowed. A join rule change is always
+ allowed unless the new join rule is "public" and the current access rule isn't
+ "restricted".
+ The rationale is that external users (those whose server would be denied access
+ to rooms enforcing the "restricted" access rule) should always rely on non-
+ external users for access to rooms, therefore they shouldn't be able to access
+ rooms that don't require an invite to be joined.
+
+ Note that we currently rely on the default access rule being "restricted": during
+ room creation, the m.room.join_rules event will be sent *before* the
+ im.vector.room.access_rules one, so the access rule that will be considered here
+ in this case will be the default "restricted" one. This is fine since the
+ "restricted" access rule allows any value for the join rule, but we should keep
+ that in mind if we need to change the default access rule in the future.
+
+ Args:
+ event (synapse.events.EventBase): The event to check.
+ rule (str): The name of the rule to apply.
+ Returns:
+ bool, True if the event can be allowed, False otherwise.
+ """
+ if event.content.get("join_rule") == JoinRules.PUBLIC:
+ return rule == ACCESS_RULE_RESTRICTED
+
+ return True
+
+ def _on_room_avatar_change(self, event, rule):
+ """Check whether a change of room avatar is allowed.
+ The current rule is to forbid such a change in direct chats but allow it
+ everywhere else.
+
+ Args:
+ event (synapse.events.EventBase): The event to check.
+ rule (str): The name of the rule to apply.
+ Returns:
+ bool, True if the event can be allowed, False otherwise.
+ """
+ return rule != ACCESS_RULE_DIRECT
+
+ def _on_room_name_change(self, event, rule):
+ """Check whether a change of room name is allowed.
+ The current rule is to forbid such a change in direct chats but allow it
+ everywhere else.
+
+ Args:
+ event (synapse.events.EventBase): The event to check.
+ rule (str): The name of the rule to apply.
+ Returns:
+ bool, True if the event can be allowed, False otherwise.
+ """
+ return rule != ACCESS_RULE_DIRECT
+
+ def _on_room_topic_change(self, event, rule):
+ """Check whether a change of room topic is allowed.
+ The current rule is to forbid such a change in direct chats but allow it
+ everywhere else.
+
+ Args:
+ event (synapse.events.EventBase): The event to check.
+ rule (str): The name of the rule to apply.
+ Returns:
+ bool, True if the event can be allowed, False otherwise.
+ """
+ return rule != ACCESS_RULE_DIRECT
+
+ @staticmethod
+ def _get_rule_from_state(state_events):
+ """Extract the rule to be applied from the given set of state events.
+
+ Args:
+ state_events (dict[tuple[event type, state key], EventBase]): The set of state
+ events.
+ Returns:
+ str, the name of the rule (either "direct", "restricted" or "unrestricted")
+ """
+ access_rules = state_events.get((ACCESS_RULES_TYPE, ""))
+ if access_rules is None:
+ rule = ACCESS_RULE_RESTRICTED
+ else:
+ rule = access_rules.content.get("rule")
+ return rule
+
+ @staticmethod
+ def _get_join_rule_from_state(state_events):
+ """Extract the room's join rule from the given set of state events.
+
+ Args:
+ state_events (dict[tuple[event type, state key], EventBase]): The set of state
+ events.
+ Returns:
+ str, the name of the join rule (either "public" or "invite"), or
+ None if the room has no join rules event
+ """
+ join_rule_event = state_events.get((EventTypes.JoinRules, ""))
+ if join_rule_event is None:
+ return None
+ return join_rule_event.content.get("join_rule")
+
+ @staticmethod
+ def _get_members_and_tokens_from_state(state_events):
+ """Retrieves from a list of state events the list of users that have a
+ m.room.member event in the room, and the tokens of 3PID invites in the room.
+
+ Args:
+ state_events (dict[tuple[event type, state key], EventBase]): The set of state
+ events.
+ Returns:
+ existing_members (list[str]): List of targets of the m.room.member events in
+ the state.
+ threepid_invite_tokens (list[str]): List of tokens of the 3PID invites in the
+ state.
+ """
+ existing_members = []
+ threepid_invite_tokens = []
+ for key, state_event in state_events.items():
+ if key[0] == EventTypes.Member and state_event.content:
+ existing_members.append(state_event.state_key)
+ if key[0] == EventTypes.ThirdPartyInvite and state_event.content:
+ # Don't include revoked invites.
+ threepid_invite_tokens.append(state_event.state_key)
+
+ return existing_members, threepid_invite_tokens
+
+ @staticmethod
+ def _is_invite_from_threepid(invite, threepid_invite_token):
+ """Checks whether the given invite follows the given 3PID invite.
+
+ Args:
+ invite (EventBase): The m.room.member event with "invite" membership.
+ threepid_invite_token (str): The state key from the 3PID invite.
+ """
+ token = (
+ invite.content.get("third_party_invite", {})
+ .get("signed", {})
+ .get("token", "")
+ )
+
+ return token == threepid_invite_token
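
# A hedged end-to-end sketch of how RoomAccessRules resolves the effective
# rule: default to "restricted" when no im.vector.room.access_rules state
# event exists, otherwise read its "rule" field. FakeEvent is a simplified
# stand-in for synapse.events.EventBase.
class FakeEvent:
    def __init__(self, content):
        self.content = content

state_events = {
    ("im.vector.room.access_rules", ""): FakeEvent({"rule": "unrestricted"})
}

rules_event = state_events.get(("im.vector.room.access_rules", ""))
rule = rules_event.content.get("rule") if rules_event else "restricted"
assert rule == "unrestricted"
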
diff --git a/synapse/types.py b/synapse/types.py
index 51eadb6ad4..94c01b0a18 100644
--- a/synapse/types.py
+++ b/synapse/types.py
@@ -16,6 +16,8 @@ import re
import string
from collections import namedtuple
+from six.moves import filter
+
import attr
from synapse.api.errors import SynapseError
@@ -235,6 +237,19 @@ def contains_invalid_mxid_characters(localpart):
return any(c not in mxid_localpart_allowed_characters for c in localpart)
+def strip_invalid_mxid_characters(localpart):
+ """Removes any invalid characters from an mxid
+
+ Args:
+ localpart (basestring): the localpart to be stripped
+
+ Returns:
+ localpart (basestring): the localpart having been stripped
+ """
+ filtered = filter(lambda c: c in mxid_localpart_allowed_characters, localpart)
+ return "".join(filtered)
+
+
UPPER_CASE_PATTERN = re.compile(b"[A-Z_]")
# the following is a pattern which matches '=', and bytes which are not allowed in a mxid
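
# Standalone sketch of strip_invalid_mxid_characters; the allowed-character
# set below is an assumption standing in for mxid_localpart_allowed_characters
# (lowercase letters, digits and a few symbols).
import string

allowed = set(string.ascii_lowercase + string.digits + "_-./=")

def strip_invalid_mxid_characters(localpart):
    return "".join(c for c in localpart if c in allowed)

assert strip_invalid_mxid_characters("Alice Smith!") == "licemith"
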
diff --git a/synapse/util/caches/ttlcache.py b/synapse/util/caches/ttlcache.py
index 2af8ca43b1..99646c7cf0 100644
--- a/synapse/util/caches/ttlcache.py
+++ b/synapse/util/caches/ttlcache.py
@@ -55,7 +55,7 @@ class TTLCache(object):
if e != SENTINEL:
self._expiry_list.remove(e)
- entry = _CacheEntry(expiry_time=expiry, key=key, value=value)
+ entry = _CacheEntry(expiry_time=expiry, ttl=ttl, key=key, value=value)
self._data[key] = entry
self._expiry_list.add(entry)
@@ -87,7 +87,8 @@ class TTLCache(object):
key: key to look up
Returns:
- Tuple[Any, float]: the value from the cache, and the expiry time
+ Tuple[Any, float, float]: the value from the cache, the expiry time
+ and the TTL
Raises:
KeyError if the entry is not found
@@ -99,7 +100,7 @@ class TTLCache(object):
self._metrics.inc_misses()
raise
self._metrics.inc_hits()
- return e.value, e.expiry_time
+ return e.value, e.expiry_time, e.ttl
def pop(self, key, default=SENTINEL):
"""Remove a value from the cache
@@ -158,5 +159,6 @@ class _CacheEntry(object):
# expiry_time is the first attribute, so that entries are sorted by expiry.
expiry_time = attr.ib()
+ ttl = attr.ib()
key = attr.ib()
value = attr.ib()
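
# Minimal standalone model of the change above: cache entries now remember
# their TTL, so lookups can return (value, expiry_time, ttl). This is not the
# real TTLCache API, just a stdlib-only sketch of the new shape.
import time

class MiniTTLCache:
    def __init__(self):
        self._data = {}

    def set(self, key, value, ttl):
        self._data[key] = (value, time.time() + ttl, ttl)

    def get_with_expiry(self, key):
        # raises KeyError if absent, as in the original
        return self._data[key]

cache = MiniTTLCache()
cache.set("destination:example.org", "result", ttl=30)
value, expiry_time, ttl = cache.get_with_expiry("destination:example.org")
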
diff --git a/synapse/util/hash.py b/synapse/util/hash.py
new file mode 100644
index 0000000000..359168704e
--- /dev/null
+++ b/synapse/util/hash.py
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import hashlib
+
+import unpaddedbase64
+
+
+def sha256_and_url_safe_base64(input_text):
+ """SHA256 hash an input string, encode the digest as url-safe base64, and
+ return
+
+ :param input_text: string to hash
+ :type input_text: str
+
+ :returns a sha256 hashed and url-safe base64 encoded digest
+ :rtype: str
+ """
+ digest = hashlib.sha256(input_text.encode()).digest()
+ return unpaddedbase64.encode_base64(digest, urlsafe=True)
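
# Equivalent stdlib-only form, assuming unpaddedbase64.encode_base64(...,
# urlsafe=True) is url-safe base64 with the trailing "=" padding stripped:
import base64
import hashlib

def sha256_and_url_safe_base64(input_text):
    digest = hashlib.sha256(input_text.encode()).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

print(sha256_and_url_safe_base64("alice@example.com email"))
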
diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py
index 0862b5ca5a..b740913b58 100644
--- a/synapse/util/retryutils.py
+++ b/synapse/util/retryutils.py
@@ -22,6 +22,15 @@ from synapse.api.errors import CodeMessageException
logger = logging.getLogger(__name__)
+# the initial backoff, after the first transaction fails
+MIN_RETRY_INTERVAL = 10 * 60 * 1000
+
+# how much we multiply the backoff by after each subsequent fail
+RETRY_MULTIPLIER = 5
+
+# a cap on the backoff. (Essentially none)
+MAX_RETRY_INTERVAL = 2 ** 63
+
class NotRetryingDestination(Exception):
def __init__(self, retry_last_ts, retry_interval, destination):
@@ -112,9 +121,6 @@ class RetryDestinationLimiter(object):
clock,
store,
retry_interval,
- min_retry_interval=10 * 60 * 1000,
- max_retry_interval=24 * 60 * 60 * 1000,
- multiplier_retry_interval=5,
backoff_on_404=False,
backoff_on_failure=True,
):
@@ -130,12 +136,6 @@ class RetryDestinationLimiter(object):
retry_interval (int): The next retry interval taken from the
database in milliseconds, or zero if the last request was
successful.
- min_retry_interval (int): The minimum retry interval to use after
- a failed request, in milliseconds.
- max_retry_interval (int): The maximum retry interval to use after
- a failed request, in milliseconds.
- multiplier_retry_interval (int): The multiplier to use to increase
- the retry interval after a failed request.
backoff_on_404 (bool): Back off if we get a 404
backoff_on_failure (bool): set to False if we should not increase the
@@ -146,9 +146,6 @@ class RetryDestinationLimiter(object):
self.destination = destination
self.retry_interval = retry_interval
- self.min_retry_interval = min_retry_interval
- self.max_retry_interval = max_retry_interval
- self.multiplier_retry_interval = multiplier_retry_interval
self.backoff_on_404 = backoff_on_404
self.backoff_on_failure = backoff_on_failure
@@ -196,13 +193,14 @@ class RetryDestinationLimiter(object):
else:
# We couldn't connect.
if self.retry_interval:
- self.retry_interval *= self.multiplier_retry_interval
- self.retry_interval *= int(random.uniform(0.8, 1.4))
+ self.retry_interval = int(
+ self.retry_interval * RETRY_MULTIPLIER * random.uniform(0.8, 1.4)
+ )
- if self.retry_interval >= self.max_retry_interval:
- self.retry_interval = self.max_retry_interval
+ if self.retry_interval >= MAX_RETRY_INTERVAL:
+ self.retry_interval = MAX_RETRY_INTERVAL
else:
- self.retry_interval = self.min_retry_interval
+ self.retry_interval = MIN_RETRY_INTERVAL
logger.info(
"Connection to %s was unsuccessful (%s(%s)); backoff now %i",
diff --git a/synapse/util/stringutils.py b/synapse/util/stringutils.py
index 982c6d81ca..6a2464cab3 100644
--- a/synapse/util/stringutils.py
+++ b/synapse/util/stringutils.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,12 +15,15 @@
# limitations under the License.
import random
+import re
import string
import six
from six import PY2, PY3
from six.moves import range
+from synapse.api.errors import Codes, SynapseError
+
_string_with_symbols = string.digits + string.ascii_letters + ".,;:^&*-_+=#~@"
# random_string and random_string_with_symbols are used for a range of things,
@@ -27,6 +31,8 @@ _string_with_symbols = string.digits + string.ascii_letters + ".,;:^&*-_+=#~@"
# we get cryptographically-secure randoms.
rand = random.SystemRandom()
+client_secret_regex = re.compile(r"^[0-9a-zA-Z.=_-]+$")
+
def random_string(length):
return "".join(rand.choice(string.ascii_letters) for _ in range(length))
@@ -109,3 +115,11 @@ def exception_to_unicode(e):
return msg.decode("utf-8", errors="replace")
else:
return msg
+
+
+def assert_valid_client_secret(client_secret):
+ """Validate that a given string matches the client_secret regex defined by the spec"""
+ if client_secret_regex.match(client_secret) is None:
+ raise SynapseError(
+ 400, "Invalid client_secret parameter", errcode=Codes.INVALID_PARAM
+ )
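
# A quick, runnable demonstration of the client_secret pattern added above
# (same regex, with a plain print instead of raising SynapseError):
import re

client_secret_regex = re.compile(r"^[0-9a-zA-Z.=_-]+$")

for secret in ("this.is_a.valid-secret", "not valid!", ""):
    print(repr(secret), bool(client_secret_regex.match(secret)))
# True, False, False
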
diff --git a/synapse/util/threepids.py b/synapse/util/threepids.py
index 3ec1dfb0c2..34ce7cac16 100644
--- a/synapse/util/threepids.py
+++ b/synapse/util/threepids.py
@@ -16,11 +16,14 @@
import logging
import re
+from twisted.internet import defer
+
logger = logging.getLogger(__name__)
+@defer.inlineCallbacks
def check_3pid_allowed(hs, medium, address):
- """Checks whether a given format of 3PID is allowed to be used on this HS
+ """Checks whether a given 3PID is allowed to be used on this HS
Args:
hs (synapse.server.HomeServer): server
@@ -28,9 +31,36 @@ def check_3pid_allowed(hs, medium, address):
address (str): address within that medium (e.g. "wotan@matrix.org")
msisdns need to first have been canonicalised
Returns:
- bool: whether the 3PID medium/address is allowed to be added to this HS
+ Deferred[bool]: whether the 3PID medium/address is allowed to be added to this HS
"""
+ if hs.config.check_is_for_allowed_local_3pids:
+ data = yield hs.get_simple_http_client().get_json(
+ "https://%s%s"
+ % (
+ hs.config.check_is_for_allowed_local_3pids,
+ "/_matrix/identity/api/v1/internal-info",
+ ),
+ {"medium": medium, "address": address},
+ )
+
+ # Check for invalid response
+ if "hs" not in data and "shadow_hs" not in data:
+ defer.returnValue(False)
+
+ # Check if this user is intended to register for this homeserver
+ if (
+ data.get("hs") != hs.config.server_name
+ and data.get("shadow_hs") != hs.config.server_name
+ ):
+ defer.returnValue(False)
+
+ if data.get("requires_invite", False) and not data.get("invited", False):
+ # Requires an invite but hasn't been invited
+ defer.returnValue(False)
+
+ defer.returnValue(True)
+
if hs.config.allowed_local_3pids:
for constraint in hs.config.allowed_local_3pids:
logger.debug(
@@ -43,8 +73,8 @@ def check_3pid_allowed(hs, medium, address):
if medium == constraint["medium"] and re.match(
constraint["pattern"], address
):
- return True
+ defer.returnValue(True)
else:
- return True
+ defer.returnValue(True)
- return False
+ defer.returnValue(False)
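
# The identity-server response handling above, reduced to a pure function for
# clarity. Field names come from the diff; `server_name` stands in for
# hs.config.server_name.
def is_3pid_allowed_for_hs(data, server_name):
    if "hs" not in data and "shadow_hs" not in data:
        return False
    if data.get("hs") != server_name and data.get("shadow_hs") != server_name:
        return False
    if data.get("requires_invite", False) and not data.get("invited", False):
        return False
    return True

assert is_3pid_allowed_for_hs({"hs": "example.com"}, "example.com")
assert not is_3pid_allowed_for_hs({"hs": "other.com"}, "example.com")
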
diff --git a/synapse/visibility.py b/synapse/visibility.py
index bf0f1eebd8..a19011b793 100644
--- a/synapse/visibility.py
+++ b/synapse/visibility.py
@@ -43,7 +43,12 @@ MEMBERSHIP_PRIORITY = (
@defer.inlineCallbacks
def filter_events_for_client(
- store, user_id, events, is_peeking=False, always_include_ids=frozenset()
+ store,
+ user_id,
+ events,
+ is_peeking=False,
+ always_include_ids=frozenset(),
+ apply_retention_policies=True,
):
"""
Check which events a user is allowed to see
@@ -59,6 +64,10 @@ def filter_events_for_client(
events
always_include_ids (set(event_id)): set of event ids to specifically
include (unless sender is ignored)
+ apply_retention_policies (bool): Whether to filter out events that are older than
+ allowed by the room's retention policy. Useful when this function is called
+ to e.g. check whether a user should be allowed to see the state at a given
+ event rather than to know if it should send an event to a user's client(s).
Returns:
Deferred[list[synapse.events.EventBase]]
@@ -86,6 +95,15 @@ def filter_events_for_client(
erased_senders = yield store.are_users_erased((e.sender for e in events))
+ if apply_retention_policies:
+ room_ids = set(e.room_id for e in events)
+ retention_policies = {}
+
+ for room_id in room_ids:
+ retention_policies[room_id] = (
+ yield store.get_retention_policy_for_room(room_id)
+ )
+
def allowed(event):
"""
Args:
@@ -103,6 +121,18 @@ def filter_events_for_client(
if not event.is_state() and event.sender in ignore_list:
return None
+ # Don't try to apply the room's retention policy if the event is a state event, as
+ # MSC1763 states that retention is only considered for non-state events.
+ if apply_retention_policies and not event.is_state():
+ retention_policy = retention_policies[event.room_id]
+ max_lifetime = retention_policy.get("max_lifetime")
+
+ if max_lifetime is not None:
+ oldest_allowed_ts = store.clock.time_msec() - max_lifetime
+
+ if event.origin_server_ts < oldest_allowed_ts:
+ return None
+
if event.event_id in always_include_ids:
return event
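
# The retention check above in isolation: a non-state event is filtered out
# once it is older than now - max_lifetime (all values in milliseconds).
def is_within_retention(origin_server_ts, now_ms, max_lifetime_ms):
    if max_lifetime_ms is None:
        return True
    return origin_server_ts >= now_ms - max_lifetime_ms

assert is_within_retention(1000, now_ms=10000, max_lifetime_ms=9500)
assert not is_within_retention(1000, now_ms=10000, max_lifetime_ms=500)
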
diff --git a/synctl b/synctl
index 794de99ea3..a9629cf0e8 100755
--- a/synctl
+++ b/synctl
@@ -30,6 +30,8 @@ from six import iteritems
import yaml
+from synapse.config import find_config_files
+
SYNAPSE = [sys.executable, "-B", "-m", "synapse.app.homeserver"]
GREEN = "\x1b[1;32m"
@@ -135,7 +137,8 @@ def main():
"configfile",
nargs="?",
default="homeserver.yaml",
- help="the homeserver config file, defaults to homeserver.yaml",
+ help="the homeserver config file. Defaults to homeserver.yaml. May also be"
+ " a directory with *.yaml files",
)
parser.add_argument(
"-w", "--worker", metavar="WORKERCONFIG", help="start or stop a single worker"
@@ -176,8 +179,12 @@ def main():
)
sys.exit(1)
- with open(configfile) as stream:
- config = yaml.safe_load(stream)
+ config_files = find_config_files([configfile])
+ config = {}
+ for config_file in config_files:
+ with open(config_file) as file_stream:
+ yaml_config = yaml.safe_load(file_stream)
+ config.update(yaml_config)
pidfile = config["pid_file"]
cache_factor = config.get("synctl_cache_factor")
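
synctl now merges every file that find_config_files yields with a plain top-level dict.update, so a key defined in a later file silently overrides the same key from an earlier one. A standalone sketch of that behaviour (the directory path is hypothetical):

    import yaml

    from synapse.config import find_config_files

    config = {}
    for path in find_config_files(["/etc/synapse/conf.d"]):  # hypothetical dir
        with open(path) as f:
            config.update(yaml.safe_load(f))
    # If two files set "pid_file", the one yielded later wins.
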
diff --git a/sytest-blacklist b/sytest-blacklist
index 11785fd43f..7d6b1d0a2f 100644
--- a/sytest-blacklist
+++ b/sytest-blacklist
@@ -1,6 +1,6 @@
# This file serves as a blacklist for SyTest tests that we expect will fail in
# Synapse.
-#
+#
# Each line of this file is scanned by sytest during a run and if the line
# exactly matches the name of a test, it will be marked as "expected fail",
# meaning the test will still run, but failure will not mark the entire test
@@ -29,3 +29,24 @@ Enabling an unknown default rule fails with 404
# Blacklisted due to https://github.com/matrix-org/synapse/issues/1663
New federated private chats get full presence information (SYN-115)
+
+# flaky test
+If remote user leaves room we no longer receive device updates
+
+# flaky test
+Can re-join room if re-invited
+
+# flaky test
+Forgotten room messages cannot be paginated
+
+# flaky test
+Local device key changes get to remote servers
+
+# flaky test
+Old leaves are present in gapped incremental syncs
+
+# flaky test on workers
+Old members are included in gappy incr LL sync if they start speaking
+
+# flaky test on workers
+Presence changes to UNAVAILABLE are reported to remote room members
diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py
index c0cb8ef296..6121efcfa9 100644
--- a/tests/api/test_auth.py
+++ b/tests/api/test_auth.py
@@ -21,6 +21,7 @@ from twisted.internet import defer
import synapse.handlers.auth
from synapse.api.auth import Auth
+from synapse.api.constants import UserTypes
from synapse.api.errors import (
AuthError,
Codes,
@@ -336,6 +337,23 @@ class AuthTestCase(unittest.TestCase):
yield self.auth.check_auth_blocking()
@defer.inlineCallbacks
+ def test_blocking_mau__depending_on_user_type(self):
+ self.hs.config.max_mau_value = 50
+ self.hs.config.limit_usage_by_mau = True
+
+ self.store.get_monthly_active_count = Mock(return_value=defer.succeed(100))
+ # Support users allowed
+ yield self.auth.check_auth_blocking(user_type=UserTypes.SUPPORT)
+ self.store.get_monthly_active_count = Mock(return_value=defer.succeed(100))
+ # Bots not allowed
+ with self.assertRaises(ResourceLimitError):
+ yield self.auth.check_auth_blocking(user_type=UserTypes.BOT)
+ self.store.get_monthly_active_count = Mock(return_value=defer.succeed(100))
+ # Real users not allowed
+ with self.assertRaises(ResourceLimitError):
+ yield self.auth.check_auth_blocking()
+
+ @defer.inlineCallbacks
def test_reserved_threepid(self):
self.hs.config.limit_usage_by_mau = True
self.hs.config.max_mau_value = 1
diff --git a/tests/appservice/test_scheduler.py b/tests/appservice/test_scheduler.py
index 04b8c2c07c..52f89d3f83 100644
--- a/tests/appservice/test_scheduler.py
+++ b/tests/appservice/test_scheduler.py
@@ -37,11 +37,9 @@ class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase):
self.recoverer = Mock()
self.recoverer_fn = Mock(return_value=self.recoverer)
self.txnctrl = _TransactionController(
- clock=self.clock,
- store=self.store,
- as_api=self.as_api,
- recoverer_fn=self.recoverer_fn,
+ clock=self.clock, store=self.store, as_api=self.as_api
)
+ self.txnctrl.RECOVERER_CLASS = self.recoverer_fn
def test_single_service_up_txn_sent(self):
# Test: The AS is up and the txn is successfully sent.
diff --git a/tests/config/test_database.py b/tests/config/test_database.py
new file mode 100644
index 0000000000..151d3006ac
--- /dev/null
+++ b/tests/config/test_database.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import yaml
+
+from synapse.config.database import DatabaseConfig
+
+from tests import unittest
+
+
+class DatabaseConfigTestCase(unittest.TestCase):
+ def test_database_configured_correctly_no_database_conf_param(self):
+ conf = yaml.safe_load(
+ DatabaseConfig().generate_config_section("/data_dir_path", None)
+ )
+
+ expected_database_conf = {
+ "name": "sqlite3",
+ "args": {"database": "/data_dir_path/homeserver.db"},
+ }
+
+ self.assertEqual(conf["database"], expected_database_conf)
+
+ def test_database_configured_correctly_database_conf_param(self):
+
+ database_conf = {
+ "name": "my super fast datastore",
+ "args": {
+ "user": "matrix",
+ "password": "synapse_database_password",
+ "host": "synapse_database_host",
+ "database": "matrix",
+ },
+ }
+
+ conf = yaml.safe_load(
+ DatabaseConfig().generate_config_section("/data_dir_path", database_conf)
+ )
+
+ self.assertEqual(conf["database"], database_conf)
diff --git a/tests/config/test_generate.py b/tests/config/test_generate.py
index 5017cbce85..2684e662de 100644
--- a/tests/config/test_generate.py
+++ b/tests/config/test_generate.py
@@ -17,6 +17,8 @@ import os.path
import re
import shutil
import tempfile
+from contextlib import redirect_stdout
+from io import StringIO
from synapse.config.homeserver import HomeServerConfig
@@ -32,17 +34,18 @@ class ConfigGenerationTestCase(unittest.TestCase):
shutil.rmtree(self.dir)
def test_generate_config_generates_files(self):
- HomeServerConfig.load_or_generate_config(
- "",
- [
- "--generate-config",
- "-c",
- self.file,
- "--report-stats=yes",
- "-H",
- "lemurs.win",
- ],
- )
+ with redirect_stdout(StringIO()):
+ HomeServerConfig.load_or_generate_config(
+ "",
+ [
+ "--generate-config",
+ "-c",
+ self.file,
+ "--report-stats=yes",
+ "-H",
+ "lemurs.win",
+ ],
+ )
self.assertSetEqual(
set(["homeserver.yaml", "lemurs.win.log.config", "lemurs.win.signing.key"]),
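
The redirect_stdout(StringIO()) wrapper exists only to keep the generator's console output out of the test log; the pattern in isolation:

    from contextlib import redirect_stdout
    from io import StringIO

    captured = StringIO()
    with redirect_stdout(captured):
        print("noisy generator output")  # swallowed, not printed
    assert captured.getvalue() == "noisy generator output\n"
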
diff --git a/tests/config/test_load.py b/tests/config/test_load.py
index 6bfc1970ad..b3e557bd6a 100644
--- a/tests/config/test_load.py
+++ b/tests/config/test_load.py
@@ -15,6 +15,8 @@
import os.path
import shutil
import tempfile
+from contextlib import redirect_stdout
+from io import StringIO
import yaml
@@ -26,7 +28,6 @@ from tests import unittest
class ConfigLoadingTestCase(unittest.TestCase):
def setUp(self):
self.dir = tempfile.mkdtemp()
- print(self.dir)
self.file = os.path.join(self.dir, "homeserver.yaml")
def tearDown(self):
@@ -94,18 +95,27 @@ class ConfigLoadingTestCase(unittest.TestCase):
)
self.assertTrue(config.enable_registration)
+ def test_stats_enabled(self):
+ self.generate_config_and_remove_lines_containing("enable_metrics")
+ self.add_lines_to_config(["enable_metrics: true"])
+
+ # The default Metrics Flags are off by default.
+ config = HomeServerConfig.load_config("", ["-c", self.file])
+ self.assertFalse(config.metrics_flags.known_servers)
+
def generate_config(self):
- HomeServerConfig.load_or_generate_config(
- "",
- [
- "--generate-config",
- "-c",
- self.file,
- "--report-stats=yes",
- "-H",
- "lemurs.win",
- ],
- )
+ with redirect_stdout(StringIO()):
+ HomeServerConfig.load_or_generate_config(
+ "",
+ [
+ "--generate-config",
+ "-c",
+ self.file,
+ "--report-stats=yes",
+ "-H",
+ "lemurs.win",
+ ],
+ )
def generate_config_and_remove_lines_containing(self, needle):
self.generate_config()
diff --git a/tests/config/test_server.py b/tests/config/test_server.py
index 1ca5ea54ca..a10d017120 100644
--- a/tests/config/test_server.py
+++ b/tests/config/test_server.py
@@ -13,7 +13,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from synapse.config.server import is_threepid_reserved
+import yaml
+
+from synapse.config.server import ServerConfig, is_threepid_reserved
from tests import unittest
@@ -29,3 +31,100 @@ class ServerConfigTestCase(unittest.TestCase):
self.assertTrue(is_threepid_reserved(config, user1))
self.assertFalse(is_threepid_reserved(config, user3))
self.assertFalse(is_threepid_reserved(config, user1_msisdn))
+
+ def test_unsecure_listener_no_listeners_open_private_ports_false(self):
+ conf = yaml.safe_load(
+ ServerConfig().generate_config_section(
+ "che.org", "/data_dir_path", False, None
+ )
+ )
+
+ expected_listeners = [
+ {
+ "port": 8008,
+ "tls": False,
+ "type": "http",
+ "x_forwarded": True,
+ "bind_addresses": ["::1", "127.0.0.1"],
+ "resources": [{"names": ["client", "federation"], "compress": False}],
+ }
+ ]
+
+ self.assertEqual(conf["listeners"], expected_listeners)
+
+ def test_unsecure_listener_no_listeners_open_private_ports_true(self):
+ conf = yaml.safe_load(
+ ServerConfig().generate_config_section(
+ "che.org", "/data_dir_path", True, None
+ )
+ )
+
+ expected_listeners = [
+ {
+ "port": 8008,
+ "tls": False,
+ "type": "http",
+ "x_forwarded": True,
+ "resources": [{"names": ["client", "federation"], "compress": False}],
+ }
+ ]
+
+ self.assertEqual(conf["listeners"], expected_listeners)
+
+ def test_listeners_set_correctly_open_private_ports_false(self):
+ listeners = [
+ {
+ "port": 8448,
+ "resources": [{"names": ["federation"]}],
+ "tls": True,
+ "type": "http",
+ },
+ {
+ "port": 443,
+ "resources": [{"names": ["client"]}],
+ "tls": False,
+ "type": "http",
+ },
+ ]
+
+ conf = yaml.safe_load(
+ ServerConfig().generate_config_section(
+ "this.one.listens", "/data_dir_path", True, listeners
+ )
+ )
+
+ self.assertEqual(conf["listeners"], listeners)
+
+ def test_listeners_set_correctly_open_private_ports_true(self):
+ listeners = [
+ {
+ "port": 8448,
+ "resources": [{"names": ["federation"]}],
+ "tls": True,
+ "type": "http",
+ },
+ {
+ "port": 443,
+ "resources": [{"names": ["client"]}],
+ "tls": False,
+ "type": "http",
+ },
+ {
+ "port": 1243,
+ "resources": [{"names": ["client"]}],
+ "tls": False,
+ "type": "http",
+ "bind_addresses": ["this_one_is_bound"],
+ },
+ ]
+
+ expected_listeners = listeners.copy()
+ expected_listeners[1]["bind_addresses"] = ["::1", "127.0.0.1"]
+
+ conf = yaml.safe_load(
+ ServerConfig().generate_config_section(
+ "this.one.listens", "/data_dir_path", True, listeners
+ )
+ )
+
+ self.assertEqual(conf["listeners"], expected_listeners)
diff --git a/tests/config/test_tls.py b/tests/config/test_tls.py
index 4f8a87a3df..8e0c4b9533 100644
--- a/tests/config/test_tls.py
+++ b/tests/config/test_tls.py
@@ -16,6 +16,8 @@
import os
+import yaml
+
from OpenSSL import SSL
from synapse.config.tls import ConfigError, TlsConfig
@@ -191,3 +193,45 @@ s4niecZKPBizL6aucT59CsunNmmb5Glq8rlAcU+1ZTZZzGYqVYhF6axB9Qg=
self.assertEqual(cf._verify_ssl._options & SSL.OP_NO_TLSv1, 0)
self.assertEqual(cf._verify_ssl._options & SSL.OP_NO_TLSv1_1, 0)
self.assertEqual(cf._verify_ssl._options & SSL.OP_NO_TLSv1_2, 0)
+
+ def test_acme_disabled_in_generated_config_no_acme_domain_provided(self):
+ """
+ Checks acme is disabled by default.
+ """
+ conf = TestConfig()
+ conf.read_config(
+ yaml.safe_load(
+ TestConfig().generate_config_section(
+ "/config_dir_path",
+ "my_super_secure_server",
+ "/data_dir_path",
+ "/tls_cert_path",
+ "tls_private_key",
+ None, # This is the acme_domain
+ )
+ ),
+ "/config_dir_path",
+ )
+
+ self.assertFalse(conf.acme_enabled)
+
+ def test_acme_enabled_in_generated_config_domain_provided(self):
+ """
+ Checks acme is enabled if the acme_domain arg is set to some string.
+ """
+ conf = TestConfig()
+ conf.read_config(
+ yaml.safe_load(
+ TestConfig().generate_config_section(
+ "/config_dir_path",
+ "my_super_secure_server",
+ "/data_dir_path",
+ "/tls_cert_path",
+ "tls_private_key",
+ "my_supe_secure_server", # This is the acme_domain
+ )
+ ),
+ "/config_dir_path",
+ )
+
+ self.assertTrue(conf.acme_enabled)
diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py
index af15f4cc5a..b08be451aa 100644
--- a/tests/federation/test_federation_server.py
+++ b/tests/federation/test_federation_server.py
@@ -20,7 +20,6 @@ from synapse.federation.federation_server import server_matches_acl_event
from tests import unittest
-@unittest.DEBUG
class ServerACLsTestCase(unittest.TestCase):
def test_blacklisted_server(self):
e = _create_acl_event({"allow": ["*"], "deny": ["evil.com"]})
diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py
new file mode 100644
index 0000000000..15b694b294
--- /dev/null
+++ b/tests/handlers/test_federation.py
@@ -0,0 +1,84 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from synapse.api.constants import EventTypes
+from synapse.api.errors import AuthError, Codes
+from synapse.rest import admin
+from synapse.rest.client.v1 import login, room
+
+from tests import unittest
+
+
+class FederationTestCase(unittest.HomeserverTestCase):
+ servlets = [
+ admin.register_servlets,
+ login.register_servlets,
+ room.register_servlets,
+ ]
+
+ def make_homeserver(self, reactor, clock):
+ hs = self.setup_test_homeserver(http_client=None)
+ self.handler = hs.get_handlers().federation_handler
+ self.store = hs.get_datastore()
+ return hs
+
+ def test_exchange_revoked_invite(self):
+ user_id = self.register_user("kermit", "test")
+ tok = self.login("kermit", "test")
+
+ room_id = self.helper.create_room_as(room_creator=user_id, tok=tok)
+
+ # Send a 3PID invite event with an empty body so it's considered revoked.
+ invite_token = "sometoken"
+ self.helper.send_state(
+ room_id=room_id,
+ event_type=EventTypes.ThirdPartyInvite,
+ state_key=invite_token,
+ body={},
+ tok=tok,
+ )
+
+ d = self.handler.on_exchange_third_party_invite_request(
+ room_id=room_id,
+ event_dict={
+ "type": EventTypes.Member,
+ "room_id": room_id,
+ "sender": user_id,
+ "state_key": "@someone:example.org",
+ "content": {
+ "membership": "invite",
+ "third_party_invite": {
+ "display_name": "alice",
+ "signed": {
+ "mxid": "@alice:localhost",
+ "token": invite_token,
+ "signatures": {
+ "magic.forest": {
+ "ed25519:3": (
+ "fQpGIW1Snz+pwLZu6sTy2aHy/DYWWTspTJRPyNp0PKkymfIs"
+ "NffysMl6ObMMFdIJhk6g6pwlIqZ54rxo8SLmAg"
+ )
+ }
+ },
+ },
+ },
+ },
+ },
+ )
+
+ failure = self.get_failure(d, AuthError).value
+
+ self.assertEqual(failure.code, 403, failure)
+ self.assertEqual(failure.errcode, Codes.FORBIDDEN, failure)
+ self.assertEqual(failure.msg, "You are not invited to this room.")
diff --git a/tests/handlers/test_identity.py b/tests/handlers/test_identity.py
new file mode 100644
index 0000000000..e312f72f7c
--- /dev/null
+++ b/tests/handlers/test_identity.py
@@ -0,0 +1,102 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from mock import Mock
+
+from twisted.internet import defer
+
+import synapse.rest.admin
+from synapse.rest.client.v1 import login
+from synapse.rest.client.v2_alpha import account
+
+from tests import unittest
+
+
+class ThreepidISRewrittenURLTestCase(unittest.HomeserverTestCase):
+
+ servlets = [
+ synapse.rest.admin.register_servlets_for_client_rest_resource,
+ login.register_servlets,
+ account.register_servlets,
+ ]
+
+ def make_homeserver(self, reactor, clock):
+ self.address = "test@test"
+ self.is_server_name = "testis"
+ self.rewritten_is_url = "int.testis"
+
+ config = self.default_config()
+ config["trusted_third_party_id_servers"] = [self.is_server_name]
+ config["rewrite_identity_server_urls"] = {
+ self.is_server_name: self.rewritten_is_url
+ }
+
+ mock_http_client = Mock(spec=["post_json_get_json"])
+ mock_http_client.post_json_get_json.return_value = defer.succeed(
+ {"address": self.address, "medium": "email"}
+ )
+
+ self.hs = self.setup_test_homeserver(
+ config=config, simple_http_client=mock_http_client
+ )
+
+ return self.hs
+
+ def prepare(self, reactor, clock, hs):
+ self.user_id = self.register_user("kermit", "monkey")
+
+ def test_rewritten_id_server(self):
+ """
+ Tests that, when validating a 3PID association while rewriting the IS's server
+ name:
+ * the bind request is done against the rewritten hostname
+ * the original, non-rewritten, server name is stored in the database
+ """
+ handler = self.hs.get_handlers().identity_handler
+ post_json_get_json = self.hs.get_simple_http_client().post_json_get_json
+ store = self.hs.get_datastore()
+
+ creds = {"sid": "123", "client_secret": "some_secret"}
+
+ # Make sure processing the mocked response goes through.
+ data = self.get_success(
+ handler.bind_threepid(
+ {
+ "id_server": self.is_server_name,
+ "client_secret": creds["client_secret"],
+ "sid": creds["sid"],
+ },
+ self.user_id,
+ )
+ )
+ self.assertEqual(data.get("address"), self.address)
+
+ # Check that the request was done against the rewritten server name.
+ post_json_get_json.assert_called_once_with(
+ "https://%s/_matrix/identity/api/v1/3pid/bind" % self.rewritten_is_url,
+ {
+ "sid": creds["sid"],
+ "client_secret": creds["client_secret"],
+ "mxid": self.user_id,
+ },
+ headers={},
+ )
+
+ # Check that the original server name is saved in the database instead of the
+ # rewritten one.
+ id_servers = self.get_success(
+ store.get_id_servers_user_bound(self.user_id, "email", self.address)
+ )
+ self.assertEqual(id_servers, [self.is_server_name])
diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py
index d60c124eec..2311040201 100644
--- a/tests/handlers/test_profile.py
+++ b/tests/handlers/test_profile.py
@@ -67,13 +67,11 @@ class ProfileTestCase(unittest.TestCase):
self.bob = UserID.from_string("@4567:test")
self.alice = UserID.from_string("@alice:remote")
- yield self.store.create_profile(self.frank.localpart)
-
self.handler = hs.get_profile_handler()
@defer.inlineCallbacks
def test_get_my_name(self):
- yield self.store.set_profile_displayname(self.frank.localpart, "Frank")
+ yield self.store.set_profile_displayname(self.frank.localpart, "Frank", 1)
displayname = yield self.handler.get_displayname(self.frank)
@@ -116,8 +114,7 @@ class ProfileTestCase(unittest.TestCase):
@defer.inlineCallbacks
def test_incoming_fed_query(self):
- yield self.store.create_profile("caroline")
- yield self.store.set_profile_displayname("caroline", "Caroline")
+ yield self.store.set_profile_displayname("caroline", "Caroline", 1)
response = yield self.query_handlers["profile"](
{"user_id": "@caroline:test", "field": "displayname"}
@@ -128,7 +125,7 @@ class ProfileTestCase(unittest.TestCase):
@defer.inlineCallbacks
def test_get_my_avatar(self):
yield self.store.set_profile_avatar_url(
- self.frank.localpart, "http://my.server/me.png"
+ self.frank.localpart, "http://my.server/me.png", 1
)
avatar_url = yield self.handler.get_avatar_url(self.frank)
diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py
index 0ad0a88165..ae43c6ea7e 100644
--- a/tests/handlers/test_register.py
+++ b/tests/handlers/test_register.py
@@ -20,6 +20,7 @@ from twisted.internet import defer
from synapse.api.constants import UserTypes
from synapse.api.errors import Codes, ResourceLimitError, SynapseError
from synapse.handlers.register import RegistrationHandler
+from synapse.rest.client.v2_alpha.register import _map_email_to_displayname
from synapse.types import RoomAlias, UserID, create_requester
from .. import unittest
@@ -171,11 +172,11 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
rooms = self.get_success(self.store.get_rooms_for_user(user_id))
self.assertEqual(len(rooms), 0)
- def test_auto_create_auto_join_rooms_when_support_user_exists(self):
+ def test_auto_create_auto_join_rooms_when_user_is_not_a_real_user(self):
room_alias_str = "#room:test"
self.hs.config.auto_join_rooms = [room_alias_str]
- self.store.is_support_user = Mock(return_value=True)
+ self.store.is_real_user = Mock(return_value=False)
user_id = self.get_success(self.handler.register_user(localpart="support"))
rooms = self.get_success(self.store.get_rooms_for_user(user_id))
self.assertEqual(len(rooms), 0)
@@ -183,6 +184,31 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
room_alias = RoomAlias.from_string(room_alias_str)
self.get_failure(directory_handler.get_association(room_alias), SynapseError)
+ def test_auto_create_auto_join_rooms_when_user_is_the_first_real_user(self):
+ room_alias_str = "#room:test"
+ self.hs.config.auto_join_rooms = [room_alias_str]
+
+ self.store.count_real_users = Mock(return_value=1)
+ self.store.is_real_user = Mock(return_value=True)
+ user_id = self.get_success(self.handler.register_user(localpart="real"))
+ rooms = self.get_success(self.store.get_rooms_for_user(user_id))
+ directory_handler = self.hs.get_handlers().directory_handler
+ room_alias = RoomAlias.from_string(room_alias_str)
+ room_id = self.get_success(directory_handler.get_association(room_alias))
+
+ self.assertTrue(room_id["room_id"] in rooms)
+ self.assertEqual(len(rooms), 1)
+
+ def test_auto_create_auto_join_rooms_when_user_is_not_the_first_real_user(self):
+ room_alias_str = "#room:test"
+ self.hs.config.auto_join_rooms = [room_alias_str]
+
+ self.store.count_real_users = Mock(return_value=2)
+ self.store.is_real_user = Mock(return_value=True)
+ user_id = self.get_success(self.handler.register_user(localpart="real"))
+ rooms = self.get_success(self.store.get_rooms_for_user(user_id))
+ self.assertEqual(len(rooms), 0)
+
def test_auto_create_auto_join_where_no_consent(self):
"""Test to ensure that the first user is not auto-joined to a room if
they have not given general consent.
@@ -231,6 +257,26 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
self.handler.register_user(localpart=invalid_user_id), SynapseError
)
+ def test_email_to_displayname_mapping(self):
+ """Test that custom emails are mapped to new user displaynames correctly"""
+ self._check_mapping(
+ "jack-phillips.rivers@big-org.com", "Jack-Phillips Rivers [Big-Org]"
+ )
+
+ self._check_mapping("bob.jones@matrix.org", "Bob Jones [Tchap Admin]")
+
+ self._check_mapping("bob-jones.blabla@gouv.fr", "Bob-Jones Blabla [Gouv]")
+
+ # Multibyte unicode characters
+ self._check_mapping(
+ "j\u030a\u0065an-poppy.seed@example.com",
+ "J\u030a\u0065an-Poppy Seed [Example]",
+ )
+
+ def _check_mapping(self, i, expected):
+ result = _map_email_to_displayname(i)
+ self.assertEqual(result, expected)
+
@defer.inlineCallbacks
def get_or_create_user(self, requester, localpart, displayname, password_hash=None):
"""Creates a new user if the user does not exist,
@@ -283,4 +329,4 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
user, requester, displayname, by_admin=True
)
- return (user_id, token)
+ return user_id, token
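
From the cases above, the mapping title-cases the dot-separated parts of the email's local part (preserving hyphenated words) and appends a bracketed label derived from the domain, with special-cased domains such as matrix.org. A short usage sketch consistent with those cases:

    from synapse.rest.client.v2_alpha.register import _map_email_to_displayname

    assert _map_email_to_displayname("jane.doe@example.com") == "Jane Doe [Example]"
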
diff --git a/tests/handlers/test_stats.py b/tests/handlers/test_stats.py
index a8b858eb4f..89ec8636a6 100644
--- a/tests/handlers/test_stats.py
+++ b/tests/handlers/test_stats.py
@@ -13,16 +13,21 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from mock import Mock
-
-from twisted.internet import defer
-
-from synapse.api.constants import EventTypes, Membership
+from synapse import storage
from synapse.rest import admin
from synapse.rest.client.v1 import login, room
from tests import unittest
+# The expected number of state events in a fresh public room.
+EXPT_NUM_STATE_EVTS_IN_FRESH_PUBLIC_ROOM = 5
+
+# The expected number of state events in a fresh private room.
+#
+# Note: we increase this by 1 on the dinsic branch as we send
+# a "im.vector.room.access_rules" state event into new private rooms
+EXPT_NUM_STATE_EVTS_IN_FRESH_PRIVATE_ROOM = 7
+
class StatsRoomTests(unittest.HomeserverTestCase):
@@ -33,7 +38,6 @@ class StatsRoomTests(unittest.HomeserverTestCase):
]
def prepare(self, reactor, clock, hs):
-
self.store = hs.get_datastore()
self.handler = self.hs.get_stats_handler()
@@ -47,7 +51,7 @@ class StatsRoomTests(unittest.HomeserverTestCase):
self.get_success(
self.store._simple_insert(
"background_updates",
- {"update_name": "populate_stats_createtables", "progress_json": "{}"},
+ {"update_name": "populate_stats_prepare", "progress_json": "{}"},
)
)
self.get_success(
@@ -56,7 +60,7 @@ class StatsRoomTests(unittest.HomeserverTestCase):
{
"update_name": "populate_stats_process_rooms",
"progress_json": "{}",
- "depends_on": "populate_stats_createtables",
+ "depends_on": "populate_stats_prepare",
},
)
)
@@ -64,18 +68,58 @@ class StatsRoomTests(unittest.HomeserverTestCase):
self.store._simple_insert(
"background_updates",
{
- "update_name": "populate_stats_cleanup",
+ "update_name": "populate_stats_process_users",
"progress_json": "{}",
"depends_on": "populate_stats_process_rooms",
},
)
)
+ self.get_success(
+ self.store._simple_insert(
+ "background_updates",
+ {
+ "update_name": "populate_stats_cleanup",
+ "progress_json": "{}",
+ "depends_on": "populate_stats_process_users",
+ },
+ )
+ )
+
+ def get_all_room_state(self):
+ return self.store._simple_select_list(
+ "room_stats_state", None, retcols=("name", "topic", "canonical_alias")
+ )
+
+ def _get_current_stats(self, stats_type, stat_id):
+ table, id_col = storage.stats.TYPE_TO_TABLE[stats_type]
+
+ cols = list(storage.stats.ABSOLUTE_STATS_FIELDS[stats_type]) + list(
+ storage.stats.PER_SLICE_FIELDS[stats_type]
+ )
+
+ end_ts = self.store.quantise_stats_time(self.reactor.seconds() * 1000)
+
+ return self.get_success(
+ self.store._simple_select_one(
+ table + "_historical",
+ {id_col: stat_id, "end_ts": end_ts},
+ cols,
+ allow_none=True,
+ )
+ )
+
+ def _perform_background_initial_update(self):
+ # Do the initial population of the stats via the background update
+ self._add_background_updates()
+
+ while not self.get_success(self.store.has_completed_background_updates()):
+ self.get_success(self.store.do_next_background_update(100), by=0.1)
def test_initial_room(self):
"""
The background updates will build the table from scratch.
"""
- r = self.get_success(self.store.get_all_room_state())
+ r = self.get_success(self.get_all_room_state())
self.assertEqual(len(r), 0)
# Disable stats
@@ -91,7 +135,7 @@ class StatsRoomTests(unittest.HomeserverTestCase):
)
# Stats disabled, shouldn't have done anything
- r = self.get_success(self.store.get_all_room_state())
+ r = self.get_success(self.get_all_room_state())
self.assertEqual(len(r), 0)
# Enable stats
@@ -104,7 +148,7 @@ class StatsRoomTests(unittest.HomeserverTestCase):
while not self.get_success(self.store.has_completed_background_updates()):
self.get_success(self.store.do_next_background_update(100), by=0.1)
- r = self.get_success(self.store.get_all_room_state())
+ r = self.get_success(self.get_all_room_state())
self.assertEqual(len(r), 1)
self.assertEqual(r[0]["topic"], "foo")
@@ -114,6 +158,7 @@ class StatsRoomTests(unittest.HomeserverTestCase):
Ingestion via notify_new_event will ignore tokens that the background
update have already processed.
"""
+
self.reactor.advance(86401)
self.hs.config.stats_enabled = False
@@ -138,12 +183,18 @@ class StatsRoomTests(unittest.HomeserverTestCase):
self.hs.config.stats_enabled = True
self.handler.stats_enabled = True
self.store._all_done = False
- self.get_success(self.store.update_stats_stream_pos(None))
+ self.get_success(
+ self.store._simple_update_one(
+ table="stats_incremental_position",
+ keyvalues={},
+ updatevalues={"stream_id": 0},
+ )
+ )
self.get_success(
self.store._simple_insert(
"background_updates",
- {"update_name": "populate_stats_createtables", "progress_json": "{}"},
+ {"update_name": "populate_stats_prepare", "progress_json": "{}"},
)
)
@@ -154,6 +205,8 @@ class StatsRoomTests(unittest.HomeserverTestCase):
self.helper.invite(room=room_1, src=u1, targ=u2, tok=u1_token)
self.helper.join(room=room_1, user=u2, tok=u2_token)
# Now do the initial ingestion.
self.get_success(
self.store._simple_insert(
@@ -185,8 +238,15 @@ class StatsRoomTests(unittest.HomeserverTestCase):
self.helper.invite(room=room_1, src=u1, targ=u3, tok=u1_token)
self.helper.join(room=room_1, user=u3, tok=u3_token)
- # Get the deltas! There should be two -- day 1, and day 2.
- r = self.get_success(self.store.get_deltas_for_room(room_1, 0))
+ # We need to let the delta processor advance…
+ self.pump(10 * 60)
+
+ # Get the slices! There should be two -- day 1, and day 2.
+ r = self.get_success(self.store.get_statistics_for_subject("room", room_1, 0))
+
+ self.assertEqual(len(r), 2)
# The oldest has 2 joined members
self.assertEqual(r[-1]["joined_members"], 2)
@@ -194,111 +254,476 @@ class StatsRoomTests(unittest.HomeserverTestCase):
# The newest has 3
self.assertEqual(r[0]["joined_members"], 3)
- def test_incorrect_state_transition(self):
- """
- If the state transition is not one of (JOIN, INVITE, LEAVE, BAN) to
- (JOIN, INVITE, LEAVE, BAN), an error is raised.
- """
- events = {
- "a1": {"membership": Membership.LEAVE},
- "a2": {"membership": "not a real thing"},
- }
-
- def get_event(event_id, allow_none=True):
- m = Mock()
- m.content = events[event_id]
- d = defer.Deferred()
- self.reactor.callLater(0.0, d.callback, m)
- return d
-
- def get_received_ts(event_id):
- return defer.succeed(1)
-
- self.store.get_received_ts = get_received_ts
- self.store.get_event = get_event
-
- deltas = [
- {
- "type": EventTypes.Member,
- "state_key": "some_user",
- "room_id": "room",
- "event_id": "a1",
- "prev_event_id": "a2",
- "stream_id": 60,
- }
- ]
-
- f = self.get_failure(self.handler._handle_deltas(deltas), ValueError)
+ def test_create_user(self):
+ """
+ When we create a user, it should have statistics already ready.
+ """
+
+ u1 = self.register_user("u1", "pass")
+
+ u1stats = self._get_current_stats("user", u1)
+
+ self.assertIsNotNone(u1stats)
+
+ # not in any rooms by default
+ self.assertEqual(u1stats["joined_rooms"], 0)
+
+ def test_create_room(self):
+ """
+ When we create a room, it should have statistics already ready.
+ """
+
+ self._perform_background_initial_update()
+
+ u1 = self.register_user("u1", "pass")
+ u1token = self.login("u1", "pass")
+ r1 = self.helper.create_room_as(u1, tok=u1token)
+ r1stats = self._get_current_stats("room", r1)
+ r2 = self.helper.create_room_as(u1, tok=u1token, is_public=False)
+ r2stats = self._get_current_stats("room", r2)
+
+ self.assertIsNotNone(r1stats)
+ self.assertIsNotNone(r2stats)
+
+ # contains the default things you'd expect in a fresh room
self.assertEqual(
- f.value.args[0], "'not a real thing' is not a valid prev_membership"
- )
-
- # And the other way...
- deltas = [
- {
- "type": EventTypes.Member,
- "state_key": "some_user",
- "room_id": "room",
- "event_id": "a2",
- "prev_event_id": "a1",
- "stream_id": 100,
- }
- ]
-
- f = self.get_failure(self.handler._handle_deltas(deltas), ValueError)
+ r1stats["total_events"],
+ EXPT_NUM_STATE_EVTS_IN_FRESH_PUBLIC_ROOM,
+ "Wrong number of total_events in new room's stats!"
+ " You may need to update this if more state events are added to"
+ " the room creation process.",
+ )
self.assertEqual(
- f.value.args[0], "'not a real thing' is not a valid membership"
+ r2stats["total_events"],
+ EXPT_NUM_STATE_EVTS_IN_FRESH_PRIVATE_ROOM,
+ "Wrong number of total_events in new room's stats!"
+ " You may need to update this if more state events are added to"
+ " the room creation process.",
)
- def test_redacted_prev_event(self):
+ self.assertEqual(
+ r1stats["current_state_events"], EXPT_NUM_STATE_EVTS_IN_FRESH_PUBLIC_ROOM
+ )
+ self.assertEqual(
+ r2stats["current_state_events"], EXPT_NUM_STATE_EVTS_IN_FRESH_PRIVATE_ROOM
+ )
+
+ self.assertEqual(r1stats["joined_members"], 1)
+ self.assertEqual(r1stats["invited_members"], 0)
+ self.assertEqual(r1stats["banned_members"], 0)
+
+ self.assertEqual(r2stats["joined_members"], 1)
+ self.assertEqual(r2stats["invited_members"], 0)
+ self.assertEqual(r2stats["banned_members"], 0)
+
+ def test_send_message_increments_total_events(self):
"""
- If the prev_event does not exist, then it is assumed to be a LEAVE.
+ When we send a message, it increments total_events.
"""
+
+ self._perform_background_initial_update()
+
u1 = self.register_user("u1", "pass")
- u1_token = self.login("u1", "pass")
+ u1token = self.login("u1", "pass")
+ r1 = self.helper.create_room_as(u1, tok=u1token)
+ r1stats_ante = self._get_current_stats("room", r1)
- room_1 = self.helper.create_room_as(u1, tok=u1_token)
+ self.helper.send(r1, "hiss", tok=u1token)
- # Do the initial population of the user directory via the background update
- self._add_background_updates()
+ r1stats_post = self._get_current_stats("room", r1)
+
+ self.assertEqual(r1stats_post["total_events"] - r1stats_ante["total_events"], 1)
+
+ def test_send_state_event_nonoverwriting(self):
+ """
+ When we send a non-overwriting state event, it increments total_events AND current_state_events
+ """
+
+ self._perform_background_initial_update()
+
+ u1 = self.register_user("u1", "pass")
+ u1token = self.login("u1", "pass")
+ r1 = self.helper.create_room_as(u1, tok=u1token)
+
+ self.helper.send_state(
+ r1, "cat.hissing", {"value": True}, tok=u1token, state_key="tabby"
+ )
+
+ r1stats_ante = self._get_current_stats("room", r1)
+
+ self.helper.send_state(
+ r1, "cat.hissing", {"value": False}, tok=u1token, state_key="moggy"
+ )
+
+ r1stats_post = self._get_current_stats("room", r1)
+
+ self.assertEqual(r1stats_post["total_events"] - r1stats_ante["total_events"], 1)
+ self.assertEqual(
+ r1stats_post["current_state_events"] - r1stats_ante["current_state_events"],
+ 1,
+ )
+
+ def test_send_state_event_overwriting(self):
+ """
+ When we send an overwriting state event, it increments total_events ONLY
+ """
+
+ self._perform_background_initial_update()
+
+ u1 = self.register_user("u1", "pass")
+ u1token = self.login("u1", "pass")
+ r1 = self.helper.create_room_as(u1, tok=u1token)
+
+ self.helper.send_state(
+ r1, "cat.hissing", {"value": True}, tok=u1token, state_key="tabby"
+ )
+
+ r1stats_ante = self._get_current_stats("room", r1)
+
+ self.helper.send_state(
+ r1, "cat.hissing", {"value": False}, tok=u1token, state_key="tabby"
+ )
+
+ r1stats_post = self._get_current_stats("room", r1)
+
+ self.assertEqual(r1stats_post["total_events"] - r1stats_ante["total_events"], 1)
+ self.assertEqual(
+ r1stats_post["current_state_events"] - r1stats_ante["current_state_events"],
+ 0,
+ )
+
+ def test_join_first_time(self):
+ """
+ When a user joins a room for the first time, total_events, current_state_events and
+ joined_members should increase by exactly 1.
+ """
+
+ self._perform_background_initial_update()
+
+ u1 = self.register_user("u1", "pass")
+ u1token = self.login("u1", "pass")
+ r1 = self.helper.create_room_as(u1, tok=u1token)
+
+ u2 = self.register_user("u2", "pass")
+ u2token = self.login("u2", "pass")
+
+ r1stats_ante = self._get_current_stats("room", r1)
+
+ self.helper.join(r1, u2, tok=u2token)
+
+ r1stats_post = self._get_current_stats("room", r1)
+
+ self.assertEqual(r1stats_post["total_events"] - r1stats_ante["total_events"], 1)
+ self.assertEqual(
+ r1stats_post["current_state_events"] - r1stats_ante["current_state_events"],
+ 1,
+ )
+ self.assertEqual(
+ r1stats_post["joined_members"] - r1stats_ante["joined_members"], 1
+ )
+
+ def test_join_after_leave(self):
+ """
+ When a user joins a room after having previously left it, total_events and
+ joined_members should increase by exactly 1.
+ current_state_events should not increase.
+ left_members should decrease by exactly 1.
+ """
+
+ self._perform_background_initial_update()
+
+ u1 = self.register_user("u1", "pass")
+ u1token = self.login("u1", "pass")
+ r1 = self.helper.create_room_as(u1, tok=u1token)
+
+ u2 = self.register_user("u2", "pass")
+ u2token = self.login("u2", "pass")
+
+ self.helper.join(r1, u2, tok=u2token)
+ self.helper.leave(r1, u2, tok=u2token)
+
+ r1stats_ante = self._get_current_stats("room", r1)
+
+ self.helper.join(r1, u2, tok=u2token)
+
+ r1stats_post = self._get_current_stats("room", r1)
+
+ self.assertEqual(r1stats_post["total_events"] - r1stats_ante["total_events"], 1)
+ self.assertEqual(
+ r1stats_post["current_state_events"] - r1stats_ante["current_state_events"],
+ 0,
+ )
+ self.assertEqual(
+ r1stats_post["joined_members"] - r1stats_ante["joined_members"], +1
+ )
+ self.assertEqual(
+ r1stats_post["left_members"] - r1stats_ante["left_members"], -1
+ )
+
+ def test_invited(self):
+ """
+ When a user invites another user, current_state_events, total_events and
+ invited_members should increase by exactly 1.
+ """
+
+ self._perform_background_initial_update()
+
+ u1 = self.register_user("u1", "pass")
+ u1token = self.login("u1", "pass")
+ r1 = self.helper.create_room_as(u1, tok=u1token)
+
+ u2 = self.register_user("u2", "pass")
+
+ r1stats_ante = self._get_current_stats("room", r1)
+
+ self.helper.invite(r1, u1, u2, tok=u1token)
+
+ r1stats_post = self._get_current_stats("room", r1)
+
+ self.assertEqual(r1stats_post["total_events"] - r1stats_ante["total_events"], 1)
+ self.assertEqual(
+ r1stats_post["current_state_events"] - r1stats_ante["current_state_events"],
+ 1,
+ )
+ self.assertEqual(
+ r1stats_post["invited_members"] - r1stats_ante["invited_members"], +1
+ )
+
+ def test_join_after_invite(self):
+ """
+ When a user joins a room after being invited, total_events and
+ joined_members should increase by exactly 1.
+ current_state_events should not increase.
+ invited_members should decrease by exactly 1.
+ """
+
+ self._perform_background_initial_update()
+
+ u1 = self.register_user("u1", "pass")
+ u1token = self.login("u1", "pass")
+ r1 = self.helper.create_room_as(u1, tok=u1token)
+
+ u2 = self.register_user("u2", "pass")
+ u2token = self.login("u2", "pass")
+
+ self.helper.invite(r1, u1, u2, tok=u1token)
+
+ r1stats_ante = self._get_current_stats("room", r1)
+
+ self.helper.join(r1, u2, tok=u2token)
+
+ r1stats_post = self._get_current_stats("room", r1)
+
+ self.assertEqual(r1stats_post["total_events"] - r1stats_ante["total_events"], 1)
+ self.assertEqual(
+ r1stats_post["current_state_events"] - r1stats_ante["current_state_events"],
+ 0,
+ )
+ self.assertEqual(
+ r1stats_post["joined_members"] - r1stats_ante["joined_members"], +1
+ )
+ self.assertEqual(
+ r1stats_post["invited_members"] - r1stats_ante["invited_members"], -1
+ )
+
+ def test_left(self):
+ """
+ When a user leaves a room after joining, total_events and
+ left_members should increase by exactly 1.
+ current_state_events should not increase.
+ joined_members should decrease by exactly 1.
+ """
+
+ self._perform_background_initial_update()
+
+ u1 = self.register_user("u1", "pass")
+ u1token = self.login("u1", "pass")
+ r1 = self.helper.create_room_as(u1, tok=u1token)
+
+ u2 = self.register_user("u2", "pass")
+ u2token = self.login("u2", "pass")
+
+ self.helper.join(r1, u2, tok=u2token)
+
+ r1stats_ante = self._get_current_stats("room", r1)
+
+ self.helper.leave(r1, u2, tok=u2token)
+
+ r1stats_post = self._get_current_stats("room", r1)
+
+ self.assertEqual(r1stats_post["total_events"] - r1stats_ante["total_events"], 1)
+ self.assertEqual(
+ r1stats_post["current_state_events"] - r1stats_ante["current_state_events"],
+ 0,
+ )
+ self.assertEqual(
+ r1stats_post["left_members"] - r1stats_ante["left_members"], +1
+ )
+ self.assertEqual(
+ r1stats_post["joined_members"] - r1stats_ante["joined_members"], -1
+ )
+
+ def test_banned(self):
+ """
+ When a user is banned from a room after joining, total_events and
+ banned_members should increase by exactly 1.
+ current_state_events should not increase.
+ joined_members should decrease by exactly 1.
+ """
+
+ self._perform_background_initial_update()
+
+ u1 = self.register_user("u1", "pass")
+ u1token = self.login("u1", "pass")
+ r1 = self.helper.create_room_as(u1, tok=u1token)
+
+ u2 = self.register_user("u2", "pass")
+ u2token = self.login("u2", "pass")
+
+ self.helper.join(r1, u2, tok=u2token)
+
+ r1stats_ante = self._get_current_stats("room", r1)
+
+ self.helper.change_membership(r1, u1, u2, "ban", tok=u1token)
+
+ r1stats_post = self._get_current_stats("room", r1)
+
+ self.assertEqual(r1stats_post["total_events"] - r1stats_ante["total_events"], 1)
+ self.assertEqual(
+ r1stats_post["current_state_events"] - r1stats_ante["current_state_events"],
+ 0,
+ )
+ self.assertEqual(
+ r1stats_post["banned_members"] - r1stats_ante["banned_members"], +1
+ )
+ self.assertEqual(
+ r1stats_post["joined_members"] - r1stats_ante["joined_members"], -1
+ )
+
+ def test_initial_background_update(self):
+ """
+ Test that statistics can be generated by the initial background update
+ handler.
+
+ This test also checks that stats rows are not created for new subjects
+ when stats are disabled. However, it may be desirable to change this
+ behaviour eventually to still keep current rows.
+ """
+
+ self.hs.config.stats_enabled = False
+
+ u1 = self.register_user("u1", "pass")
+ u1token = self.login("u1", "pass")
+ r1 = self.helper.create_room_as(u1, tok=u1token)
+
+ # test that these subjects, which were created during a time of disabled
+ # stats, do not have stats.
+ self.assertIsNone(self._get_current_stats("room", r1))
+ self.assertIsNone(self._get_current_stats("user", u1))
+
+ self.hs.config.stats_enabled = True
+
+ self._perform_background_initial_update()
+
+ r1stats = self._get_current_stats("room", r1)
+ u1stats = self._get_current_stats("user", u1)
+
+ self.assertEqual(r1stats["joined_members"], 1)
+ self.assertEqual(
+ r1stats["current_state_events"], EXPT_NUM_STATE_EVTS_IN_FRESH_PUBLIC_ROOM
+ )
+
+ self.assertEqual(u1stats["joined_rooms"], 1)
+
+ def test_incomplete_stats(self):
+ """
+ This tests that we track incomplete statistics.
+
+ We first test that incomplete stats are incrementally generated,
+ following the preparation of a background regen.
+
+ We then test that these incomplete rows are completed by the background
+ regen.
+ """
+
+ u1 = self.register_user("u1", "pass")
+ u1token = self.login("u1", "pass")
+ u2 = self.register_user("u2", "pass")
+ u2token = self.login("u2", "pass")
+ u3 = self.register_user("u3", "pass")
+ r1 = self.helper.create_room_as(u1, tok=u1token, is_public=False)
+
+ # preparation stage of the initial background update
+ # Ugh, have to reset this flag
+ self.store._all_done = False
+
+ self.get_success(
+ self.store._simple_delete(
+ "room_stats_current", {"1": 1}, "test_delete_stats"
+ )
+ )
+ self.get_success(
+ self.store._simple_delete(
+ "user_stats_current", {"1": 1}, "test_delete_stats"
+ )
+ )
+
+ self.helper.invite(r1, u1, u2, tok=u1token)
+ self.helper.join(r1, u2, tok=u2token)
+ self.helper.invite(r1, u1, u3, tok=u1token)
+ self.helper.send(r1, "thou shalt yield", tok=u1token)
+
+ # now do the background updates
+
+ self.store._all_done = False
+ self.get_success(
+ self.store._simple_insert(
+ "background_updates",
+ {
+ "update_name": "populate_stats_process_rooms",
+ "progress_json": "{}",
+ "depends_on": "populate_stats_prepare",
+ },
+ )
+ )
+ self.get_success(
+ self.store._simple_insert(
+ "background_updates",
+ {
+ "update_name": "populate_stats_process_users",
+ "progress_json": "{}",
+ "depends_on": "populate_stats_process_rooms",
+ },
+ )
+ )
+ self.get_success(
+ self.store._simple_insert(
+ "background_updates",
+ {
+ "update_name": "populate_stats_cleanup",
+ "progress_json": "{}",
+ "depends_on": "populate_stats_process_users",
+ },
+ )
+ )
while not self.get_success(self.store.has_completed_background_updates()):
self.get_success(self.store.do_next_background_update(100), by=0.1)
- events = {"a1": None, "a2": {"membership": Membership.JOIN}}
-
- def get_event(event_id, allow_none=True):
- if events.get(event_id):
- m = Mock()
- m.content = events[event_id]
- else:
- m = None
- d = defer.Deferred()
- self.reactor.callLater(0.0, d.callback, m)
- return d
-
- def get_received_ts(event_id):
- return defer.succeed(1)
-
- self.store.get_received_ts = get_received_ts
- self.store.get_event = get_event
-
- deltas = [
- {
- "type": EventTypes.Member,
- "state_key": "some_user:test",
- "room_id": room_1,
- "event_id": "a2",
- "prev_event_id": "a1",
- "stream_id": 100,
- }
- ]
-
- # Handle our fake deltas, which has a user going from LEAVE -> JOIN.
- self.get_success(self.handler._handle_deltas(deltas))
-
- # One delta, with two joined members -- the room creator, and our fake
- # user.
- r = self.get_success(self.store.get_deltas_for_room(room_1, 0))
- self.assertEqual(len(r), 1)
- self.assertEqual(r[0]["joined_members"], 2)
+ r1stats_complete = self._get_current_stats("room", r1)
+ u1stats_complete = self._get_current_stats("user", u1)
+ u2stats_complete = self._get_current_stats("user", u2)
+
+ # now we make our assertions
+
+ # check that _complete rows are complete and correct
+ self.assertEqual(r1stats_complete["joined_members"], 2)
+ self.assertEqual(r1stats_complete["invited_members"], 1)
+
+ self.assertEqual(
+ r1stats_complete["current_state_events"],
+ 2 + EXPT_NUM_STATE_EVTS_IN_FRESH_PRIVATE_ROOM,
+ )
+
+ self.assertEqual(u1stats_complete["joined_rooms"], 1)
+ self.assertEqual(u2stats_complete["joined_rooms"], 1)
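
The ante/post arithmetic repeated throughout these tests could be factored into a tiny helper; a hypothetical sketch, not part of the patch:

    def stat_delta(ante, post, field):
        """Change in a single stats field across one action."""
        return post[field] - ante[field]

    # e.g. stat_delta(r1stats_ante, r1stats_post, "total_events") == 1
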
diff --git a/tests/http/__init__.py b/tests/http/__init__.py
index 2d5dba6464..2096ba3c91 100644
--- a/tests/http/__init__.py
+++ b/tests/http/__init__.py
@@ -20,6 +20,23 @@ from zope.interface import implementer
from OpenSSL import SSL
from OpenSSL.SSL import Connection
from twisted.internet.interfaces import IOpenSSLServerConnectionCreator
+from twisted.internet.ssl import Certificate, trustRootFromCertificates
+from twisted.web.client import BrowserLikePolicyForHTTPS # noqa: F401
+from twisted.web.iweb import IPolicyForHTTPS # noqa: F401
+
+
+def get_test_https_policy():
+ """Get a test IPolicyForHTTPS which trusts the test CA cert
+
+ Returns:
+ IPolicyForHTTPS
+ """
+ ca_file = get_test_ca_cert_file()
+ with open(ca_file) as stream:
+ content = stream.read()
+ cert = Certificate.loadPEM(content)
+ trust_root = trustRootFromCertificates([cert])
+ return BrowserLikePolicyForHTTPS(trustRoot=trust_root)
def get_test_ca_cert_file():
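
A hypothetical usage sketch for the new helper, mirroring how contextFactory is passed to Agent elsewhere in these tests:

    from twisted.internet import reactor
    from twisted.web.client import Agent

    # This Agent validates server certificates against the test CA.
    agent = Agent(reactor, contextFactory=get_test_https_policy())
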
diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py
index 1435baede2..cfcd98ff7d 100644
--- a/tests/http/federation/test_matrix_federation_agent.py
+++ b/tests/http/federation/test_matrix_federation_agent.py
@@ -41,9 +41,9 @@ from synapse.http.federation.well_known_resolver import (
from synapse.logging.context import LoggingContext
from synapse.util.caches.ttlcache import TTLCache
+from tests import unittest
from tests.http import TestServerTLSConnectionFactory, get_test_ca_cert_file
from tests.server import FakeTransport, ThreadedMemoryReactorClock
-from tests.unittest import TestCase
from tests.utils import default_config
logger = logging.getLogger(__name__)
@@ -67,14 +67,12 @@ def get_connection_factory():
return test_server_connection_factory
-class MatrixFederationAgentTests(TestCase):
+class MatrixFederationAgentTests(unittest.TestCase):
def setUp(self):
self.reactor = ThreadedMemoryReactorClock()
self.mock_resolver = Mock()
- self.well_known_cache = TTLCache("test_cache", timer=self.reactor.seconds)
-
config_dict = default_config("test", parse=False)
config_dict["federation_custom_ca_list"] = [get_test_ca_cert_file()]
@@ -82,11 +80,21 @@ class MatrixFederationAgentTests(TestCase):
config.parse_config_dict(config_dict, "", "")
self.tls_factory = ClientTLSOptionsFactory(config)
+
+ self.well_known_cache = TTLCache("test_cache", timer=self.reactor.seconds)
+ self.had_well_known_cache = TTLCache("test_cache", timer=self.reactor.seconds)
+ self.well_known_resolver = WellKnownResolver(
+ self.reactor,
+ Agent(self.reactor, contextFactory=self.tls_factory),
+ well_known_cache=self.well_known_cache,
+ had_well_known_cache=self.had_well_known_cache,
+ )
+
self.agent = MatrixFederationAgent(
reactor=self.reactor,
tls_client_options_factory=self.tls_factory,
_srv_resolver=self.mock_resolver,
- _well_known_cache=self.well_known_cache,
+ _well_known_resolver=self.well_known_resolver,
)
def _make_connection(self, client_factory, expected_sni):
@@ -116,19 +124,24 @@ class MatrixFederationAgentTests(TestCase):
FakeTransport(client_protocol, self.reactor, server_tls_protocol)
)
+ # grab a hold of the TLS connection, in case it gets torn down
+ server_tls_connection = server_tls_protocol._tlsConnection
+
+ # fish the test server back out of the server-side TLS protocol.
+ http_protocol = server_tls_protocol.wrappedProtocol
+
# give the reactor a pump to get the TLS juices flowing.
self.reactor.pump((0.1,))
# check the SNI
- server_name = server_tls_protocol._tlsConnection.get_servername()
+ server_name = server_tls_connection.get_servername()
self.assertEqual(
server_name,
expected_sni,
"Expected SNI %s but got %s" % (expected_sni, server_name),
)
- # fish the test server back out of the server-side TLS protocol.
- return server_tls_protocol.wrappedProtocol
+ return http_protocol
@defer.inlineCallbacks
def _make_get_request(self, uri):
@@ -543,7 +556,7 @@ class MatrixFederationAgentTests(TestCase):
self.assertEqual(self.well_known_cache[b"testserv"], b"target-server")
# check the cache expires
- self.reactor.pump((25 * 3600,))
+ self.reactor.pump((48 * 3600,))
self.well_known_cache.expire()
self.assertNotIn(b"testserv", self.well_known_cache)
@@ -631,7 +644,7 @@ class MatrixFederationAgentTests(TestCase):
self.assertEqual(self.well_known_cache[b"testserv"], b"target-server")
# check the cache expires
- self.reactor.pump((25 * 3600,))
+ self.reactor.pump((48 * 3600,))
self.well_known_cache.expire()
self.assertNotIn(b"testserv", self.well_known_cache)
@@ -701,11 +714,18 @@ class MatrixFederationAgentTests(TestCase):
config = default_config("test", parse=True)
+ # Build a new agent and WellKnownResolver with a different tls factory
+ tls_factory = ClientTLSOptionsFactory(config)
agent = MatrixFederationAgent(
reactor=self.reactor,
- tls_client_options_factory=ClientTLSOptionsFactory(config),
+ tls_client_options_factory=tls_factory,
_srv_resolver=self.mock_resolver,
- _well_known_cache=self.well_known_cache,
+ _well_known_resolver=WellKnownResolver(
+ self.reactor,
+ Agent(self.reactor, contextFactory=tls_factory),
+ well_known_cache=self.well_known_cache,
+ had_well_known_cache=self.had_well_known_cache,
+ ),
)
test_d = agent.request(b"GET", b"matrix://testserv/foo/bar")
@@ -932,15 +952,9 @@ class MatrixFederationAgentTests(TestCase):
self.successResultOf(test_d)
def test_well_known_cache(self):
- well_known_resolver = WellKnownResolver(
- self.reactor,
- Agent(self.reactor, contextFactory=self.tls_factory),
- well_known_cache=self.well_known_cache,
- )
-
self.reactor.lookups["testserv"] = "1.2.3.4"
- fetch_d = well_known_resolver.get_well_known(b"testserv")
+ fetch_d = self.well_known_resolver.get_well_known(b"testserv")
# there should be an attempt to connect on port 443 for the .well-known
clients = self.reactor.tcpClients
@@ -963,7 +977,7 @@ class MatrixFederationAgentTests(TestCase):
well_known_server.loseConnection()
# repeat the request: it should hit the cache
- fetch_d = well_known_resolver.get_well_known(b"testserv")
+ fetch_d = self.well_known_resolver.get_well_known(b"testserv")
r = self.successResultOf(fetch_d)
self.assertEqual(r.delegated_server, b"target-server")
@@ -971,7 +985,7 @@ class MatrixFederationAgentTests(TestCase):
self.reactor.pump((1000.0,))
# now it should connect again
- fetch_d = well_known_resolver.get_well_known(b"testserv")
+ fetch_d = self.well_known_resolver.get_well_known(b"testserv")
self.assertEqual(len(clients), 1)
(host, port, client_factory, _timeout, _bindAddress) = clients.pop(0)
@@ -987,8 +1001,137 @@ class MatrixFederationAgentTests(TestCase):
r = self.successResultOf(fetch_d)
self.assertEqual(r.delegated_server, b"other-server")
+ def test_well_known_cache_with_temp_failure(self):
+ """Test that we refetch well-known before the cache expires, and that
+ it ignores transient errors.
+ """
+
+ self.reactor.lookups["testserv"] = "1.2.3.4"
+
+ fetch_d = self.well_known_resolver.get_well_known(b"testserv")
+
+ # there should be an attempt to connect on port 443 for the .well-known
+ clients = self.reactor.tcpClients
+ self.assertEqual(len(clients), 1)
+ (host, port, client_factory, _timeout, _bindAddress) = clients.pop(0)
+ self.assertEqual(host, "1.2.3.4")
+ self.assertEqual(port, 443)
+
+ well_known_server = self._handle_well_known_connection(
+ client_factory,
+ expected_sni=b"testserv",
+ response_headers={b"Cache-Control": b"max-age=1000"},
+ content=b'{ "m.server": "target-server" }',
+ )
+
+ r = self.successResultOf(fetch_d)
+ self.assertEqual(r.delegated_server, b"target-server")
+
+ # close the tcp connection
+ well_known_server.loseConnection()
+
+ # Get close to the cache expiry; this will cause the resolver to do
+ # another lookup.
+ self.reactor.pump((900.0,))
+
+ fetch_d = self.well_known_resolver.get_well_known(b"testserv")
+
+ # The resolver may retry a few times, so fonx all requests that come along
+ attempts = 0
+ while self.reactor.tcpClients:
+ clients = self.reactor.tcpClients
+ (host, port, client_factory, _timeout, _bindAddress) = clients.pop(0)
+
+ attempts += 1
+
+ # fonx the connection attempt; this will be treated as a temporary
+ # failure.
+ client_factory.clientConnectionFailed(None, Exception("nope"))
+
+ # There's a few sleeps involved, so we have to pump the reactor a
+ # bit.
+ self.reactor.pump((1.0, 1.0))
+
+ # We expect to see more than one attempt as there was previously a valid
+ # well known.
+ self.assertGreater(attempts, 1)
+
+ # Resolver should return cached value, despite the lookup failing.
+ r = self.successResultOf(fetch_d)
+ self.assertEqual(r.delegated_server, b"target-server")
+
+ # Expire both caches and repeat the request
+ self.reactor.pump((10000.0,))
+
+ # Repeat the request; this time it should fail if the lookup fails.
+ fetch_d = self.well_known_resolver.get_well_known(b"testserv")
+
+ clients = self.reactor.tcpClients
+ (host, port, client_factory, _timeout, _bindAddress) = clients.pop(0)
+ client_factory.clientConnectionFailed(None, Exception("nope"))
+ self.reactor.pump((0.4,))
+
+ r = self.successResultOf(fetch_d)
+ self.assertEqual(r.delegated_server, None)
+
+ def test_srv_fallbacks(self):
+ """Test that other SRV results are tried if the first one fails.
+ """
+
+ self.mock_resolver.resolve_service.side_effect = lambda _: [
+ Server(host=b"target.com", port=8443),
+ Server(host=b"target.com", port=8444),
+ ]
+ self.reactor.lookups["target.com"] = "1.2.3.4"
+
+ test_d = self._make_get_request(b"matrix://testserv/foo/bar")
+
+ # Nothing happened yet
+ self.assertNoResult(test_d)
+
+ self.mock_resolver.resolve_service.assert_called_once_with(
+ b"_matrix._tcp.testserv"
+ )
+
+ # We should see an attempt to connect to the first server
+ clients = self.reactor.tcpClients
+ self.assertEqual(len(clients), 1)
+ (host, port, client_factory, _timeout, _bindAddress) = clients.pop(0)
+ self.assertEqual(host, "1.2.3.4")
+ self.assertEqual(port, 8443)
+
+ # Fonx the connection
+ client_factory.clientConnectionFailed(None, Exception("nope"))
+
+ # There's a 300ms delay in HostnameEndpoint
+ self.reactor.pump((0.4,))
+
+ # Hasn't failed yet
+ self.assertNoResult(test_d)
+
+ # We should now see an attempt to connect to the second server
+ clients = self.reactor.tcpClients
+ self.assertEqual(len(clients), 1)
+ (host, port, client_factory, _timeout, _bindAddress) = clients.pop(0)
+ self.assertEqual(host, "1.2.3.4")
+ self.assertEqual(port, 8444)
+
+ # make a test server, and wire up the client
+ http_server = self._make_connection(client_factory, expected_sni=b"testserv")
+
+ self.assertEqual(len(http_server.requests), 1)
+ request = http_server.requests[0]
+ self.assertEqual(request.method, b"GET")
+ self.assertEqual(request.path, b"/foo/bar")
+ self.assertEqual(request.requestHeaders.getRawHeaders(b"host"), [b"testserv"])
+
+ # finish the request
+ request.finish()
+ self.reactor.pump((0.1,))
+ self.successResultOf(test_d)
+
-class TestCachePeriodFromHeaders(TestCase):
+class TestCachePeriodFromHeaders(unittest.TestCase):
def test_cache_control(self):
# uppercase
self.assertEqual(
diff --git a/tests/http/federation/test_srv_resolver.py b/tests/http/federation/test_srv_resolver.py
index 3b885ef64b..df034ab237 100644
--- a/tests/http/federation/test_srv_resolver.py
+++ b/tests/http/federation/test_srv_resolver.py
@@ -83,8 +83,10 @@ class SrvResolverTestCase(unittest.TestCase):
service_name = b"test_service.example.com"
- entry = Mock(spec_set=["expires"])
+ entry = Mock(spec_set=["expires", "priority", "weight"])
entry.expires = 0
+ entry.priority = 0
+ entry.weight = 0
cache = {service_name: [entry]}
resolver = SrvResolver(dns_client=dns_client_mock, cache=cache)
@@ -105,8 +107,10 @@ class SrvResolverTestCase(unittest.TestCase):
service_name = b"test_service.example.com"
- entry = Mock(spec_set=["expires"])
+ entry = Mock(spec_set=["expires", "priority", "weight"])
entry.expires = 999999999
+ entry.priority = 0
+ entry.weight = 0
cache = {service_name: [entry]}
resolver = SrvResolver(
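
The priority and weight attributes are needed on the mock cache entries because SRV results are ordered per RFC 2782: the lowest priority group is tried first, and within that group servers are picked randomly in proportion to weight. A rough sketch of that selection, not the resolver's actual code:

import random

def pick_srv(records):
    """Rough RFC 2782 selection: lowest priority wins; within that
    group, choose randomly in proportion to weight."""
    lowest = min(r.priority for r in records)
    group = [r for r in records if r.priority == lowest]
    total = sum(r.weight for r in group)
    if total == 0:
        return random.choice(group)
    point = random.uniform(0, total)
    for record in group:
        point -= record.weight
        if point <= 0:
            return record
    return group[-1]
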
diff --git a/tests/http/test_proxyagent.py b/tests/http/test_proxyagent.py
new file mode 100644
index 0000000000..22abf76515
--- /dev/null
+++ b/tests/http/test_proxyagent.py
@@ -0,0 +1,334 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+
+import treq
+
+from twisted.internet import interfaces # noqa: F401
+from twisted.internet.protocol import Factory
+from twisted.protocols.tls import TLSMemoryBIOFactory
+from twisted.web.http import HTTPChannel
+
+from synapse.http.proxyagent import ProxyAgent
+
+from tests.http import TestServerTLSConnectionFactory, get_test_https_policy
+from tests.server import FakeTransport, ThreadedMemoryReactorClock
+from tests.unittest import TestCase
+
+logger = logging.getLogger(__name__)
+
+HTTPFactory = Factory.forProtocol(HTTPChannel)
+
+
+class MatrixFederationAgentTests(TestCase):
+ def setUp(self):
+ self.reactor = ThreadedMemoryReactorClock()
+
+ def _make_connection(
+ self, client_factory, server_factory, ssl=False, expected_sni=None
+ ):
+ """Builds a test server, and completes the outgoing client connection
+
+ Args:
+ client_factory (interfaces.IProtocolFactory): the factory that the
+ application is trying to use to make the outbound connection. We will
+ invoke it to build the client Protocol
+
+ server_factory (interfaces.IProtocolFactory): a factory to build the
+ server-side protocol
+
+ ssl (bool): If true, we will expect an ssl connection and wrap
+ server_factory with a TLSMemoryBIOFactory
+
+ expected_sni (bytes|None): the expected SNI value
+
+ Returns:
+ IProtocol: the server Protocol returned by server_factory
+ """
+ if ssl:
+ server_factory = _wrap_server_factory_for_tls(server_factory)
+
+ server_protocol = server_factory.buildProtocol(None)
+
+ # now, tell the client protocol factory to build the client protocol,
+ # and wire the output of said protocol up to the server via
+ # a FakeTransport.
+ #
+ # Normally this would be done by the TCP socket code in Twisted, but we are
+ # stubbing that out here.
+ client_protocol = client_factory.buildProtocol(None)
+ client_protocol.makeConnection(
+ FakeTransport(server_protocol, self.reactor, client_protocol)
+ )
+
+ # tell the server protocol to send its stuff back to the client, too
+ server_protocol.makeConnection(
+ FakeTransport(client_protocol, self.reactor, server_protocol)
+ )
+
+ if ssl:
+ http_protocol = server_protocol.wrappedProtocol
+ tls_connection = server_protocol._tlsConnection
+ else:
+ http_protocol = server_protocol
+ tls_connection = None
+
+ # give the reactor a pump to get the TLS juices flowing (if needed)
+ self.reactor.advance(0)
+
+ if expected_sni is not None:
+ server_name = tls_connection.get_servername()
+ self.assertEqual(
+ server_name,
+ expected_sni,
+ "Expected SNI %s but got %s" % (expected_sni, server_name),
+ )
+
+ return http_protocol
+
+ def test_http_request(self):
+ agent = ProxyAgent(self.reactor)
+
+ self.reactor.lookups["test.com"] = "1.2.3.4"
+ d = agent.request(b"GET", b"http://test.com")
+
+ # there should be a pending TCP connection
+ clients = self.reactor.tcpClients
+ self.assertEqual(len(clients), 1)
+ (host, port, client_factory, _timeout, _bindAddress) = clients[0]
+ self.assertEqual(host, "1.2.3.4")
+ self.assertEqual(port, 80)
+
+ # make a test server, and wire up the client
+ http_server = self._make_connection(
+ client_factory, _get_test_protocol_factory()
+ )
+
+ # the FakeTransport is async, so we need to pump the reactor
+ self.reactor.advance(0)
+
+ # now there should be a pending request
+ self.assertEqual(len(http_server.requests), 1)
+
+ request = http_server.requests[0]
+ self.assertEqual(request.method, b"GET")
+ self.assertEqual(request.path, b"/")
+ self.assertEqual(request.requestHeaders.getRawHeaders(b"host"), [b"test.com"])
+ request.write(b"result")
+ request.finish()
+
+ self.reactor.advance(0)
+
+ resp = self.successResultOf(d)
+ body = self.successResultOf(treq.content(resp))
+ self.assertEqual(body, b"result")
+
+ def test_https_request(self):
+ agent = ProxyAgent(self.reactor, contextFactory=get_test_https_policy())
+
+ self.reactor.lookups["test.com"] = "1.2.3.4"
+ d = agent.request(b"GET", b"https://test.com/abc")
+
+ # there should be a pending TCP connection
+ clients = self.reactor.tcpClients
+ self.assertEqual(len(clients), 1)
+ (host, port, client_factory, _timeout, _bindAddress) = clients[0]
+ self.assertEqual(host, "1.2.3.4")
+ self.assertEqual(port, 443)
+
+ # make a test server, and wire up the client
+ http_server = self._make_connection(
+ client_factory,
+ _get_test_protocol_factory(),
+ ssl=True,
+ expected_sni=b"test.com",
+ )
+
+ # the FakeTransport is async, so we need to pump the reactor
+ self.reactor.advance(0)
+
+ # now there should be a pending request
+ self.assertEqual(len(http_server.requests), 1)
+
+ request = http_server.requests[0]
+ self.assertEqual(request.method, b"GET")
+ self.assertEqual(request.path, b"/abc")
+ self.assertEqual(request.requestHeaders.getRawHeaders(b"host"), [b"test.com"])
+ request.write(b"result")
+ request.finish()
+
+ self.reactor.advance(0)
+
+ resp = self.successResultOf(d)
+ body = self.successResultOf(treq.content(resp))
+ self.assertEqual(body, b"result")
+
+ def test_http_request_via_proxy(self):
+ agent = ProxyAgent(self.reactor, http_proxy=b"proxy.com:8888")
+
+ self.reactor.lookups["proxy.com"] = "1.2.3.5"
+ d = agent.request(b"GET", b"http://test.com")
+
+ # there should be a pending TCP connection
+ clients = self.reactor.tcpClients
+ self.assertEqual(len(clients), 1)
+ (host, port, client_factory, _timeout, _bindAddress) = clients[0]
+ self.assertEqual(host, "1.2.3.5")
+ self.assertEqual(port, 8888)
+
+ # make a test server, and wire up the client
+ http_server = self._make_connection(
+ client_factory, _get_test_protocol_factory()
+ )
+
+ # the FakeTransport is async, so we need to pump the reactor
+ self.reactor.advance(0)
+
+ # now there should be a pending request
+ self.assertEqual(len(http_server.requests), 1)
+
+ request = http_server.requests[0]
+ self.assertEqual(request.method, b"GET")
+ self.assertEqual(request.path, b"http://test.com")
+ self.assertEqual(request.requestHeaders.getRawHeaders(b"host"), [b"test.com"])
+ request.write(b"result")
+ request.finish()
+
+ self.reactor.advance(0)
+
+ resp = self.successResultOf(d)
+ body = self.successResultOf(treq.content(resp))
+ self.assertEqual(body, b"result")
+
+ def test_https_request_via_proxy(self):
+ agent = ProxyAgent(
+ self.reactor,
+ contextFactory=get_test_https_policy(),
+ https_proxy=b"proxy.com",
+ )
+
+ self.reactor.lookups["proxy.com"] = "1.2.3.5"
+ d = agent.request(b"GET", b"https://test.com/abc")
+
+ # there should be a pending TCP connection
+ clients = self.reactor.tcpClients
+ self.assertEqual(len(clients), 1)
+ (host, port, client_factory, _timeout, _bindAddress) = clients[0]
+ self.assertEqual(host, "1.2.3.5")
+ self.assertEqual(port, 1080)
+
+ # make a test HTTP server, and wire up the client
+ proxy_server = self._make_connection(
+ client_factory, _get_test_protocol_factory()
+ )
+
+ # fish the transports back out so that we can do the old switcheroo
+ s2c_transport = proxy_server.transport
+ client_protocol = s2c_transport.other
+ c2s_transport = client_protocol.transport
+
+ # the FakeTransport is async, so we need to pump the reactor
+ self.reactor.advance(0)
+
+ # now there should be a pending CONNECT request
+ self.assertEqual(len(proxy_server.requests), 1)
+
+ request = proxy_server.requests[0]
+ self.assertEqual(request.method, b"CONNECT")
+ self.assertEqual(request.path, b"test.com:443")
+
+ # tell the proxy server not to close the connection
+ proxy_server.persistent = True
+
+ # this just stops the http Request from trying to do a chunked response
+ # request.setHeader(b"Content-Length", b"0")
+ request.finish()
+
+ # now we can replace the proxy channel with a new, SSL-wrapped HTTP channel
+ ssl_factory = _wrap_server_factory_for_tls(_get_test_protocol_factory())
+ ssl_protocol = ssl_factory.buildProtocol(None)
+ http_server = ssl_protocol.wrappedProtocol
+
+ ssl_protocol.makeConnection(
+ FakeTransport(client_protocol, self.reactor, ssl_protocol)
+ )
+ c2s_transport.other = ssl_protocol
+
+ self.reactor.advance(0)
+
+ server_name = ssl_protocol._tlsConnection.get_servername()
+ expected_sni = b"test.com"
+ self.assertEqual(
+ server_name,
+ expected_sni,
+ "Expected SNI %s but got %s" % (expected_sni, server_name),
+ )
+
+ # now there should be a pending request
+ self.assertEqual(len(http_server.requests), 1)
+
+ request = http_server.requests[0]
+ self.assertEqual(request.method, b"GET")
+ self.assertEqual(request.path, b"/abc")
+ self.assertEqual(request.requestHeaders.getRawHeaders(b"host"), [b"test.com"])
+ request.write(b"result")
+ request.finish()
+
+ self.reactor.advance(0)
+
+ resp = self.successResultOf(d)
+ body = self.successResultOf(treq.content(resp))
+ self.assertEqual(body, b"result")
+
+
+def _wrap_server_factory_for_tls(factory, sanlist=None):
+ """Wrap an existing Protocol Factory with a test TLSMemoryBIOFactory
+
+ The resultant factory will create a TLS server which presents a certificate
+ signed by our test CA, valid for the domains in `sanlist`
+
+ Args:
+ factory (interfaces.IProtocolFactory): protocol factory to wrap
+ sanlist (iterable[bytes]): list of domains the cert should be valid for
+
+ Returns:
+ interfaces.IProtocolFactory
+ """
+ if sanlist is None:
+ sanlist = [b"DNS:test.com"]
+
+ connection_creator = TestServerTLSConnectionFactory(sanlist=sanlist)
+ return TLSMemoryBIOFactory(
+ connection_creator, isClient=False, wrappedFactory=factory
+ )
+
+
+def _get_test_protocol_factory():
+ """Get a protocol Factory which will build an HTTPChannel
+
+ Returns:
+ interfaces.IProtocolFactory
+ """
+ server_factory = Factory.forProtocol(HTTPChannel)
+
+ # Request.finish expects the factory to have a 'log' method.
+ server_factory.log = _log_request
+
+ return server_factory
+
+
+def _log_request(request):
+ """Implements Factory.log, which is expected by Request.finish"""
+ logger.info("Completed request %s", request)
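
test_https_request_via_proxy walks through the standard HTTP CONNECT dance: connect to the proxy, issue CONNECT host:443, and only then negotiate TLS end-to-end with the origin, which is why the SNI check is against test.com rather than proxy.com (test_http_request_via_proxy, by contrast, just sends the absolute URL as the request path). A schematic of the client side, assuming a blocking socket purely for brevity:

import socket
import ssl

def open_tunnel(proxy_host, proxy_port, target_host, target_port=443):
    # Connect to the proxy and ask it to tunnel to the target.
    sock = socket.create_connection((proxy_host, proxy_port))
    sock.sendall(
        b"CONNECT %s:%d HTTP/1.1\r\nHost: %s:%d\r\n\r\n"
        % (target_host.encode(), target_port, target_host.encode(), target_port)
    )
    # Read the proxy's reply; a 200 means the tunnel is up. (A real
    # client would read until the end of the header block.)
    reply = sock.recv(4096)
    if not reply.startswith((b"HTTP/1.1 200", b"HTTP/1.0 200")):
        raise ConnectionError("proxy refused CONNECT: %r" % (reply,))
    # TLS is negotiated with the *target* through the tunnel, so the
    # SNI carries the target hostname, not the proxy's.
    context = ssl.create_default_context()
    return context.wrap_socket(sock, server_hostname=target_host)
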
diff --git a/tests/logging/__init__.py b/tests/logging/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/logging/__init__.py
diff --git a/tests/logging/test_structured.py b/tests/logging/test_structured.py
new file mode 100644
index 0000000000..451d05c0f0
--- /dev/null
+++ b/tests/logging/test_structured.py
@@ -0,0 +1,214 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import os
+import os.path
+import shutil
+import sys
+import textwrap
+
+from twisted.logger import Logger, eventAsText, eventsFromJSONLogFile
+
+from synapse.config.logger import setup_logging
+from synapse.logging._structured import setup_structured_logging
+from synapse.logging.context import LoggingContext
+
+from tests.unittest import DEBUG, HomeserverTestCase
+
+
+class FakeBeginner(object):
+ def beginLoggingTo(self, observers, **kwargs):
+ self.observers = observers
+
+
+class StructuredLoggingTestBase(object):
+ """
+ Test base that registers a cleanup handler to reset the stdlib log handler
+ to 'unset'.
+ """
+
+ def prepare(self, reactor, clock, hs):
+ def _cleanup():
+ logging.getLogger("synapse").setLevel(logging.NOTSET)
+
+ self.addCleanup(_cleanup)
+
+
+class StructuredLoggingTestCase(StructuredLoggingTestBase, HomeserverTestCase):
+ """
+ Tests for Synapse's structured logging support.
+ """
+
+ def test_output_to_json_round_trip(self):
+ """
+ Synapse logs can be output as JSON and then read back again.
+ """
+ temp_dir = self.mktemp()
+ os.mkdir(temp_dir)
+ self.addCleanup(shutil.rmtree, temp_dir)
+
+ json_log_file = os.path.abspath(os.path.join(temp_dir, "out.json"))
+
+ log_config = {
+ "drains": {"jsonfile": {"type": "file_json", "location": json_log_file}}
+ }
+
+ # Begin the logger with our config
+ beginner = FakeBeginner()
+ setup_structured_logging(
+ self.hs, self.hs.config, log_config, logBeginner=beginner
+ )
+
+ # Make a logger and send an event
+ logger = Logger(
+ namespace="tests.logging.test_structured", observer=beginner.observers[0]
+ )
+ logger.info("Hello there, {name}!", name="wally")
+
+ # Read the log file and check it has the event we sent
+ with open(json_log_file, "r") as f:
+ logged_events = list(eventsFromJSONLogFile(f))
+ self.assertEqual(len(logged_events), 1)
+
+ # The event pulled from the file should render fine
+ self.assertEqual(
+ eventAsText(logged_events[0], includeTimestamp=False),
+ "[tests.logging.test_structured#info] Hello there, wally!",
+ )
+
+ def test_output_to_text(self):
+ """
+ Synapse logs can be output as text.
+ """
+ temp_dir = self.mktemp()
+ os.mkdir(temp_dir)
+ self.addCleanup(shutil.rmtree, temp_dir)
+
+ log_file = os.path.abspath(os.path.join(temp_dir, "out.log"))
+
+ log_config = {"drains": {"file": {"type": "file", "location": log_file}}}
+
+ # Begin the logger with our config
+ beginner = FakeBeginner()
+ setup_structured_logging(
+ self.hs, self.hs.config, log_config, logBeginner=beginner
+ )
+
+ # Make a logger and send an event
+ logger = Logger(
+ namespace="tests.logging.test_structured", observer=beginner.observers[0]
+ )
+ logger.info("Hello there, {name}!", name="wally")
+
+ # Read the log file and check it has the event we sent
+ with open(log_file, "r") as f:
+ logged_events = f.read().strip().split("\n")
+ self.assertEqual(len(logged_events), 1)
+
+ # The event pulled from the file should render fine
+ self.assertTrue(
+ logged_events[0].endswith(
+ " - tests.logging.test_structured - INFO - None - Hello there, wally!"
+ )
+ )
+
+ def test_collects_logcontext(self):
+ """
+ Test that log outputs have the attached logging context.
+ """
+ log_config = {"drains": {}}
+
+ # Begin the logger with our config
+ beginner = FakeBeginner()
+ publisher = setup_structured_logging(
+ self.hs, self.hs.config, log_config, logBeginner=beginner
+ )
+
+ logs = []
+
+ publisher.addObserver(logs.append)
+
+ # Make a logger and send an event
+ logger = Logger(
+ namespace="tests.logging.test_structured", observer=beginner.observers[0]
+ )
+
+ with LoggingContext("testcontext", request="somereq"):
+ logger.info("Hello there, {name}!", name="steve")
+
+ self.assertEqual(len(logs), 1)
+ self.assertEqual(logs[0]["request"], "somereq")
+
+
+class StructuredLoggingConfigurationFileTestCase(
+ StructuredLoggingTestBase, HomeserverTestCase
+):
+ def make_homeserver(self, reactor, clock):
+
+ tempdir = self.mktemp()
+ os.mkdir(tempdir)
+ log_config_file = os.path.abspath(os.path.join(tempdir, "log.config.yaml"))
+ self.homeserver_log = os.path.abspath(os.path.join(tempdir, "homeserver.log"))
+
+ config = self.default_config()
+ config["log_config"] = log_config_file
+
+ with open(log_config_file, "w") as f:
+ f.write(
+ textwrap.dedent(
+ """\
+ structured: true
+
+ drains:
+ file:
+ type: file_json
+ location: %s
+ """
+ % (self.homeserver_log,)
+ )
+ )
+
+ self.addCleanup(self._sys_cleanup)
+
+ return self.setup_test_homeserver(config=config)
+
+ def _sys_cleanup(self):
+ sys.stdout = sys.__stdout__
+ sys.stderr = sys.__stderr__
+
+ # Do not remove! We need the log level to be set to something other than WARNING.
+ @DEBUG
+ def test_log_output(self):
+ """
+ When a structured logging config is given, Synapse will use it.
+ """
+ beginner = FakeBeginner()
+ publisher = setup_logging(self.hs, self.hs.config, logBeginner=beginner)
+
+ # Make a logger and send an event
+ logger = Logger(namespace="tests.logging.test_structured", observer=publisher)
+
+ with LoggingContext("testcontext", request="somereq"):
+ logger.info("Hello there, {name}!", name="steve")
+
+ with open(self.homeserver_log, "r") as f:
+ logged_events = [
+ eventAsText(x, includeTimestamp=False) for x in eventsFromJSONLogFile(f)
+ ]
+
+ logs = "\n".join(logged_events)
+ self.assertTrue("***** STARTING SERVER *****" in logs)
+ self.assertTrue("Hello there, steve!" in logs)
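
The file_json drain exercised above is a thin layer over Twisted's own JSON log observer, so the same round trip can be reproduced with stock twisted.logger and no Synapse at all; a minimal standalone version:

import io

from twisted.logger import (
    Logger,
    eventAsText,
    eventsFromJSONLogFile,
    jsonFileLogObserver,
)

out = io.StringIO()
log = Logger(namespace="demo", observer=jsonFileLogObserver(out))
log.info("Hello there, {name}!", name="wally")

# Rewind and parse the events back out of the "file".
out.seek(0)
events = list(eventsFromJSONLogFile(out))
assert eventAsText(events[0], includeTimestamp=False) == (
    "[demo#info] Hello there, wally!"
)
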
diff --git a/tests/logging/test_terse_json.py b/tests/logging/test_terse_json.py
new file mode 100644
index 0000000000..4cf81f7128
--- /dev/null
+++ b/tests/logging/test_terse_json.py
@@ -0,0 +1,234 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+from collections import Counter
+
+from twisted.logger import Logger
+
+from synapse.logging._structured import setup_structured_logging
+
+from tests.server import connect_client
+from tests.unittest import HomeserverTestCase
+
+from .test_structured import FakeBeginner, StructuredLoggingTestBase
+
+
+class TerseJSONTCPTestCase(StructuredLoggingTestBase, HomeserverTestCase):
+ def test_log_output(self):
+ """
+ The Terse JSON outputter delivers simplified structured logs over TCP.
+ """
+ log_config = {
+ "drains": {
+ "tersejson": {
+ "type": "network_json_terse",
+ "host": "127.0.0.1",
+ "port": 8000,
+ }
+ }
+ }
+
+ # Begin the logger with our config
+ beginner = FakeBeginner()
+ setup_structured_logging(
+ self.hs, self.hs.config, log_config, logBeginner=beginner
+ )
+
+ logger = Logger(
+ namespace="tests.logging.test_terse_json", observer=beginner.observers[0]
+ )
+ logger.info("Hello there, {name}!", name="wally")
+
+ # Trigger the connection
+ self.pump()
+
+ _, server = connect_client(self.reactor, 0)
+
+ # Trigger data being sent
+ self.pump()
+
+ # One log message, with a single trailing newline
+ logs = server.data.decode("utf8").splitlines()
+ self.assertEqual(len(logs), 1)
+ self.assertEqual(server.data.count(b"\n"), 1)
+
+ log = json.loads(logs[0])
+
+ # The terse logger should give us these keys.
+ expected_log_keys = [
+ "log",
+ "time",
+ "level",
+ "log_namespace",
+ "request",
+ "scope",
+ "server_name",
+ "name",
+ ]
+ self.assertEqual(set(log.keys()), set(expected_log_keys))
+
+ # It contains the data we expect.
+ self.assertEqual(log["name"], "wally")
+
+ def test_log_backpressure_debug(self):
+ """
+ When backpressure is hit, DEBUG logs will be shed.
+ """
+ log_config = {
+ "loggers": {"synapse": {"level": "DEBUG"}},
+ "drains": {
+ "tersejson": {
+ "type": "network_json_terse",
+ "host": "127.0.0.1",
+ "port": 8000,
+ "maximum_buffer": 10,
+ }
+ },
+ }
+
+ # Begin the logger with our config
+ beginner = FakeBeginner()
+ setup_structured_logging(
+ self.hs,
+ self.hs.config,
+ log_config,
+ logBeginner=beginner,
+ redirect_stdlib_logging=False,
+ )
+
+ logger = Logger(
+ namespace="synapse.logging.test_terse_json", observer=beginner.observers[0]
+ )
+
+ # Send some debug messages
+ for i in range(0, 3):
+ logger.debug("debug %s" % (i,))
+
+ # Send a bunch of useful messages
+ for i in range(0, 7):
+ logger.info("test message %s" % (i,))
+
+ # The last debug message pushes it past the maximum buffer
+ logger.debug("too much debug")
+
+ # Allow the reconnection
+ _, server = connect_client(self.reactor, 0)
+ self.pump()
+
+ # Only the 7 infos made it through; the debugs were elided
+ logs = server.data.splitlines()
+ self.assertEqual(len(logs), 7)
+
+ def test_log_backpressure_info(self):
+ """
+ When backpressure is hit, DEBUG and INFO logs will be shed.
+ """
+ log_config = {
+ "loggers": {"synapse": {"level": "DEBUG"}},
+ "drains": {
+ "tersejson": {
+ "type": "network_json_terse",
+ "host": "127.0.0.1",
+ "port": 8000,
+ "maximum_buffer": 10,
+ }
+ },
+ }
+
+ # Begin the logger with our config
+ beginner = FakeBeginner()
+ setup_structured_logging(
+ self.hs,
+ self.hs.config,
+ log_config,
+ logBeginner=beginner,
+ redirect_stdlib_logging=False,
+ )
+
+ logger = Logger(
+ namespace="synapse.logging.test_terse_json", observer=beginner.observers[0]
+ )
+
+ # Send some debug messages
+ for i in range(0, 3):
+ logger.debug("debug %s" % (i,))
+
+ # Send a bunch of useful messages
+ for i in range(0, 10):
+ logger.warn("test warn %s" % (i,))
+
+ # Send a bunch of info messages
+ for i in range(0, 3):
+ logger.info("test message %s" % (i,))
+
+ # The last debug message pushes it past the maximum buffer
+ logger.debug("too much debug")
+
+ # Allow the reconnection
+ client, server = connect_client(self.reactor, 0)
+ self.pump()
+
+ # The 10 warnings made it through; the debugs and infos were elided
+ logs = list(map(json.loads, server.data.decode("utf8").splitlines()))
+ self.assertEqual(len(logs), 10)
+
+ self.assertEqual(Counter([x["level"] for x in logs]), {"WARN": 10})
+
+ def test_log_backpressure_cut_middle(self):
+ """
+ When backpressure is hit and no more DEBUG or INFO messages can be
+ culled, the middle messages are cut out.
+ """
+ log_config = {
+ "loggers": {"synapse": {"level": "DEBUG"}},
+ "drains": {
+ "tersejson": {
+ "type": "network_json_terse",
+ "host": "127.0.0.1",
+ "port": 8000,
+ "maximum_buffer": 10,
+ }
+ },
+ }
+
+ # Begin the logger with our config
+ beginner = FakeBeginner()
+ setup_structured_logging(
+ self.hs,
+ self.hs.config,
+ log_config,
+ logBeginner=beginner,
+ redirect_stdlib_logging=False,
+ )
+
+ logger = Logger(
+ namespace="synapse.logging.test_terse_json", observer=beginner.observers[0]
+ )
+
+ # Send a bunch of useful messages
+ for i in range(0, 20):
+ logger.warn("test warn", num=i)
+
+ # Allow the reconnection
+ client, server = connect_client(self.reactor, 0)
+ self.pump()
+
+ # The first five and last five warnings made it through; the middle
+ # ten were cut
+ logs = list(map(json.loads, server.data.decode("utf8").splitlines()))
+ self.assertEqual(len(logs), 10)
+ self.assertEqual(Counter([x["level"] for x in logs]), {"WARN": 10})
+ self.assertEqual([0, 1, 2, 3, 4, 15, 16, 17, 18, 19], [x["num"] for x in logs])
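
Taken together, the three backpressure tests encode a three-stage shedding policy: once the buffer exceeds maximum_buffer, DEBUG events are dropped first, then INFO, and if the buffer is still too large the middle is cut, keeping the oldest and newest messages. Roughly, as a hypothetical helper (with a simplified event representation) rather than the drain's actual code:

def shed(buffer, maximum_buffer):
    """Hypothetical three-stage shed matching the tests' observations."""
    for level in ("debug", "info"):
        if len(buffer) <= maximum_buffer:
            return buffer
        # Stages 1 and 2: drop the least useful levels wholesale.
        buffer = [e for e in buffer if e["level"] != level]
    if len(buffer) > maximum_buffer:
        # Stage 3: cut the middle, keeping the oldest and newest halves.
        keep = maximum_buffer // 2
        buffer = buffer[:keep] + buffer[-keep:]
    return buffer

With maximum_buffer = 10 and twenty warnings, this keeps the first five and last five, exactly the [0..4, 15..19] sequence asserted above.
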
diff --git a/tests/push/test_http.py b/tests/push/test_http.py
index 8ce6bb62da..af2327fb66 100644
--- a/tests/push/test_http.py
+++ b/tests/push/test_http.py
@@ -50,7 +50,7 @@ class HTTPPusherTests(HomeserverTestCase):
config = self.default_config()
config["start_pushers"] = True
- hs = self.setup_test_homeserver(config=config, simple_http_client=m)
+ hs = self.setup_test_homeserver(config=config, proxied_http_client=m)
return hs
diff --git a/tests/rest/client/test_identity.py b/tests/rest/client/test_identity.py
index c973521907..f81f81602e 100644
--- a/tests/rest/client/test_identity.py
+++ b/tests/rest/client/test_identity.py
@@ -15,15 +15,22 @@
import json
+from mock import Mock
+
+from twisted.internet import defer
+
import synapse.rest.admin
from synapse.rest.client.v1 import login, room
+from synapse.rest.client.v2_alpha import account
from tests import unittest
-class IdentityTestCase(unittest.HomeserverTestCase):
+class IdentityDisabledTestCase(unittest.HomeserverTestCase):
+ """Tests that 3PID lookup attempts fail when the HS's config disallows them."""
servlets = [
+ account.register_servlets,
synapse.rest.admin.register_servlets_for_client_rest_resource,
room.register_servlets,
login.register_servlets,
@@ -32,19 +39,93 @@ class IdentityTestCase(unittest.HomeserverTestCase):
def make_homeserver(self, reactor, clock):
config = self.default_config()
+ config["trusted_third_party_id_servers"] = ["testis"]
config["enable_3pid_lookup"] = False
self.hs = self.setup_test_homeserver(config=config)
return self.hs
+ def prepare(self, reactor, clock, hs):
+ self.user_id = self.register_user("kermit", "monkey")
+ self.tok = self.login("kermit", "monkey")
+
+ def test_3pid_invite_disabled(self):
+ request, channel = self.make_request(
+ b"POST", "/createRoom", b"{}", access_token=self.tok
+ )
+ self.render(request)
+ self.assertEquals(channel.result["code"], b"200", channel.result)
+ room_id = channel.json_body["room_id"]
+
+ params = {
+ "id_server": "testis",
+ "medium": "email",
+ "address": "test@example.com",
+ }
+ request_data = json.dumps(params)
+ request_url = ("/rooms/%s/invite" % (room_id)).encode("ascii")
+ request, channel = self.make_request(
+ b"POST", request_url, request_data, access_token=self.tok
+ )
+ self.render(request)
+ self.assertEquals(channel.result["code"], b"403", channel.result)
+
def test_3pid_lookup_disabled(self):
- self.hs.config.enable_3pid_lookup = False
+ url = (
+ "/_matrix/client/unstable/account/3pid/lookup"
+ "?id_server=testis&medium=email&address=foo@bar.baz"
+ )
+ request, channel = self.make_request("GET", url, access_token=self.tok)
+ self.render(request)
+ self.assertEqual(channel.result["code"], b"403", channel.result)
+
+ def test_3pid_bulk_lookup_disabled(self):
+ url = "/_matrix/client/unstable/account/3pid/bulk_lookup"
+ data = {
+ "id_server": "testis",
+ "threepids": [["email", "foo@bar.baz"], ["email", "john.doe@matrix.org"]],
+ }
+ request_data = json.dumps(data)
+ request, channel = self.make_request(
+ "POST", url, request_data, access_token=self.tok
+ )
+ self.render(request)
+ self.assertEqual(channel.result["code"], b"403", channel.result)
+
+
+class IdentityEnabledTestCase(unittest.HomeserverTestCase):
+ """Tests that 3PID lookup attempts succeed when the HS's config allows them."""
- self.register_user("kermit", "monkey")
- tok = self.login("kermit", "monkey")
+ servlets = [
+ account.register_servlets,
+ synapse.rest.admin.register_servlets_for_client_rest_resource,
+ room.register_servlets,
+ login.register_servlets,
+ ]
+
+ def make_homeserver(self, reactor, clock):
+
+ config = self.default_config()
+ config["enable_3pid_lookup"] = True
+ config["trusted_third_party_id_servers"] = ["testis"]
+
+ mock_http_client = Mock(spec=["get_json", "post_json_get_json"])
+ mock_http_client.get_json.return_value = defer.succeed((200, "{}"))
+ mock_http_client.post_json_get_json.return_value = defer.succeed((200, "{}"))
+
+ self.hs = self.setup_test_homeserver(
+ config=config, simple_http_client=mock_http_client
+ )
+ return self.hs
+
+ def prepare(self, reactor, clock, hs):
+ self.user_id = self.register_user("kermit", "monkey")
+ self.tok = self.login("kermit", "monkey")
+
+ def test_3pid_invite_enabled(self):
request, channel = self.make_request(
- b"POST", "/createRoom", b"{}", access_token=tok
+ b"POST", "/createRoom", b"{}", access_token=self.tok
)
self.render(request)
self.assertEquals(channel.result["code"], b"200", channel.result)
@@ -58,7 +139,44 @@ class IdentityTestCase(unittest.HomeserverTestCase):
request_data = json.dumps(params)
request_url = ("/rooms/%s/invite" % (room_id)).encode("ascii")
request, channel = self.make_request(
- b"POST", request_url, request_data, access_token=tok
+ b"POST", request_url, request_data, access_token=self.tok
)
self.render(request)
- self.assertEquals(channel.result["code"], b"403", channel.result)
+
+ get_json = self.hs.get_simple_http_client().get_json
+ get_json.assert_called_once_with(
+ "https://testis/_matrix/identity/api/v1/lookup",
+ {"address": "test@example.com", "medium": "email"},
+ )
+
+ def test_3pid_lookup_enabled(self):
+ url = (
+ "/_matrix/client/unstable/account/3pid/lookup"
+ "?id_server=testis&medium=email&address=foo@bar.baz"
+ )
+ request, channel = self.make_request("GET", url, access_token=self.tok)
+ self.render(request)
+
+ get_json = self.hs.get_simple_http_client().get_json
+ get_json.assert_called_once_with(
+ "https://testis/_matrix/identity/api/v1/lookup",
+ {"address": "foo@bar.baz", "medium": "email"},
+ )
+
+ def test_3pid_bulk_lookup_enabled(self):
+ url = "/_matrix/client/unstable/account/3pid/bulk_lookup"
+ data = {
+ "id_server": "testis",
+ "threepids": [["email", "foo@bar.baz"], ["email", "john.doe@matrix.org"]],
+ }
+ request_data = json.dumps(data)
+ request, channel = self.make_request(
+ "POST", url, request_data, access_token=self.tok
+ )
+ self.render(request)
+
+ post_json = self.hs.get_simple_http_client().post_json_get_json
+ post_json.assert_called_once_with(
+ "https://testis/_matrix/identity/api/v1/bulk_lookup",
+ {"threepids": [["email", "foo@bar.baz"], ["email", "john.doe@matrix.org"]]},
+ )
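
Both test cases hinge on a single configuration gate; conceptually the handler does something like the following (a simplified sketch with hypothetical names, matching the URL asserted in the enabled case):

def lookup_3pid(config, http_client, id_server, medium, address):
    """Simplified gate for 3PID lookups (hypothetical helper)."""
    if not config["enable_3pid_lookup"]:
        # The 403s in IdentityDisabledTestCase.
        raise PermissionError("3PID lookup is disabled on this homeserver")
    if id_server not in config["trusted_third_party_id_servers"]:
        raise PermissionError("untrusted identity server")
    return http_client.get_json(
        "https://%s/_matrix/identity/api/v1/lookup" % (id_server,),
        {"address": address, "medium": medium},
    )
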
diff --git a/tests/rest/client/test_redactions.py b/tests/rest/client/test_redactions.py
index fe66e397c4..d2bcf256fa 100644
--- a/tests/rest/client/test_redactions.py
+++ b/tests/rest/client/test_redactions.py
@@ -30,6 +30,14 @@ class RedactionsTestCase(HomeserverTestCase):
sync.register_servlets,
]
+ def make_homeserver(self, reactor, clock):
+ config = self.default_config()
+
+ config["rc_message"] = {"per_second": 0.2, "burst_count": 10}
+ config["rc_admin_redaction"] = {"per_second": 1, "burst_count": 100}
+
+ return self.setup_test_homeserver(config=config)
+
def prepare(self, reactor, clock, hs):
# register a couple of users
self.mod_user_id = self.register_user("user1", "pass")
@@ -177,3 +185,20 @@ class RedactionsTestCase(HomeserverTestCase):
self._redact_event(
self.other_access_token, self.room_id, create_event_id, expect_code=403
)
+
+ def test_redact_event_as_moderator_ratelimit(self):
+ """Tests that the correct ratelimiting is applied to redactions
+ """
+
+ message_ids = []
+ # as a regular user, send messages to redact
+ for _ in range(20):
+ b = self.helper.send(room_id=self.room_id, tok=self.other_access_token)
+ message_ids.append(b["event_id"])
+ self.reactor.advance(10) # To get around ratelimits
+
+ # as the moderator, send a bunch of redactions
+ for msg_id in message_ids:
+ # These should all succeed, even though this would be denied by
+ # the standard message ratelimiter
+ self._redact_event(self.mod_access_token, self.room_id, msg_id)
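
The rc_message and rc_admin_redaction settings describe a token-bucket limiter: per_second is the refill rate and burst_count the bucket size, so 0.2/10 permits a burst of ten messages but only one every five seconds sustained, while the separate redaction bucket is far more generous. A toy model of the check, mirroring just those two knobs:

import time

class TokenBucket(object):
    """Toy limiter: `per_second` refill rate, `burst_count` capacity."""

    def __init__(self, per_second, burst_count):
        self.per_second = per_second
        self.burst_count = burst_count
        self.tokens = burst_count
        self.last = time.monotonic()

    def allow(self):
        now = time.monotonic()
        # Refill at `per_second`, never exceeding `burst_count`.
        self.tokens = min(
            self.burst_count, self.tokens + (now - self.last) * self.per_second
        )
        self.last = now
        if self.tokens >= 1:
            self.tokens -= 1
            return True
        return False
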
diff --git a/tests/rest/client/test_retention.py b/tests/rest/client/test_retention.py
new file mode 100644
index 0000000000..4303f95206
--- /dev/null
+++ b/tests/rest/client/test_retention.py
@@ -0,0 +1,292 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from mock import Mock
+
+from synapse.api.constants import EventTypes
+from synapse.rest import admin
+from synapse.rest.client.v1 import login, room
+from synapse.visibility import filter_events_for_client
+
+from tests import unittest
+
+one_hour_ms = 3600000
+one_day_ms = one_hour_ms * 24
+
+
+class RetentionTestCase(unittest.HomeserverTestCase):
+ servlets = [
+ admin.register_servlets,
+ login.register_servlets,
+ room.register_servlets,
+ ]
+
+ def make_homeserver(self, reactor, clock):
+ config = self.default_config()
+ config["default_room_version"] = "1"
+ config["retention"] = {
+ "enabled": True,
+ "default_policy": {
+ "min_lifetime": one_day_ms,
+ "max_lifetime": one_day_ms * 3,
+ },
+ "allowed_lifetime_min": one_day_ms,
+ "allowed_lifetime_max": one_day_ms * 3,
+ }
+
+ self.hs = self.setup_test_homeserver(config=config)
+ return self.hs
+
+ def prepare(self, reactor, clock, homeserver):
+ self.user_id = self.register_user("user", "password")
+ self.token = self.login("user", "password")
+
+ def test_retention_state_event(self):
+ """Tests that the server configuration can limit the values a user can set to the
+ room's retention policy.
+ """
+ room_id = self.helper.create_room_as(self.user_id, tok=self.token)
+
+ self.helper.send_state(
+ room_id=room_id,
+ event_type=EventTypes.Retention,
+ body={"max_lifetime": one_day_ms * 4},
+ tok=self.token,
+ expect_code=400,
+ )
+
+ self.helper.send_state(
+ room_id=room_id,
+ event_type=EventTypes.Retention,
+ body={"max_lifetime": one_hour_ms},
+ tok=self.token,
+ expect_code=400,
+ )
+
+ def test_retention_event_purged_with_state_event(self):
+ """Tests that expired events are correctly purged when the room's retention policy
+ is defined by a state event.
+ """
+ room_id = self.helper.create_room_as(self.user_id, tok=self.token)
+
+ # Set the room's retention period to 2 days.
+ lifetime = one_day_ms * 2
+ self.helper.send_state(
+ room_id=room_id,
+ event_type=EventTypes.Retention,
+ body={"max_lifetime": lifetime},
+ tok=self.token,
+ )
+
+ self._test_retention_event_purged(room_id, one_day_ms * 1.5)
+
+ def test_retention_event_purged_without_state_event(self):
+ """Tests that expired events are correctly purged when the room's retention policy
+ is defined by the server's configuration's default retention policy.
+ """
+ room_id = self.helper.create_room_as(self.user_id, tok=self.token)
+
+ self._test_retention_event_purged(room_id, one_day_ms * 2)
+
+ def test_visibility(self):
+ """Tests that synapse.visibility.filter_events_for_client correctly filters out
+ outdated events
+ """
+ store = self.hs.get_datastore()
+ room_id = self.helper.create_room_as(self.user_id, tok=self.token)
+ events = []
+
+ # Send a first event, which should be filtered out at the end of the test.
+ resp = self.helper.send(room_id=room_id, body="1", tok=self.token)
+
+ # Get the event from the store so that we end up with a FrozenEvent that we can
+ # give to filter_events_for_client. We need to do this now because the event won't
+ # be in the database anymore after it has expired.
+ events.append(self.get_success(store.get_event(resp.get("event_id"))))
+
+ # Advance the time by 2 days. We're using the default retention policy, so
+ # after this the first event will still be valid.
+ self.reactor.advance(one_day_ms * 2 / 1000)
+
+ # Send another event, which shouldn't get filtered out.
+ resp = self.helper.send(room_id=room_id, body="2", tok=self.token)
+
+ valid_event_id = resp.get("event_id")
+
+ events.append(self.get_success(store.get_event(valid_event_id)))
+
+ # Advance the time by another 2 days. After this, the first event should be
+ # outdated but not the second one.
+ self.reactor.advance(one_day_ms * 2 / 1000)
+
+ # Run filter_events_for_client with our list of FrozenEvents.
+ filtered_events = self.get_success(
+ filter_events_for_client(store, self.user_id, events)
+ )
+
+ # We should only get one event back.
+ self.assertEqual(len(filtered_events), 1, filtered_events)
+ # That event should be the second, not outdated event.
+ self.assertEqual(filtered_events[0].event_id, valid_event_id, filtered_events)
+
+ def _test_retention_event_purged(self, room_id, increment):
+ # Get the create event so that we can check later that we can still access it.
+ message_handler = self.hs.get_message_handler()
+ create_event = self.get_success(
+ message_handler.get_room_data(self.user_id, room_id, EventTypes.Create)
+ )
+
+ # Send a first event to the room. This is the event we'll want to be purged at the
+ # end of the test.
+ resp = self.helper.send(room_id=room_id, body="1", tok=self.token)
+
+ expired_event_id = resp.get("event_id")
+
+ # Check that we can retrieve the event.
+ expired_event = self.get_event(room_id, expired_event_id)
+ self.assertEqual(
+ expired_event.get("content", {}).get("body"), "1", expired_event
+ )
+
+ # Advance the time.
+ self.reactor.advance(increment / 1000)
+
+ # Send another event. We need this because the purge job won't purge the most
+ # recent event in the room.
+ resp = self.helper.send(room_id=room_id, body="2", tok=self.token)
+
+ valid_event_id = resp.get("event_id")
+
+ # Advance the time again. Now our first event should have expired but our second
+ # one should still be kept.
+ self.reactor.advance(increment / 1000)
+
+ # Check that the event has been purged from the database.
+ self.get_event(room_id, expired_event_id, expected_code=404)
+
+ # Check that the event that hasn't been purged can still be retrieved.
+ valid_event = self.get_event(room_id, valid_event_id)
+ self.assertEqual(valid_event.get("content", {}).get("body"), "2", valid_event)
+
+ # Check that we can still access state events that were sent before the event that
+ # has been purged.
+ self.get_event(room_id, create_event.event_id)
+
+ def get_event(self, room_id, event_id, expected_code=200):
+ url = "/_matrix/client/r0/rooms/%s/event/%s" % (room_id, event_id)
+
+ request, channel = self.make_request("GET", url, access_token=self.token)
+ self.render(request)
+
+ self.assertEqual(channel.code, expected_code, channel.result)
+
+ return channel.json_body
+
+
+class RetentionNoDefaultPolicyTestCase(unittest.HomeserverTestCase):
+ servlets = [
+ admin.register_servlets,
+ login.register_servlets,
+ room.register_servlets,
+ ]
+
+ def make_homeserver(self, reactor, clock):
+ config = self.default_config()
+ config["default_room_version"] = "1"
+ config["retention"] = {"enabled": True}
+
+ mock_federation_client = Mock(spec=["backfill"])
+
+ self.hs = self.setup_test_homeserver(
+ config=config, federation_client=mock_federation_client
+ )
+ return self.hs
+
+ def prepare(self, reactor, clock, homeserver):
+ self.user_id = self.register_user("user", "password")
+ self.token = self.login("user", "password")
+
+ def test_no_default_policy(self):
+ """Tests that an event doesn't get expired if there is neither a default retention
+ policy nor a policy specific to the room.
+ """
+ room_id = self.helper.create_room_as(self.user_id, tok=self.token)
+
+ self._test_retention(room_id)
+
+ def test_state_policy(self):
+ """Tests that an event gets correctly expired if there is no default retention
+ policy but there's a policy specific to the room.
+ """
+ room_id = self.helper.create_room_as(self.user_id, tok=self.token)
+
+ # Set the maximum lifetime to 35 days so that the first event gets expired but not
+ # the second one.
+ self.helper.send_state(
+ room_id=room_id,
+ event_type=EventTypes.Retention,
+ body={"max_lifetime": one_day_ms * 35},
+ tok=self.token,
+ )
+
+ self._test_retention(room_id, expected_code_for_first_event=404)
+
+ def _test_retention(self, room_id, expected_code_for_first_event=200):
+ # Send a first event to the room. This is the event we'll want to be purged at the
+ # end of the test.
+ resp = self.helper.send(room_id=room_id, body="1", tok=self.token)
+
+ first_event_id = resp.get("event_id")
+
+ # Check that we can retrieve the event.
+ expired_event = self.get_event(room_id, first_event_id)
+ self.assertEqual(
+ expired_event.get("content", {}).get("body"), "1", expired_event
+ )
+
+ # Advance the time by a month.
+ self.reactor.advance(one_day_ms * 30 / 1000)
+
+ # Send another event. We need this because the purge job won't purge the most
+ # recent event in the room.
+ resp = self.helper.send(room_id=room_id, body="2", tok=self.token)
+
+ second_event_id = resp.get("event_id")
+
+ # Advance the time by another month.
+ self.reactor.advance(one_day_ms * 30 / 1000)
+
+ # Check if the event has been purged from the database.
+ first_event = self.get_event(
+ room_id, first_event_id, expected_code=expected_code_for_first_event
+ )
+
+ if expected_code_for_first_event == 200:
+ self.assertEqual(
+ first_event.get("content", {}).get("body"), "1", first_event
+ )
+
+ # Check that the event that hasn't been purged can still be retrieved.
+ second_event = self.get_event(room_id, second_event_id)
+ self.assertEqual(second_event.get("content", {}).get("body"), "2", second_event)
+
+ def get_event(self, room_id, event_id, expected_code=200):
+ url = "/_matrix/client/r0/rooms/%s/event/%s" % (room_id, event_id)
+
+ request, channel = self.make_request("GET", url, access_token=self.token)
+ self.render(request)
+
+ self.assertEqual(channel.code, expected_code, channel.result)
+
+ return channel.json_body
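
Two rules drive the retention assertions: a room's m.room.retention policy must fall inside the server's allowed_lifetime_min/allowed_lifetime_max window, and an event becomes purgeable once its age exceeds the effective max_lifetime. In outline, as hypothetical helpers mirroring RetentionTestCase's config keys:

def effective_max_lifetime(room_policy, config):
    """Resolve the max_lifetime the purge jobs use for a room."""
    lifetime = (room_policy or {}).get(
        "max_lifetime", config["default_policy"]["max_lifetime"]
    )
    # Out-of-range values are rejected with a 400 at send time in the
    # tests above; clamping is shown here purely for illustration.
    lifetime = max(lifetime, config["allowed_lifetime_min"])
    return min(lifetime, config["allowed_lifetime_max"])

def is_expired(event_ts_ms, now_ms, max_lifetime_ms):
    # An event may be purged once it has outlived the policy; the purge
    # job still spares the most recent event in the room.
    return now_ms - event_ts_ms > max_lifetime_ms
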
diff --git a/tests/rest/client/test_room_access_rules.py b/tests/rest/client/test_room_access_rules.py
new file mode 100644
index 0000000000..d44f5c2c8c
--- /dev/null
+++ b/tests/rest/client/test_room_access_rules.py
@@ -0,0 +1,721 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import json
+import random
+import string
+
+from mock import Mock
+
+from twisted.internet import defer
+
+from synapse.api.constants import EventTypes, JoinRules, RoomCreationPreset
+from synapse.rest import admin
+from synapse.rest.client.v1 import login, room
+from synapse.third_party_rules.access_rules import (
+ ACCESS_RULE_DIRECT,
+ ACCESS_RULE_RESTRICTED,
+ ACCESS_RULE_UNRESTRICTED,
+ ACCESS_RULES_TYPE,
+)
+
+from tests import unittest
+
+
+class RoomAccessTestCase(unittest.HomeserverTestCase):
+
+ servlets = [
+ admin.register_servlets,
+ login.register_servlets,
+ room.register_servlets,
+ ]
+
+ def make_homeserver(self, reactor, clock):
+ config = self.default_config()
+
+ config["third_party_event_rules"] = {
+ "module": "synapse.third_party_rules.access_rules.RoomAccessRules",
+ "config": {
+ "domains_forbidden_when_restricted": ["forbidden_domain"],
+ "id_server": "testis",
+ },
+ }
+ config["trusted_third_party_id_servers"] = ["testis"]
+
+ def send_invite(destination, room_id, event_id, pdu):
+ return defer.succeed(pdu)
+
+ def get_json(uri, args={}, headers=None):
+ address_domain = args["address"].split("@")[1]
+ return defer.succeed({"hs": address_domain})
+
+ def post_json_get_json(uri, post_json, args={}, headers=None):
+ token = "".join(random.choice(string.ascii_letters) for _ in range(10))
+ return defer.succeed(
+ {
+ "token": token,
+ "public_keys": [
+ {
+ "public_key": "serverpublickey",
+ "key_validity_url": "https://testis/pubkey/isvalid",
+ },
+ {
+ "public_key": "phemeralpublickey",
+ "key_validity_url": "https://testis/pubkey/ephemeral/isvalid",
+ },
+ ],
+ "display_name": "f...@b...",
+ }
+ )
+
+ mock_federation_client = Mock(spec=["send_invite"])
+ mock_federation_client.send_invite.side_effect = send_invite
+
+ mock_http_client = Mock(
+ spec=["get_json", "post_json_get_json"],
+ )
+ # Mocking the response for /info on the IS API.
+ mock_http_client.get_json.side_effect = get_json
+ # Mocking the response for /store-invite on the IS API.
+ mock_http_client.post_json_get_json.side_effect = post_json_get_json
+ self.hs = self.setup_test_homeserver(
+ config=config,
+ federation_client=mock_federation_client,
+ simple_http_client=mock_http_client,
+ )
+
+ return self.hs
+
+ def prepare(self, reactor, clock, homeserver):
+ self.user_id = self.register_user("kermit", "monkey")
+ self.tok = self.login("kermit", "monkey")
+
+ self.restricted_room = self.create_room()
+ self.unrestricted_room = self.create_room(rule=ACCESS_RULE_UNRESTRICTED)
+ self.direct_rooms = [
+ self.create_room(direct=True),
+ self.create_room(direct=True),
+ self.create_room(direct=True),
+ ]
+
+ self.invitee_id = self.register_user("invitee", "test")
+ self.invitee_tok = self.login("invitee", "test")
+
+ self.helper.invite(
+ room=self.direct_rooms[0],
+ src=self.user_id,
+ targ=self.invitee_id,
+ tok=self.tok,
+ )
+
+ def test_create_room_no_rule(self):
+ """Tests that creating a room with no rule will set the default value."""
+ room_id = self.create_room()
+ rule = self.current_rule_in_room(room_id)
+
+ self.assertEqual(rule, ACCESS_RULE_RESTRICTED)
+
+ def test_create_room_direct_no_rule(self):
+ """Tests that creating a direct room with no rule will set the default value."""
+ room_id = self.create_room(direct=True)
+ rule = self.current_rule_in_room(room_id)
+
+ self.assertEqual(rule, ACCESS_RULE_DIRECT)
+
+ def test_create_room_valid_rule(self):
+ """Tests that creating a room with a valid rule will set the right value."""
+ room_id = self.create_room(rule=ACCESS_RULE_UNRESTRICTED)
+ rule = self.current_rule_in_room(room_id)
+
+ self.assertEqual(rule, ACCESS_RULE_UNRESTRICTED)
+
+ def test_create_room_invalid_rule(self):
+ """Tests that creating a room with an invalid rule will set fail."""
+ self.create_room(rule=ACCESS_RULE_DIRECT, expected_code=400)
+
+ def test_create_room_direct_invalid_rule(self):
+ """Tests that creating a direct room with an invalid rule will fail.
+ """
+ self.create_room(direct=True, rule=ACCESS_RULE_RESTRICTED, expected_code=400)
+
+ def test_public_room(self):
+ """Tests that it's not possible to have a room with the public join rule and an
+ access rule that's not restricted.
+ """
+ # Creating a room with the public_chat preset should succeed and set the access
+ # rule to restricted.
+ preset_room_id = self.create_room(preset=RoomCreationPreset.PUBLIC_CHAT)
+ self.assertEqual(
+ self.current_rule_in_room(preset_room_id), ACCESS_RULE_RESTRICTED
+ )
+
+ # Creating a room with the public join rule in its initial state should succeed
+ # and set the access rule to restricted.
+ init_state_room_id = self.create_room(
+ initial_state=[
+ {
+ "type": "m.room.join_rules",
+ "content": {"join_rule": JoinRules.PUBLIC},
+ }
+ ]
+ )
+ self.assertEqual(
+ self.current_rule_in_room(init_state_room_id), ACCESS_RULE_RESTRICTED
+ )
+
+ # Changing access rule to unrestricted should fail.
+ self.change_rule_in_room(
+ preset_room_id, ACCESS_RULE_UNRESTRICTED, expected_code=403
+ )
+ self.change_rule_in_room(
+ init_state_room_id, ACCESS_RULE_UNRESTRICTED, expected_code=403
+ )
+
+ # Changing access rule to direct should fail.
+ self.change_rule_in_room(preset_room_id, ACCESS_RULE_DIRECT, expected_code=403)
+ self.change_rule_in_room(
+ init_state_room_id, ACCESS_RULE_DIRECT, expected_code=403
+ )
+
+ # Changing join rule to public in an unrestricted room should fail.
+ self.change_join_rule_in_room(
+ self.unrestricted_room, JoinRules.PUBLIC, expected_code=403
+ )
+ # Changing join rule to public in a direct room should fail.
+ self.change_join_rule_in_room(
+ self.direct_rooms[0], JoinRules.PUBLIC, expected_code=403
+ )
+
+ # Creating a new room with the public_chat preset and an access rule that isn't
+ # restricted should fail.
+ self.create_room(
+ preset=RoomCreationPreset.PUBLIC_CHAT,
+ rule=ACCESS_RULE_UNRESTRICTED,
+ expected_code=400,
+ )
+ self.create_room(
+ preset=RoomCreationPreset.PUBLIC_CHAT,
+ rule=ACCESS_RULE_DIRECT,
+ expected_code=400,
+ )
+
+ # Creating a room with the public join rule in its initial state and an access
+ # rule that isn't restricted should fail.
+ self.create_room(
+ initial_state=[
+ {
+ "type": "m.room.join_rules",
+ "content": {"join_rule": JoinRules.PUBLIC},
+ }
+ ],
+ rule=ACCESS_RULE_UNRESTRICTED,
+ expected_code=400,
+ )
+ self.create_room(
+ initial_state=[
+ {
+ "type": "m.room.join_rules",
+ "content": {"join_rule": JoinRules.PUBLIC},
+ }
+ ],
+ rule=ACCESS_RULE_DIRECT,
+ expected_code=400,
+ )
+
+ def test_restricted(self):
+ """Tests that in restricted mode we're unable to invite users from blacklisted
+ servers but can invite other users.
+ """
+ # We can't invite a user from a forbidden HS.
+ self.helper.invite(
+ room=self.restricted_room,
+ src=self.user_id,
+ targ="@test:forbidden_domain",
+ tok=self.tok,
+ expect_code=403,
+ )
+
+ # We can invite a user whose HS isn't forbidden.
+ self.helper.invite(
+ room=self.restricted_room,
+ src=self.user_id,
+ targ="@test:allowed_domain",
+ tok=self.tok,
+ expect_code=200,
+ )
+
+ # We can't send a 3PID invite to an address that is mapped to a forbidden HS.
+ self.send_threepid_invite(
+ address="test@forbidden_domain",
+ room_id=self.restricted_room,
+ expected_code=403,
+ )
+
+ # We can send a 3PID invite to an address that is mapped to an HS that's not
+ # forbidden.
+ self.send_threepid_invite(
+ address="test@allowed_domain",
+ room_id=self.restricted_room,
+ expected_code=200,
+ )
+
+ def test_direct(self):
+ """Tests that, in direct mode, other users than the initial two can't be invited,
+ but the following scenario works:
+ * invited user joins the room
+ * invited user leaves the room
+ * room creator re-invites invited user
+ Also tests that a user from an HS that's in the list of forbidden domains (to use
+ in restricted mode) can be invited.
+ """
+ not_invited_user = "@not_invited:forbidden_domain"
+
+ # We can't invite a new user to the room.
+ self.helper.invite(
+ room=self.direct_rooms[0],
+ src=self.user_id,
+ targ=not_invited_user,
+ tok=self.tok,
+ expect_code=403,
+ )
+
+ # The invited user can join the room.
+ self.helper.join(
+ room=self.direct_rooms[0],
+ user=self.invitee_id,
+ tok=self.invitee_tok,
+ expect_code=200,
+ )
+
+ # The invited user can leave the room.
+ self.helper.leave(
+ room=self.direct_rooms[0],
+ user=self.invitee_id,
+ tok=self.invitee_tok,
+ expect_code=200,
+ )
+
+ # The invited user can be re-invited to the room.
+ self.helper.invite(
+ room=self.direct_rooms[0],
+ src=self.user_id,
+ targ=self.invitee_id,
+ tok=self.tok,
+ expect_code=200,
+ )
+
+ # If we're alone in the room and have always been the only member, we can invite
+ # someone.
+ self.helper.invite(
+ room=self.direct_rooms[1],
+ src=self.user_id,
+ targ=not_invited_user,
+ tok=self.tok,
+ expect_code=200,
+ )
+
+ # Disable the 3pid invite ratelimiter
+ burst = self.hs.config.rc_third_party_invite.burst_count
+ per_second = self.hs.config.rc_third_party_invite.per_second
+ self.hs.config.rc_third_party_invite.burst_count = 10
+ self.hs.config.rc_third_party_invite.per_second = 0.1
+
+ # We can't send a 3PID invite to a room that already has two members.
+ self.send_threepid_invite(
+ address="test@allowed_domain",
+ room_id=self.direct_rooms[0],
+ expected_code=403,
+ )
+
+ # We can't send a 3PID invite to a room that already has a pending invite.
+ self.send_threepid_invite(
+ address="test@allowed_domain",
+ room_id=self.direct_rooms[1],
+ expected_code=403,
+ )
+
+ # We can send a 3PID invite to a room in which we've always been the only member.
+ self.send_threepid_invite(
+ address="test@forbidden_domain",
+ room_id=self.direct_rooms[2],
+ expected_code=200,
+ )
+
+ # We can't send another 3PID invite to a room in which there's already a
+ # pending 3PID invite.
+ self.send_threepid_invite(
+ address="test@forbidden_domain",
+ room_id=self.direct_rooms[2],
+ expected_code=403,
+ )
+
+ self.hs.config.rc_third_party_invite.burst_count = burst
+ self.hs.config.rc_third_party_invite.per_second = per_second
+
+ def test_unrestricted(self):
+ """Tests that, in unrestricted mode, we can invite whoever we want, but we can
+ only change the power level of users that wouldn't be forbidden in restricted
+ mode.
+ """
+ # We can invite
+ self.helper.invite(
+ room=self.unrestricted_room,
+ src=self.user_id,
+ targ="@test:forbidden_domain",
+ tok=self.tok,
+ expect_code=200,
+ )
+
+ self.helper.invite(
+ room=self.unrestricted_room,
+ src=self.user_id,
+ targ="@test:not_forbidden_domain",
+ tok=self.tok,
+ expect_code=200,
+ )
+
+ # We can send a 3PID invite to an address that is mapped to a forbidden HS.
+ self.send_threepid_invite(
+ address="test@forbidden_domain",
+ room_id=self.unrestricted_room,
+ expected_code=200,
+ )
+
+ # We can send a 3PID invite to an address that is mapped to an HS that's not
+ # forbidden.
+ self.send_threepid_invite(
+ address="test@allowed_domain",
+ room_id=self.unrestricted_room,
+ expected_code=200,
+ )
+
+ # We can send a power level event that doesn't redefine the default PL or set a
+ # non-default PL for a user that would be forbidden in restricted mode.
+ self.helper.send_state(
+ room_id=self.unrestricted_room,
+ event_type=EventTypes.PowerLevels,
+ body={"users": {self.user_id: 100, "@test:not_forbidden_domain": 10}},
+ tok=self.tok,
+ expect_code=200,
+ )
+
+ # We can't send a power level event that redefines the default PL and doesn't set
+ # a non-default PL for a user that would be forbidden in restricted mode.
+ self.helper.send_state(
+ room_id=self.unrestricted_room,
+ event_type=EventTypes.PowerLevels,
+ body={
+ "users": {self.user_id: 100, "@test:not_forbidden_domain": 10},
+ "users_default": 10,
+ },
+ tok=self.tok,
+ expect_code=403,
+ )
+
+ # We can't send a power level event that doesn't redefine the default PL but sets
+ # a non-default PL for a user that would be forbidden in restricted mode.
+ self.helper.send_state(
+ room_id=self.unrestricted_room,
+ event_type=EventTypes.PowerLevels,
+ body={"users": {self.user_id: 100, "@test:forbidden_domain": 10}},
+ tok=self.tok,
+ expect_code=403,
+ )
+
+ def test_change_rules(self):
+ """Tests that we can only change the current rule from restricted to
+ unrestricted.
+ """
+ # We can change the rule from restricted to unrestricted.
+ self.change_rule_in_room(
+ room_id=self.restricted_room,
+ new_rule=ACCESS_RULE_UNRESTRICTED,
+ expected_code=200,
+ )
+
+ # We can't change the rule from restricted to direct.
+ self.change_rule_in_room(
+ room_id=self.restricted_room, new_rule=ACCESS_RULE_DIRECT, expected_code=403
+ )
+
+ # We can't change the rule from unrestricted to restricted.
+ self.change_rule_in_room(
+ room_id=self.unrestricted_room,
+ new_rule=ACCESS_RULE_RESTRICTED,
+ expected_code=403,
+ )
+
+ # We can't change the rule from unrestricted to direct.
+ self.change_rule_in_room(
+ room_id=self.unrestricted_room,
+ new_rule=ACCESS_RULE_DIRECT,
+ expected_code=403,
+ )
+
+ # We can't change the rule from direct to restricted.
+ self.change_rule_in_room(
+ room_id=self.direct_rooms[0],
+ new_rule=ACCESS_RULE_RESTRICTED,
+ expected_code=403,
+ )
+
+ # We can't change the rule from direct to unrestricted.
+ self.change_rule_in_room(
+ room_id=self.direct_rooms[0],
+ new_rule=ACCESS_RULE_UNRESTRICTED,
+ expected_code=403,
+ )
+
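
test_change_rules reduces to a one-entry transition table: the only permitted change is restricted to unrestricted. As a guard, hypothetically (string values standing in for the ACCESS_RULE_* constants):

ALLOWED_RULE_TRANSITIONS = {("restricted", "unrestricted")}

def can_change_rule(old_rule, new_rule):
    """Only restricted rooms may be relaxed to unrestricted; every
    other combination is rejected (the 403s above)."""
    return (old_rule, new_rule) in ALLOWED_RULE_TRANSITIONS
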
+ def test_change_room_avatar(self):
+ """Tests that changing the room avatar is always allowed unless the room is a
+ direct chat, in which case it's forbidden.
+ """
+
+ avatar_content = {
+ "info": {"h": 398, "mimetype": "image/jpeg", "size": 31037, "w": 394},
+ "url": "mxc://example.org/JWEIFJgwEIhweiWJE",
+ }
+
+ self.helper.send_state(
+ room_id=self.restricted_room,
+ event_type=EventTypes.RoomAvatar,
+ body=avatar_content,
+ tok=self.tok,
+ expect_code=200,
+ )
+
+ self.helper.send_state(
+ room_id=self.unrestricted_room,
+ event_type=EventTypes.RoomAvatar,
+ body=avatar_content,
+ tok=self.tok,
+ expect_code=200,
+ )
+
+ self.helper.send_state(
+ room_id=self.direct_rooms[0],
+ event_type=EventTypes.RoomAvatar,
+ body=avatar_content,
+ tok=self.tok,
+ expect_code=403,
+ )
+
+ def test_change_room_name(self):
+ """Tests that changing the room name is always allowed unless the room is a direct
+ chat, in which case it's forbidden.
+ """
+
+ name_content = {"name": "My super room"}
+
+ self.helper.send_state(
+ room_id=self.restricted_room,
+ event_type=EventTypes.Name,
+ body=name_content,
+ tok=self.tok,
+ expect_code=200,
+ )
+
+ self.helper.send_state(
+ room_id=self.unrestricted_room,
+ event_type=EventTypes.Name,
+ body=name_content,
+ tok=self.tok,
+ expect_code=200,
+ )
+
+ self.helper.send_state(
+ room_id=self.direct_rooms[0],
+ event_type=EventTypes.Name,
+ body=name_content,
+ tok=self.tok,
+ expect_code=403,
+ )
+
+ def test_change_room_topic(self):
+ """Tests that changing the room topic is always allowed unless the room is a
+ direct chat, in which case it's forbidden.
+ """
+
+ topic_content = {"topic": "Welcome to this room"}
+
+ self.helper.send_state(
+ room_id=self.restricted_room,
+ event_type=EventTypes.Topic,
+ body=topic_content,
+ tok=self.tok,
+ expect_code=200,
+ )
+
+ self.helper.send_state(
+ room_id=self.unrestricted_room,
+ event_type=EventTypes.Topic,
+ body=topic_content,
+ tok=self.tok,
+ expect_code=200,
+ )
+
+ self.helper.send_state(
+ room_id=self.direct_rooms[0],
+ event_type=EventTypes.Topic,
+ body=topic_content,
+ tok=self.tok,
+ expect_code=403,
+ )
+
+ def test_revoke_3pid_invite_direct(self):
+        """Tests that revoking a 3PID invite doesn't cause the room access rules
+        module to mistake the revocation for a new 3PID invite.
+ """
+ invite_token = "sometoken"
+
+ invite_body = {
+ "display_name": "ker...@exa...",
+ "public_keys": [
+ {
+ "key_validity_url": "https://validity_url",
+ "public_key": "ta8IQ0u1sp44HVpxYi7dFOdS/bfwDjcy4xLFlfY5KOA",
+ },
+ {
+ "key_validity_url": "https://validity_url",
+ "public_key": "4_9nzEeDwR5N9s51jPodBiLnqH43A2_g2InVT137t9I",
+ },
+ ],
+ "key_validity_url": "https://validity_url",
+ "public_key": "ta8IQ0u1sp44HVpxYi7dFOdS/bfwDjcy4xLFlfY5KOA",
+ }
+
+ self.send_state_with_state_key(
+ room_id=self.direct_rooms[1],
+ event_type=EventTypes.ThirdPartyInvite,
+ state_key=invite_token,
+ body=invite_body,
+ tok=self.tok,
+ )
+
+ self.send_state_with_state_key(
+ room_id=self.direct_rooms[1],
+ event_type=EventTypes.ThirdPartyInvite,
+ state_key=invite_token,
+ body={},
+ tok=self.tok,
+ )
+
+ invite_token = "someothertoken"
+
+ self.send_state_with_state_key(
+ room_id=self.direct_rooms[1],
+ event_type=EventTypes.ThirdPartyInvite,
+ state_key=invite_token,
+ body=invite_body,
+ tok=self.tok,
+ )
+
+ def create_room(
+ self,
+ direct=False,
+ rule=None,
+ preset=RoomCreationPreset.TRUSTED_PRIVATE_CHAT,
+ initial_state=None,
+ expected_code=200,
+ ):
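+        """Creates a room, optionally with an access rule and extra initial state.
+
+        Returns the room ID if the creation was expected to succeed (i.e. when
+        expected_code is 200).
+        """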
+ content = {"is_direct": direct, "preset": preset}
+
+ if rule:
+ content["initial_state"] = [
+ {"type": ACCESS_RULES_TYPE, "state_key": "", "content": {"rule": rule}}
+ ]
+
+ if initial_state:
+ if "initial_state" not in content:
+ content["initial_state"] = []
+
+ content["initial_state"] += initial_state
+
+ request, channel = self.make_request(
+ "POST",
+ "/_matrix/client/r0/createRoom",
+ json.dumps(content),
+ access_token=self.tok,
+ )
+ self.render(request)
+
+ self.assertEqual(channel.code, expected_code, channel.result)
+
+ if expected_code == 200:
+ return channel.json_body["room_id"]
+
+ def current_rule_in_room(self, room_id):
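+        """Returns the room's current access rule, read from its room state."""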
+ request, channel = self.make_request(
+ "GET",
+ "/_matrix/client/r0/rooms/%s/state/%s" % (room_id, ACCESS_RULES_TYPE),
+ access_token=self.tok,
+ )
+ self.render(request)
+
+ self.assertEqual(channel.code, 200, channel.result)
+ return channel.json_body["rule"]
+
+ def change_rule_in_room(self, room_id, new_rule, expected_code=200):
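+        """Sends a new access rule to the room's state, expecting the given code."""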
+ data = {"rule": new_rule}
+ request, channel = self.make_request(
+ "PUT",
+ "/_matrix/client/r0/rooms/%s/state/%s" % (room_id, ACCESS_RULES_TYPE),
+ json.dumps(data),
+ access_token=self.tok,
+ )
+ self.render(request)
+
+ self.assertEqual(channel.code, expected_code, channel.result)
+
+ def change_join_rule_in_room(self, room_id, new_join_rule, expected_code=200):
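+        """Sends a new join rule to the room's state, expecting the given code."""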
+ data = {"join_rule": new_join_rule}
+ request, channel = self.make_request(
+ "PUT",
+ "/_matrix/client/r0/rooms/%s/state/%s" % (room_id, EventTypes.JoinRules),
+ json.dumps(data),
+ access_token=self.tok,
+ )
+ self.render(request)
+
+ self.assertEqual(channel.code, expected_code, channel.result)
+
+ def send_threepid_invite(self, address, room_id, expected_code=200):
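+        """Sends a 3PID invite for the given email address into the room."""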
+ params = {"id_server": "testis", "medium": "email", "address": address}
+
+ request, channel = self.make_request(
+ "POST",
+ "/_matrix/client/r0/rooms/%s/invite" % room_id,
+ json.dumps(params),
+ access_token=self.tok,
+ )
+ self.render(request)
+ self.assertEqual(channel.code, expected_code, channel.result)
+
+ def send_state_with_state_key(
+ self, room_id, event_type, state_key, body, tok, expect_code=200
+ ):
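+        """Sends a state event with an explicit state key and returns the response
+        body.
+        """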
+ path = "/_matrix/client/r0/rooms/%s/state/%s/%s" % (
+ room_id,
+ event_type,
+ state_key,
+ )
+
+ request, channel = self.make_request(
+ "PUT", path, json.dumps(body), access_token=tok
+ )
+ self.render(request)
+
+ self.assertEqual(channel.code, expect_code, channel.result)
+
+ return channel.json_body
diff --git a/tests/rest/client/v1/test_profile.py b/tests/rest/client/v1/test_profile.py
index 140d8b3772..02b4b8f5eb 100644
--- a/tests/rest/client/v1/test_profile.py
+++ b/tests/rest/client/v1/test_profile.py
@@ -229,6 +229,7 @@ class ProfilesRestrictedTestCase(unittest.HomeserverTestCase):
config = self.default_config()
config["require_auth_for_profile_requests"] = True
+ config["limit_profile_requests_to_known_users"] = True
self.hs = self.setup_test_homeserver(config=config)
return self.hs
diff --git a/tests/rest/client/v1/utils.py b/tests/rest/client/v1/utils.py
index 9915367144..cdded88b7f 100644
--- a/tests/rest/client/v1/utils.py
+++ b/tests/rest/client/v1/utils.py
@@ -128,8 +128,12 @@ class RestHelper(object):
return channel.json_body
- def send_state(self, room_id, event_type, body, tok, expect_code=200):
- path = "/_matrix/client/r0/rooms/%s/state/%s" % (room_id, event_type)
+ def send_state(self, room_id, event_type, body, tok, expect_code=200, state_key=""):
+ path = "/_matrix/client/r0/rooms/%s/state/%s/%s" % (
+ room_id,
+ event_type,
+ state_key,
+ )
if tok:
path = path + "?access_token=%s" % tok
diff --git a/tests/rest/client/v2_alpha/test_account.py b/tests/rest/client/v2_alpha/test_account.py
index 920de41de4..9fed900f4a 100644
--- a/tests/rest/client/v2_alpha/test_account.py
+++ b/tests/rest/client/v2_alpha/test_account.py
@@ -23,8 +23,8 @@ from email.parser import Parser
import pkg_resources
import synapse.rest.admin
-from synapse.api.constants import LoginType
-from synapse.rest.client.v1 import login
+from synapse.api.constants import LoginType, Membership
+from synapse.rest.client.v1 import login, room
from synapse.rest.client.v2_alpha import account, register
from tests import unittest
@@ -244,6 +244,7 @@ class DeactivateTestCase(unittest.HomeserverTestCase):
synapse.rest.admin.register_servlets_for_client_rest_resource,
login.register_servlets,
account.register_servlets,
+ room.register_servlets,
]
def make_homeserver(self, reactor, clock):
@@ -279,3 +280,56 @@ class DeactivateTestCase(unittest.HomeserverTestCase):
request, channel = self.make_request("GET", "account/whoami")
self.render(request)
self.assertEqual(request.code, 401)
+
+ @unittest.INFO
+ def test_pending_invites(self):
+ """Tests that deactivating a user rejects every pending invite for them."""
+ store = self.hs.get_datastore()
+
+ inviter_id = self.register_user("inviter", "test")
+ inviter_tok = self.login("inviter", "test")
+
+ invitee_id = self.register_user("invitee", "test")
+ invitee_tok = self.login("invitee", "test")
+
+ # Make @inviter:test invite @invitee:test in a new room.
+ room_id = self.helper.create_room_as(inviter_id, tok=inviter_tok)
+ self.helper.invite(
+ room=room_id, src=inviter_id, targ=invitee_id, tok=inviter_tok
+ )
+
+ # Make sure the invite is here.
+ pending_invites = self.get_success(store.get_invited_rooms_for_user(invitee_id))
+ self.assertEqual(len(pending_invites), 1, pending_invites)
+ self.assertEqual(pending_invites[0].room_id, room_id, pending_invites)
+
+ # Deactivate @invitee:test.
+ self.deactivate(invitee_id, invitee_tok)
+
+ # Check that the invite isn't there anymore.
+ pending_invites = self.get_success(store.get_invited_rooms_for_user(invitee_id))
+ self.assertEqual(len(pending_invites), 0, pending_invites)
+
+ # Check that the membership of @invitee:test in the room is now "leave".
+ memberships = self.get_success(
+ store.get_rooms_for_user_where_membership_is(invitee_id, [Membership.LEAVE])
+ )
+ self.assertEqual(len(memberships), 1, memberships)
+ self.assertEqual(memberships[0].room_id, room_id, memberships)
+
+ def deactivate(self, user_id, tok):
+ request_data = json.dumps(
+ {
+ "auth": {
+ "type": "m.login.password",
+ "user": user_id,
+ "password": "test",
+ },
+ "erase": False,
+ }
+ )
+ request, channel = self.make_request(
+ "POST", "account/deactivate", request_data, access_token=tok
+ )
+ self.render(request)
+ self.assertEqual(request.code, 200)
diff --git a/tests/rest/client/v2_alpha/test_password_policy.py b/tests/rest/client/v2_alpha/test_password_policy.py
new file mode 100644
index 0000000000..37f970c6b0
--- /dev/null
+++ b/tests/rest/client/v2_alpha/test_password_policy.py
@@ -0,0 +1,177 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+
+from synapse.api.constants import LoginType
+from synapse.api.errors import Codes
+from synapse.rest import admin
+from synapse.rest.client.v1 import login
+from synapse.rest.client.v2_alpha import account, password_policy, register
+
+from tests import unittest
+
+
+class PasswordPolicyTestCase(unittest.HomeserverTestCase):
+ """Tests the password policy feature and its compliance with MSC2000.
+
+ When validating a password, Synapse does the necessary checks in this order:
+
+ 1. Password is long enough
+ 2. Password contains digit(s)
+ 3. Password contains symbol(s)
+ 4. Password contains uppercase letter(s)
+ 5. Password contains lowercase letter(s)
+
+    Therefore, each test below that checks for a specific error code provides a
+    password that passes every earlier check but fails the one under test; any
+    later check is irrelevant, since validation stops at the first failure.
+ """
+
+ servlets = [
+ admin.register_servlets_for_client_rest_resource,
+ login.register_servlets,
+ register.register_servlets,
+ password_policy.register_servlets,
+ account.register_servlets,
+ ]
+
+ def make_homeserver(self, reactor, clock):
+ self.register_url = "/_matrix/client/r0/register"
+ self.policy = {
+ "enabled": True,
+ "minimum_length": 10,
+ "require_digit": True,
+ "require_symbol": True,
+ "require_lowercase": True,
+ "require_uppercase": True,
+ }
+
+ config = self.default_config()
+ config["password_config"] = {"policy": self.policy}
+
+ hs = self.setup_test_homeserver(config=config)
+ return hs
+
+ def test_get_policy(self):
+        """Tests that the /password_policy endpoint returns the configured policy."""
+
+ request, channel = self.make_request(
+ "GET", "/_matrix/client/r0/password_policy"
+ )
+ self.render(request)
+
+ self.assertEqual(channel.code, 200, channel.result)
+ self.assertEqual(
+ channel.json_body,
+ {
+ "m.minimum_length": 10,
+ "m.require_digit": True,
+ "m.require_symbol": True,
+ "m.require_lowercase": True,
+ "m.require_uppercase": True,
+ },
+ channel.result,
+ )
+
+ def test_password_too_short(self):
+ request_data = json.dumps({"username": "kermit", "password": "shorty"})
+ request, channel = self.make_request("POST", self.register_url, request_data)
+ self.render(request)
+
+ self.assertEqual(channel.code, 400, channel.result)
+ self.assertEqual(
+ channel.json_body["errcode"], Codes.PASSWORD_TOO_SHORT, channel.result
+ )
+
+ def test_password_no_digit(self):
+ request_data = json.dumps({"username": "kermit", "password": "longerpassword"})
+ request, channel = self.make_request("POST", self.register_url, request_data)
+ self.render(request)
+
+ self.assertEqual(channel.code, 400, channel.result)
+ self.assertEqual(
+ channel.json_body["errcode"], Codes.PASSWORD_NO_DIGIT, channel.result
+ )
+
+ def test_password_no_symbol(self):
+ request_data = json.dumps({"username": "kermit", "password": "l0ngerpassword"})
+ request, channel = self.make_request("POST", self.register_url, request_data)
+ self.render(request)
+
+ self.assertEqual(channel.code, 400, channel.result)
+ self.assertEqual(
+ channel.json_body["errcode"], Codes.PASSWORD_NO_SYMBOL, channel.result
+ )
+
+ def test_password_no_uppercase(self):
+ request_data = json.dumps({"username": "kermit", "password": "l0ngerpassword!"})
+ request, channel = self.make_request("POST", self.register_url, request_data)
+ self.render(request)
+
+ self.assertEqual(channel.code, 400, channel.result)
+ self.assertEqual(
+ channel.json_body["errcode"], Codes.PASSWORD_NO_UPPERCASE, channel.result
+ )
+
+ def test_password_no_lowercase(self):
+ request_data = json.dumps({"username": "kermit", "password": "L0NGERPASSWORD!"})
+ request, channel = self.make_request("POST", self.register_url, request_data)
+ self.render(request)
+
+ self.assertEqual(channel.code, 400, channel.result)
+ self.assertEqual(
+ channel.json_body["errcode"], Codes.PASSWORD_NO_LOWERCASE, channel.result
+ )
+
+ def test_password_compliant(self):
+ request_data = json.dumps({"username": "kermit", "password": "L0ngerpassword!"})
+ request, channel = self.make_request("POST", self.register_url, request_data)
+ self.render(request)
+
+ # Getting a 401 here means the password has passed validation and the server has
+ # responded with a list of registration flows.
+ self.assertEqual(channel.code, 401, channel.result)
+
+ def test_password_change(self):
+ """This doesn't test every possible use case, only that hitting /account/password
+ triggers the password validation code.
+ """
+ compliant_password = "C0mpl!antpassword"
+ not_compliant_password = "notcompliantpassword"
+
+ user_id = self.register_user("kermit", compliant_password)
+ tok = self.login("kermit", compliant_password)
+
+ request_data = json.dumps(
+ {
+ "new_password": not_compliant_password,
+ "auth": {
+ "password": compliant_password,
+ "type": LoginType.PASSWORD,
+ "user": user_id,
+ },
+ }
+ )
+ request, channel = self.make_request(
+ "POST",
+ "/_matrix/client/r0/account/password",
+ request_data,
+ access_token=tok,
+ )
+ self.render(request)
+
+ self.assertEqual(channel.code, 400, channel.result)
+ self.assertEqual(channel.json_body["errcode"], Codes.PASSWORD_NO_DIGIT)
diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py
index bb867150f4..9a5d275d06 100644
--- a/tests/rest/client/v2_alpha/test_register.py
+++ b/tests/rest/client/v2_alpha/test_register.py
@@ -19,8 +19,12 @@ import datetime
import json
import os
+from mock import Mock
+
import pkg_resources
+from twisted.internet import defer
+
import synapse.rest.admin
from synapse.api.constants import LoginType
from synapse.api.errors import Codes
@@ -200,6 +204,47 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase):
self.assertEquals(channel.result["code"], b"200", channel.result)
+class RegisterHideProfileTestCase(unittest.HomeserverTestCase):
+
+ servlets = [synapse.rest.admin.register_servlets_for_client_rest_resource]
+
+ def make_homeserver(self, reactor, clock):
+
+ self.url = b"/_matrix/client/r0/register"
+
+ config = self.default_config()
+ config["enable_registration"] = True
+ config["show_users_in_user_directory"] = False
+ config["replicate_user_profiles_to"] = ["fakeserver"]
+
+ mock_http_client = Mock(spec=["get_json", "post_json_get_json"])
+ mock_http_client.post_json_get_json.return_value = defer.succeed((200, "{}"))
+
+ self.hs = self.setup_test_homeserver(
+ config=config, simple_http_client=mock_http_client
+ )
+
+ return self.hs
+
+ def test_profile_hidden(self):
+ user_id = self.register_user("kermit", "monkey")
+
+ post_json = self.hs.get_simple_http_client().post_json_get_json
+
+ # We expect post_json_get_json to have been called twice: once with the original
+ # profile and once with the None profile resulting from the request to hide it
+ # from the user directory.
+ self.assertEqual(post_json.call_count, 2, post_json.call_args_list)
+
+ # Get the args (and not kwargs) passed to post_json.
+ args = post_json.call_args[0]
+ # Make sure the last call was attempting to replicate profiles.
+        split_uri = args[0].split("/")
+        self.assertEqual(split_uri[-1], "replicate_profiles", args[0])
+ # Make sure the last profile update was overriding the user's profile to None.
+ self.assertEqual(args[1]["batch"][user_id], None, args[1])
+
+
class AccountValidityTestCase(unittest.HomeserverTestCase):
servlets = [
@@ -208,6 +253,7 @@ class AccountValidityTestCase(unittest.HomeserverTestCase):
login.register_servlets,
sync.register_servlets,
account_validity.register_servlets,
+ account.register_servlets,
]
def make_homeserver(self, reactor, clock):
@@ -300,6 +346,138 @@ class AccountValidityTestCase(unittest.HomeserverTestCase):
)
+class AccountValidityUserDirectoryTestCase(unittest.HomeserverTestCase):
+
+ servlets = [
+ synapse.rest.client.v1.profile.register_servlets,
+ synapse.rest.client.v1.room.register_servlets,
+ synapse.rest.client.v2_alpha.user_directory.register_servlets,
+ login.register_servlets,
+ register.register_servlets,
+ synapse.rest.admin.register_servlets_for_client_rest_resource,
+ account_validity.register_servlets,
+ ]
+
+ def make_homeserver(self, reactor, clock):
+ config = self.default_config()
+
+ # Set accounts to expire after a week
+ config["enable_registration"] = True
+ config["account_validity"] = {
+ "enabled": True,
+ "period": 604800000, # Time in ms for 1 week
+ }
+ config["replicate_user_profiles_to"] = "test.is"
+
+ # Mock homeserver requests to an identity server
+ mock_http_client = Mock(spec=["post_json_get_json"])
+ mock_http_client.post_json_get_json.return_value = defer.succeed((200, "{}"))
+
+ self.hs = self.setup_test_homeserver(
+ config=config, simple_http_client=mock_http_client
+ )
+
+ return self.hs
+
+ def test_expired_user_in_directory(self):
+ """Test that an expired user is hidden in the user directory"""
+ # Create an admin user to search the user directory
+ admin_id = self.register_user("admin", "adminpassword", admin=True)
+ admin_tok = self.login("admin", "adminpassword")
+
+ # Ensure the admin never expires
+ url = "/_matrix/client/unstable/admin/account_validity/validity"
+ params = {
+ "user_id": admin_id,
+ "expiration_ts": 999999999999,
+ "enable_renewal_emails": False,
+ }
+ request_data = json.dumps(params)
+ request, channel = self.make_request(
+ b"POST", url, request_data, access_token=admin_tok
+ )
+ self.render(request)
+ self.assertEquals(channel.result["code"], b"200", channel.result)
+
+ # Create a user to expire
+ username = "kermit"
+ user_id = self.register_user(username, "monkey")
+ self.login(username, "monkey")
+
+ self.pump(1000)
+ self.reactor.advance(1000)
+ self.pump()
+
+ # Expire the user
+ url = "/_matrix/client/unstable/admin/account_validity/validity"
+ params = {
+ "user_id": user_id,
+ "expiration_ts": 0,
+ "enable_renewal_emails": False,
+ }
+ request_data = json.dumps(params)
+ request, channel = self.make_request(
+ b"POST", url, request_data, access_token=admin_tok
+ )
+ self.render(request)
+ self.assertEquals(channel.result["code"], b"200", channel.result)
+
+ # Wait for the background job to run which hides expired users in the directory
+ self.pump(60 * 60 * 1000)
+
+ # Mock the homeserver's HTTP client
+ post_json = self.hs.get_simple_http_client().post_json_get_json
+
+ # Check if the homeserver has replicated the user's profile to the identity server
+ self.assertNotEquals(post_json.call_args, None, post_json.call_args)
+ payload = post_json.call_args[0][1]
+ batch = payload.get("batch")
+ self.assertNotEquals(batch, None, batch)
+ self.assertEquals(len(batch), 1, batch)
+ replicated_user_id = list(batch.keys())[0]
+ self.assertEquals(replicated_user_id, user_id, replicated_user_id)
+
+ # There was replicated information about our user
+ # Check that it's None, signifying that the user should be removed from the user
+ # directory because they were expired
+ replicated_content = batch[user_id]
+ self.assertIsNone(replicated_content)
+
+ # Now renew the user, and check they get replicated again to the identity server
+ url = "/_matrix/client/unstable/admin/account_validity/validity"
+ params = {
+ "user_id": user_id,
+ "expiration_ts": 99999999999,
+ "enable_renewal_emails": False,
+ }
+ request_data = json.dumps(params)
+ request, channel = self.make_request(
+ b"POST", url, request_data, access_token=admin_tok
+ )
+ self.render(request)
+ self.assertEquals(channel.result["code"], b"200", channel.result)
+
+ self.pump(10)
+ self.reactor.advance(10)
+ self.pump()
+
+ # Check if the homeserver has replicated the user's profile to the identity server
+ post_json = self.hs.get_simple_http_client().post_json_get_json
+ self.assertNotEquals(post_json.call_args, None, post_json.call_args)
+ payload = post_json.call_args[0][1]
+ batch = payload.get("batch")
+ self.assertNotEquals(batch, None, batch)
+ self.assertEquals(len(batch), 1, batch)
+ replicated_user_id = list(batch.keys())[0]
+ self.assertEquals(replicated_user_id, user_id, replicated_user_id)
+
+ # There was replicated information about our user
+ # Check that it's not None, signifying that the user is back in the user
+ # directory
+ replicated_content = batch[user_id]
+ self.assertIsNotNone(replicated_content)
+
+
class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase):
servlets = [
@@ -451,7 +629,7 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase):
"POST", "account/deactivate", request_data, access_token=tok
)
self.render(request)
- self.assertEqual(request.code, 200)
+ self.assertEqual(request.code, 200, channel.result)
self.reactor.advance(datetime.timedelta(days=8).total_seconds())
@@ -472,7 +650,7 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase):
added_at=now,
)
)
- return (user_id, tok)
+ return user_id, tok
def test_manual_email_send_expired_account(self):
user_id = self.register_user("kermit", "monkey")
diff --git a/tests/rulecheck/__init__.py b/tests/rulecheck/__init__.py
new file mode 100644
index 0000000000..a354d38ca8
--- /dev/null
+++ b/tests/rulecheck/__init__.py
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/tests/rulecheck/test_domainrulecheck.py b/tests/rulecheck/test_domainrulecheck.py
new file mode 100644
index 0000000000..1accc70dc9
--- /dev/null
+++ b/tests/rulecheck/test_domainrulecheck.py
@@ -0,0 +1,334 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import json
+
+import synapse.rest.admin
+from synapse.config._base import ConfigError
+from synapse.rest.client.v1 import login, room
+from synapse.rulecheck.domain_rule_checker import DomainRuleChecker
+
+from tests import unittest
+from tests.server import make_request, render
+
+
+class DomainRuleCheckerTestCase(unittest.TestCase):
+ def test_allowed(self):
+ config = {
+ "default": False,
+ "domain_mapping": {
+ "source_one": ["target_one", "target_two"],
+ "source_two": ["target_two"],
+ },
+ "domains_prevented_from_being_invited_to_published_rooms": ["target_two"],
+ }
+ check = DomainRuleChecker(config)
+ self.assertTrue(
+ check.user_may_invite(
+ "test:source_one", "test:target_one", None, "room", False
+ )
+ )
+ self.assertTrue(
+ check.user_may_invite(
+ "test:source_one", "test:target_two", None, "room", False
+ )
+ )
+ self.assertTrue(
+ check.user_may_invite(
+ "test:source_two", "test:target_two", None, "room", False
+ )
+ )
+
+ # User can invite internal user to a published room
+ self.assertTrue(
+ check.user_may_invite(
+ "test:source_one", "test1:target_one", None, "room", False, True
+ )
+ )
+
+ # User can invite external user to a non-published room
+ self.assertTrue(
+ check.user_may_invite(
+ "test:source_one", "test:target_two", None, "room", False, False
+ )
+ )
+
+ def test_disallowed(self):
+ config = {
+ "default": True,
+ "domain_mapping": {
+ "source_one": ["target_one", "target_two"],
+ "source_two": ["target_two"],
+ "source_four": [],
+ },
+ }
+ check = DomainRuleChecker(config)
+ self.assertFalse(
+ check.user_may_invite(
+ "test:source_one", "test:target_three", None, "room", False
+ )
+ )
+ self.assertFalse(
+ check.user_may_invite(
+ "test:source_two", "test:target_three", None, "room", False
+ )
+ )
+ self.assertFalse(
+ check.user_may_invite(
+ "test:source_two", "test:target_one", None, "room", False
+ )
+ )
+ self.assertFalse(
+ check.user_may_invite(
+ "test:source_four", "test:target_one", None, "room", False
+ )
+ )
+
+        # Inviting to a published room is still allowed here, as this config
+        # doesn't set domains_prevented_from_being_invited_to_published_rooms
+ self.assertTrue(
+ check.user_may_invite(
+ "test:source_one", "test:target_two", None, "room", False, True
+ )
+ )
+
+ def test_default_allow(self):
+ config = {
+ "default": True,
+ "domain_mapping": {
+ "source_one": ["target_one", "target_two"],
+ "source_two": ["target_two"],
+ },
+ }
+ check = DomainRuleChecker(config)
+ self.assertTrue(
+ check.user_may_invite(
+ "test:source_three", "test:target_one", None, "room", False
+ )
+ )
+
+ def test_default_deny(self):
+ config = {
+ "default": False,
+ "domain_mapping": {
+ "source_one": ["target_one", "target_two"],
+ "source_two": ["target_two"],
+ },
+ }
+ check = DomainRuleChecker(config)
+ self.assertFalse(
+ check.user_may_invite(
+ "test:source_three", "test:target_one", None, "room", False
+ )
+ )
+
+ def test_config_parse(self):
+ config = {
+ "default": False,
+ "domain_mapping": {
+ "source_one": ["target_one", "target_two"],
+ "source_two": ["target_two"],
+ },
+ }
+ self.assertEquals(config, DomainRuleChecker.parse_config(config))
+
+ def test_config_parse_failure(self):
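+        # Unlike test_config_parse above, this config omits the required
+        # "default" key, so parsing must be rejected.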
+ config = {
+ "domain_mapping": {
+ "source_one": ["target_one", "target_two"],
+ "source_two": ["target_two"],
+ }
+ }
+ self.assertRaises(ConfigError, DomainRuleChecker.parse_config, config)
+
+
+class DomainRuleCheckerRoomTestCase(unittest.HomeserverTestCase):
+ servlets = [
+ synapse.rest.admin.register_servlets_for_client_rest_resource,
+ room.register_servlets,
+ login.register_servlets,
+ ]
+
+ hijack_auth = False
+
+ def make_homeserver(self, reactor, clock):
+ config = self.default_config()
+ config["trusted_third_party_id_servers"] = ["localhost"]
+
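+        # Wire DomainRuleChecker in as the spam checker, with its strictest
+        # options enabled so each restriction can be exercised below.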
+ config["spam_checker"] = {
+ "module": "synapse.rulecheck.domain_rule_checker.DomainRuleChecker",
+ "config": {
+ "default": True,
+ "domain_mapping": {},
+ "can_only_join_rooms_with_invite": True,
+ "can_only_create_one_to_one_rooms": True,
+ "can_only_invite_during_room_creation": True,
+ "can_invite_by_third_party_id": False,
+ },
+ }
+
+ hs = self.setup_test_homeserver(config=config)
+ return hs
+
+ def prepare(self, reactor, clock, hs):
+ self.admin_user_id = self.register_user("admin_user", "pass", admin=True)
+ self.admin_access_token = self.login("admin_user", "pass")
+
+ self.normal_user_id = self.register_user("normal_user", "pass", admin=False)
+ self.normal_access_token = self.login("normal_user", "pass")
+
+ self.other_user_id = self.register_user("other_user", "pass", admin=False)
+
+ def test_admin_can_create_room(self):
+ channel = self._create_room(self.admin_access_token)
+ assert channel.result["code"] == b"200", channel.result
+
+ def test_normal_user_cannot_create_empty_room(self):
+ channel = self._create_room(self.normal_access_token)
+ assert channel.result["code"] == b"403", channel.result
+
+ def test_normal_user_cannot_create_room_with_multiple_invites(self):
+ channel = self._create_room(
+ self.normal_access_token,
+ content={"invite": [self.other_user_id, self.admin_user_id]},
+ )
+ assert channel.result["code"] == b"403", channel.result
+
+ # Test that it correctly counts both normal and third party invites
+ channel = self._create_room(
+ self.normal_access_token,
+ content={
+ "invite": [self.other_user_id],
+ "invite_3pid": [{"medium": "email", "address": "foo@example.com"}],
+ },
+ )
+ assert channel.result["code"] == b"403", channel.result
+
+ # Test that it correctly rejects third party invites
+ channel = self._create_room(
+ self.normal_access_token,
+ content={
+ "invite": [],
+ "invite_3pid": [{"medium": "email", "address": "foo@example.com"}],
+ },
+ )
+ assert channel.result["code"] == b"403", channel.result
+
+    def test_normal_user_can_create_room_with_single_invite(self):
+ channel = self._create_room(
+ self.normal_access_token, content={"invite": [self.other_user_id]}
+ )
+ assert channel.result["code"] == b"200", channel.result
+
+ def test_cannot_join_public_room(self):
+ channel = self._create_room(self.admin_access_token)
+ assert channel.result["code"] == b"200", channel.result
+
+ room_id = channel.json_body["room_id"]
+
+ self.helper.join(
+ room_id, self.normal_user_id, tok=self.normal_access_token, expect_code=403
+ )
+
+ def test_can_join_invited_room(self):
+ channel = self._create_room(self.admin_access_token)
+ assert channel.result["code"] == b"200", channel.result
+
+ room_id = channel.json_body["room_id"]
+
+ self.helper.invite(
+ room_id,
+ src=self.admin_user_id,
+ targ=self.normal_user_id,
+ tok=self.admin_access_token,
+ )
+
+ self.helper.join(
+ room_id, self.normal_user_id, tok=self.normal_access_token, expect_code=200
+ )
+
+ def test_cannot_invite(self):
+ channel = self._create_room(self.admin_access_token)
+ assert channel.result["code"] == b"200", channel.result
+
+ room_id = channel.json_body["room_id"]
+
+ self.helper.invite(
+ room_id,
+ src=self.admin_user_id,
+ targ=self.normal_user_id,
+ tok=self.admin_access_token,
+ )
+
+ self.helper.join(
+ room_id, self.normal_user_id, tok=self.normal_access_token, expect_code=200
+ )
+
+ self.helper.invite(
+ room_id,
+ src=self.normal_user_id,
+ targ=self.other_user_id,
+ tok=self.normal_access_token,
+ expect_code=403,
+ )
+
+ def test_cannot_3pid_invite(self):
+        """Test that unbound 3pid invites get rejected."""
+ channel = self._create_room(self.admin_access_token)
+ assert channel.result["code"] == b"200", channel.result
+
+ room_id = channel.json_body["room_id"]
+
+ self.helper.invite(
+ room_id,
+ src=self.admin_user_id,
+ targ=self.normal_user_id,
+ tok=self.admin_access_token,
+ )
+
+ self.helper.join(
+ room_id, self.normal_user_id, tok=self.normal_access_token, expect_code=200
+ )
+
+ self.helper.invite(
+ room_id,
+ src=self.normal_user_id,
+ targ=self.other_user_id,
+ tok=self.normal_access_token,
+ expect_code=403,
+ )
+
+ request, channel = self.make_request(
+ "POST",
+ "rooms/%s/invite" % (room_id),
+ {"address": "foo@bar.com", "medium": "email", "id_server": "localhost"},
+ access_token=self.normal_access_token,
+ )
+ self.render(request)
+ self.assertEqual(channel.code, 403, channel.result["body"])
+
+    def _create_room(self, token, content=None):
+        # Use None as the default to avoid sharing a mutable dict between calls.
+        content = content or {}
+        path = "/_matrix/client/r0/createRoom?access_token=%s" % (token,)
+
+ request, channel = make_request(
+ self.hs.get_reactor(),
+ "POST",
+ path,
+ content=json.dumps(content).encode("utf8"),
+ )
+ render(request, self.resource, self.hs.get_reactor())
+
+ return channel
diff --git a/tests/server.py b/tests/server.py
index e573c4e4c5..ae349ca273 100644
--- a/tests/server.py
+++ b/tests/server.py
@@ -11,9 +11,13 @@ from twisted.internet import address, threads, udp
from twisted.internet._resolver import SimpleResolverComplexifier
from twisted.internet.defer import Deferred, fail, succeed
from twisted.internet.error import DNSLookupError
-from twisted.internet.interfaces import IReactorPluggableNameResolver, IResolverSimple
+from twisted.internet.interfaces import (
+ IReactorPluggableNameResolver,
+ IReactorTCP,
+ IResolverSimple,
+)
from twisted.python.failure import Failure
-from twisted.test.proto_helpers import MemoryReactorClock
+from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactorClock
from twisted.web.http import unquote
from twisted.web.http_headers import Headers
@@ -334,7 +338,7 @@ def setup_test_homeserver(cleanup_func, *args, **kwargs):
def get_clock():
clock = ThreadedMemoryReactorClock()
hs_clock = Clock(clock)
- return (clock, hs_clock)
+ return clock, hs_clock
@attr.s(cmp=False)
@@ -387,11 +391,24 @@ class FakeTransport(object):
self.disconnecting = True
if self._protocol:
self._protocol.connectionLost(reason)
- self.disconnected = True
+
+ # if we still have data to write, delay until that is done
+ if self.buffer:
+ logger.info(
+ "FakeTransport: Delaying disconnect until buffer is flushed"
+ )
+ else:
+ self.disconnected = True
def abortConnection(self):
logger.info("FakeTransport: abortConnection()")
- self.loseConnection()
+
+ if not self.disconnecting:
+ self.disconnecting = True
+ if self._protocol:
+ self._protocol.connectionLost(None)
+
+ self.disconnected = True
def pauseProducing(self):
if not self.producer:
@@ -422,6 +439,9 @@ class FakeTransport(object):
self._reactor.callLater(0.0, _produce)
def write(self, byt):
+ if self.disconnecting:
+ raise Exception("Writing to disconnecting FakeTransport")
+
self.buffer = self.buffer + byt
# always actually do the write asynchronously. Some protocols (notably the
@@ -465,3 +485,26 @@ class FakeTransport(object):
self.buffer = self.buffer[len(to_write) :]
if self.buffer and self.autoflush:
self._reactor.callLater(0.0, self.flush)
+
+ if not self.buffer and self.disconnecting:
+ logger.info("FakeTransport: Buffer now empty, completing disconnect")
+ self.disconnected = True
+
+
+def connect_client(reactor: IReactorTCP, client_id: int):
+    """
+    Connect a client to a fake TCP transport.
+
+    Args:
+        reactor: The memory reactor holding the pending outgoing TCP connection.
+        client_id: The index of the connection in reactor.tcpClients.
+
+    Returns:
+        A tuple of (client protocol, server AccumulatingProtocol).
+    """
+ factory = reactor.tcpClients[client_id][2]
+ client = factory.buildProtocol(None)
+ server = AccumulatingProtocol()
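+    # Cross-wire the two protocols so that each side's writes are delivered
+    # to the other via a FakeTransport.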
+ server.makeConnection(FakeTransport(client, reactor))
+ client.makeConnection(FakeTransport(server, reactor))
+
+ reactor.tcpClients.pop(client_id)
+
+ return client, server
diff --git a/tests/storage/test_keys.py b/tests/storage/test_keys.py
index e07ff01201..95f309fbbc 100644
--- a/tests/storage/test_keys.py
+++ b/tests/storage/test_keys.py
@@ -14,6 +14,7 @@
# limitations under the License.
import signedjson.key
+import unpaddedbase64
from twisted.internet.defer import Deferred
@@ -21,11 +22,17 @@ from synapse.storage.keys import FetchKeyResult
import tests.unittest
-KEY_1 = signedjson.key.decode_verify_key_base64(
- "ed25519", "key1", "fP5l4JzpZPq/zdbBg5xx6lQGAAOM9/3w94cqiJ5jPrw"
+
+def decode_verify_key_base64(key_id: str, key_base64: str):
+ key_bytes = unpaddedbase64.decode_base64(key_base64)
+ return signedjson.key.decode_verify_key_bytes(key_id, key_bytes)
+
+
+KEY_1 = decode_verify_key_base64(
+ "ed25519:key1", "fP5l4JzpZPq/zdbBg5xx6lQGAAOM9/3w94cqiJ5jPrw"
)
-KEY_2 = signedjson.key.decode_verify_key_base64(
- "ed25519", "key2", "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
+KEY_2 = decode_verify_key_base64(
+ "ed25519:key2", "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
)
diff --git a/tests/storage/test_profile.py b/tests/storage/test_profile.py
index 45824bd3b2..13e9f8ec09 100644
--- a/tests/storage/test_profile.py
+++ b/tests/storage/test_profile.py
@@ -34,9 +34,7 @@ class ProfileStoreTestCase(unittest.TestCase):
@defer.inlineCallbacks
def test_displayname(self):
- yield self.store.create_profile(self.u_frank.localpart)
-
- yield self.store.set_profile_displayname(self.u_frank.localpart, "Frank")
+ yield self.store.set_profile_displayname(self.u_frank.localpart, "Frank", 1)
self.assertEquals(
"Frank", (yield self.store.get_profile_displayname(self.u_frank.localpart))
@@ -44,10 +42,8 @@ class ProfileStoreTestCase(unittest.TestCase):
@defer.inlineCallbacks
def test_avatar_url(self):
- yield self.store.create_profile(self.u_frank.localpart)
-
yield self.store.set_profile_avatar_url(
- self.u_frank.localpart, "http://my.site/here"
+ self.u_frank.localpart, "http://my.site/here", 1
)
self.assertEquals(
diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py
index d961b81d48..deecfad9fb 100644
--- a/tests/storage/test_redaction.py
+++ b/tests/storage/test_redaction.py
@@ -17,6 +17,8 @@
from mock import Mock
+from canonicaljson import json
+
from twisted.internet import defer
from synapse.api.constants import EventTypes, Membership
@@ -29,8 +31,10 @@ from tests.utils import create_room
class RedactionTestCase(unittest.HomeserverTestCase):
def make_homeserver(self, reactor, clock):
+ config = self.default_config()
+ config["redaction_retention_period"] = "30d"
return self.setup_test_homeserver(
- resource_for_federation=Mock(), http_client=None
+ resource_for_federation=Mock(), http_client=None, config=config
)
def prepare(self, reactor, clock, hs):
@@ -286,3 +290,74 @@ class RedactionTestCase(unittest.HomeserverTestCase):
self.assertEqual(
fetched.unsigned["redacted_because"].event_id, redaction_event_id2
)
+
+ def test_redact_censor(self):
+ """Test that a redacted event gets censored in the DB after a month
+ """
+
+ self.get_success(
+ self.inject_room_member(self.room1, self.u_alice, Membership.JOIN)
+ )
+
+ msg_event = self.get_success(self.inject_message(self.room1, self.u_alice, "t"))
+
+ # Check event has not been redacted:
+ event = self.get_success(self.store.get_event(msg_event.event_id))
+
+ self.assertObjectHasAttributes(
+ {
+ "type": EventTypes.Message,
+ "user_id": self.u_alice.to_string(),
+ "content": {"body": "t", "msgtype": "message"},
+ },
+ event,
+ )
+
+ self.assertFalse("redacted_because" in event.unsigned)
+
+ # Redact event
+ reason = "Because I said so"
+ self.get_success(
+ self.inject_redaction(self.room1, msg_event.event_id, self.u_alice, reason)
+ )
+
+ event = self.get_success(self.store.get_event(msg_event.event_id))
+
+ self.assertTrue("redacted_because" in event.unsigned)
+
+ self.assertObjectHasAttributes(
+ {
+ "type": EventTypes.Message,
+ "user_id": self.u_alice.to_string(),
+ "content": {},
+ },
+ event,
+ )
+
+ event_json = self.get_success(
+ self.store._simple_select_one_onecol(
+ table="event_json",
+ keyvalues={"event_id": msg_event.event_id},
+ retcol="json",
+ )
+ )
+
+ self.assert_dict(
+ {"content": {"body": "t", "msgtype": "message"}}, json.loads(event_json)
+ )
+
+ # Advance by 30 days, then advance again to ensure that the looping call
+ # for updating the stream position gets called and then the looping call
+ # for the censoring gets called.
+ self.reactor.advance(60 * 60 * 24 * 31)
+ self.reactor.advance(60 * 60 * 2)
+
+ event_json = self.get_success(
+ self.store._simple_select_one_onecol(
+ table="event_json",
+ keyvalues={"event_id": msg_event.event_id},
+ retcol="json",
+ )
+ )
+
+ self.assert_dict({"content": {}}, json.loads(event_json))
diff --git a/tests/storage/test_registration.py b/tests/storage/test_registration.py
index 0253c4ac05..4578cc3b60 100644
--- a/tests/storage/test_registration.py
+++ b/tests/storage/test_registration.py
@@ -49,6 +49,7 @@ class RegistrationStoreTestCase(unittest.TestCase):
"consent_server_notice_sent": None,
"appservice_id": None,
"creation_ts": 1000,
+ "user_type": None,
},
(yield self.store.get_user_by_id(self.user_id)),
)
diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py
index 64cb294c37..447a3c6ffb 100644
--- a/tests/storage/test_roommember.py
+++ b/tests/storage/test_roommember.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,78 +14,129 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
-from mock import Mock
-
-from twisted.internet import defer
+from unittest.mock import Mock
from synapse.api.constants import EventTypes, Membership
from synapse.api.room_versions import RoomVersions
-from synapse.types import Requester, RoomID, UserID
+from synapse.rest.admin import register_servlets_for_client_rest_resource
+from synapse.rest.client.v1 import login, room
+from synapse.types import Requester, UserID
from tests import unittest
-from tests.utils import create_room, setup_test_homeserver
-class RoomMemberStoreTestCase(unittest.TestCase):
- @defer.inlineCallbacks
- def setUp(self):
- hs = yield setup_test_homeserver(
- self.addCleanup, resource_for_federation=Mock(), http_client=None
+class RoomMemberStoreTestCase(unittest.HomeserverTestCase):
+
+ servlets = [
+ login.register_servlets,
+ register_servlets_for_client_rest_resource,
+ room.register_servlets,
+ ]
+
+ def make_homeserver(self, reactor, clock):
+ hs = self.setup_test_homeserver(
+ resource_for_federation=Mock(), http_client=None
)
+ return hs
+
+ def prepare(self, reactor, clock, hs):
+
# We can't test the RoomMemberStore on its own without the other event
# storage logic
self.store = hs.get_datastore()
self.event_builder_factory = hs.get_event_builder_factory()
self.event_creation_handler = hs.get_event_creation_handler()
- self.u_alice = UserID.from_string("@alice:test")
- self.u_bob = UserID.from_string("@bob:test")
+ self.u_alice = self.register_user("alice", "pass")
+ self.t_alice = self.login("alice", "pass")
+ self.u_bob = self.register_user("bob", "pass")
# User elsewhere on another host
self.u_charlie = UserID.from_string("@charlie:elsewhere")
- self.room = RoomID.from_string("!abc123:test")
-
- yield create_room(hs, self.room.to_string(), self.u_alice.to_string())
-
- @defer.inlineCallbacks
def inject_room_member(self, room, user, membership, replaces_state=None):
builder = self.event_builder_factory.for_room_version(
RoomVersions.V1,
{
"type": EventTypes.Member,
- "sender": user.to_string(),
- "state_key": user.to_string(),
- "room_id": room.to_string(),
+ "sender": user,
+ "state_key": user,
+ "room_id": room,
"content": {"membership": membership},
},
)
- event, context = yield self.event_creation_handler.create_new_client_event(
- builder
+ event, context = self.get_success(
+ self.event_creation_handler.create_new_client_event(builder)
)
- yield self.store.persist_event(event, context)
+ self.get_success(self.store.persist_event(event, context))
return event
- @defer.inlineCallbacks
def test_one_member(self):
- yield self.inject_room_member(self.room, self.u_alice, Membership.JOIN)
-
- self.assertEquals(
- [self.room.to_string()],
- [
- m.room_id
- for m in (
- yield self.store.get_rooms_for_user_where_membership_is(
- self.u_alice.to_string(), [Membership.JOIN]
- )
- )
- ],
+
+ # Alice creates the room, and is automatically joined
+ self.room = self.helper.create_room_as(self.u_alice, tok=self.t_alice)
+
+ rooms_for_user = self.get_success(
+ self.store.get_rooms_for_user_where_membership_is(
+ self.u_alice, [Membership.JOIN]
+ )
)
+ self.assertEquals([self.room], [m.room_id for m in rooms_for_user])
+
+ def test_count_known_servers(self):
+ """
+ _count_known_servers will calculate how many servers are in a room.
+ """
+ self.room = self.helper.create_room_as(self.u_alice, tok=self.t_alice)
+ self.inject_room_member(self.room, self.u_bob, Membership.JOIN)
+ self.inject_room_member(self.room, self.u_charlie.to_string(), Membership.JOIN)
+
+ servers = self.get_success(self.store._count_known_servers())
+ self.assertEqual(servers, 2)
+
+ def test_count_known_servers_stat_counter_disabled(self):
+ """
+        If disabled (the default), the count of known servers is never calculated.
+ """
+ self.assertTrue("_known_servers_count" not in self.store.__dict__.keys())
+
+ self.room = self.helper.create_room_as(self.u_alice, tok=self.t_alice)
+ self.inject_room_member(self.room, self.u_bob, Membership.JOIN)
+ self.inject_room_member(self.room, self.u_charlie.to_string(), Membership.JOIN)
+
+ self.pump(20)
+
+ self.assertTrue("_known_servers_count" not in self.store.__dict__.keys())
+
+ @unittest.override_config(
+ {"enable_metrics": True, "metrics_flags": {"known_servers": True}}
+ )
+ def test_count_known_servers_stat_counter_enabled(self):
+ """
+ If enabled, the metrics for how many servers are known will be counted.
+ """
+ # Initialises to 1 -- itself
+ self.assertEqual(self.store._known_servers_count, 1)
+
+ self.pump(20)
+
+ # No rooms have been joined, so technically the SQL returns 0, but it
+ # will still say it knows about itself.
+ self.assertEqual(self.store._known_servers_count, 1)
+
+ self.room = self.helper.create_room_as(self.u_alice, tok=self.t_alice)
+ self.inject_room_member(self.room, self.u_bob, Membership.JOIN)
+ self.inject_room_member(self.room, self.u_charlie.to_string(), Membership.JOIN)
+
+ self.pump(20)
+
+ # It now knows about Charlie's server.
+ self.assertEqual(self.store._known_servers_count, 2)
+
class CurrentStateMembershipUpdateTestCase(unittest.HomeserverTestCase):
def prepare(self, reactor, clock, homeserver):
diff --git a/tests/test_metrics.py b/tests/test_metrics.py
index 2edbae5c6d..270f853d60 100644
--- a/tests/test_metrics.py
+++ b/tests/test_metrics.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
+# Copyright 2019 Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,8 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
-from synapse.metrics import InFlightGauge
+from synapse.metrics import REGISTRY, InFlightGauge, generate_latest
from tests import unittest
@@ -111,3 +111,21 @@ class TestMauLimit(unittest.TestCase):
}
return results
+
+
+class BuildInfoTests(unittest.TestCase):
+ def test_get_build(self):
+ """
+ The synapse_build_info metric reports the OS version, Python version,
+ and Synapse version.
+ """
+ items = list(
+ filter(
+ lambda x: b"synapse_build_info{" in x,
+ generate_latest(REGISTRY).split(b"\n"),
+ )
+ )
+ self.assertEqual(len(items), 1)
+ self.assertTrue(b"osversion=" in items[0])
+ self.assertTrue(b"pythonversion=" in items[0])
+ self.assertTrue(b"version=" in items[0])
diff --git a/tests/test_server.py b/tests/test_server.py
index 2a7d407c98..98fef21d55 100644
--- a/tests/test_server.py
+++ b/tests/test_server.py
@@ -57,7 +57,7 @@ class JsonResourceTests(unittest.TestCase):
def _callback(request, **kwargs):
got_kwargs.update(kwargs)
- return (200, kwargs)
+ return 200, kwargs
res = JsonResource(self.homeserver)
res.register_paths(
diff --git a/tests/test_state.py b/tests/test_state.py
index 6d33566f47..610ec9fb46 100644
--- a/tests/test_state.py
+++ b/tests/test_state.py
@@ -106,7 +106,7 @@ class StateGroupStore(object):
}
def get_state_group_delta(self, name):
- return (None, None)
+ return None, None
def register_events(self, events):
for e in events:
diff --git a/tests/test_types.py b/tests/test_types.py
index 9ab5f829b0..7cb1f8acb4 100644
--- a/tests/test_types.py
+++ b/tests/test_types.py
@@ -12,9 +12,16 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+from six import string_types
from synapse.api.errors import SynapseError
-from synapse.types import GroupID, RoomAlias, UserID, map_username_to_mxid_localpart
+from synapse.types import (
+ GroupID,
+ RoomAlias,
+ UserID,
+ map_username_to_mxid_localpart,
+ strip_invalid_mxid_characters,
+)
from tests import unittest
from tests.utils import TestHomeServer
@@ -106,3 +113,16 @@ class MapUsernameTestCase(unittest.TestCase):
self.assertEqual(
map_username_to_mxid_localpart("têst".encode("utf-8")), "t=c3=aast"
)
+
+
+class StripInvalidMxidCharactersTestCase(unittest.TestCase):
+ def test_return_type(self):
+ unstripped = strip_invalid_mxid_characters("test")
+ stripped = strip_invalid_mxid_characters("test@")
+
+ self.assertTrue(isinstance(unstripped, string_types), type(unstripped))
+ self.assertTrue(isinstance(stripped, string_types), type(stripped))
+
+ def test_strip(self):
+ stripped = strip_invalid_mxid_characters("test@")
+ self.assertEqual(stripped, "test", stripped)
diff --git a/tests/test_visibility.py b/tests/test_visibility.py
index e0605dac2f..18f1a0035d 100644
--- a/tests/test_visibility.py
+++ b/tests/test_visibility.py
@@ -74,7 +74,6 @@ class FilterEventsForServerTestCase(tests.unittest.TestCase):
self.assertEqual(events_to_filter[i].event_id, filtered[i].event_id)
self.assertEqual(filtered[i].content["a"], "b")
- @tests.unittest.DEBUG
@defer.inlineCallbacks
def test_erased_user(self):
# 4 message events, from erased and unerased users, with a membership
diff --git a/tests/util/caches/test_ttlcache.py b/tests/util/caches/test_ttlcache.py
index c94cbb662b..816795c136 100644
--- a/tests/util/caches/test_ttlcache.py
+++ b/tests/util/caches/test_ttlcache.py
@@ -36,7 +36,7 @@ class CacheTestCase(unittest.TestCase):
self.assertTrue("one" in self.cache)
self.assertEqual(self.cache.get("one"), "1")
self.assertEqual(self.cache["one"], "1")
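+        # get_with_expiry now returns a (value, expiry time, TTL) triple
+        # rather than a (value, expiry time) pair.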
- self.assertEqual(self.cache.get_with_expiry("one"), ("1", 110))
+ self.assertEqual(self.cache.get_with_expiry("one"), ("1", 110, 10))
self.assertEqual(self.cache._metrics.hits, 3)
self.assertEqual(self.cache._metrics.misses, 0)
@@ -77,7 +77,7 @@ class CacheTestCase(unittest.TestCase):
self.assertEqual(self.cache["two"], "2")
self.assertEqual(self.cache["three"], "3")
- self.assertEqual(self.cache.get_with_expiry("two"), ("2", 120))
+ self.assertEqual(self.cache.get_with_expiry("two"), ("2", 120, 20))
self.assertEqual(self.cache._metrics.hits, 5)
self.assertEqual(self.cache._metrics.misses, 0)
diff --git a/tests/utils.py b/tests/utils.py
index f1eb9a545c..46ef2959f2 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -464,7 +464,7 @@ class MockHttpResource(HttpServer):
args = [urlparse.unquote(u) for u in matcher.groups()]
(code, response) = yield func(mock_request, *args)
- return (code, response)
+ return code, response
except CodeMessageException as e:
return (e.code, cs_error(e.msg, code=e.errcode))
diff --git a/tox.ini b/tox.ini
index 09b4b8fc3c..03119ac686 100644
--- a/tox.ini
+++ b/tox.ini
@@ -2,11 +2,13 @@
envlist = packaging, py35, py36, py37, check_codestyle, check_isort
[base]
+basepython = python3.7
deps =
mock
python-subunit
junitxml
coverage
+ coverage-enable-subprocess
parameterized
# cyptography 2.2 requires setuptools >= 18.5
@@ -43,13 +45,13 @@ whitelist_externals =
setenv =
{[base]setenv}
postgres: SYNAPSE_POSTGRES = 1
+ TOP={toxinidir}
passenv = *
commands =
/usr/bin/find "{toxinidir}" -name '*.pyc' -delete
# Add this so that coverage will run on subprocesses
- sh -c 'echo "import coverage; coverage.process_startup()" > {envsitepackagesdir}/../sitecustomize.py'
{envbindir}/coverage run "{envbindir}/trial" {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:}
# As of twisted 16.4, trial tries to import the tests as a package (previously
@@ -75,8 +77,6 @@ commands =
# )
usedevelop=true
-
-
# A test suite for the oldest supported versions of Python libraries, to catch
# any uses of APIs not available in them.
[testenv:py35-old]
@@ -88,6 +88,7 @@ deps =
mock
lxml
coverage
+ coverage-enable-subprocess
commands =
/usr/bin/find "{toxinidir}" -name '*.pyc' -delete
@@ -96,15 +97,11 @@ commands =
# OpenSSL 1.1 compiled cryptography (as older ones don't compile on Travis).
/bin/sh -c 'python -m synapse.python_dependencies | sed -e "s/>=/==/g" -e "s/psycopg2==2.6//" -e "s/pyopenssl==16.0.0/pyopenssl==17.0.0/" | xargs -d"\n" pip install'
- # Add this so that coverage will run on subprocesses
- /bin/sh -c 'echo "import coverage; coverage.process_startup()" > {envsitepackagesdir}/../sitecustomize.py'
-
# Install Synapse itself. This won't update any libraries.
pip install -e .
{envbindir}/coverage run "{envbindir}/trial" {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:}
-
[testenv:packaging]
skip_install=True
deps =
@@ -117,7 +114,7 @@ skip_install = True
basepython = python3.6
deps =
flake8
- black
+ black==19.3b0
commands =
python -m black --check --diff .
/bin/sh -c "flake8 synapse tests scripts scripts-dev scripts/hash_password scripts/register_new_matrix_user scripts/synapse_port_db synctl {env:PEP8SUFFIX:}"
@@ -131,18 +128,45 @@ commands = /bin/sh -c "isort -c -df -sp setup.cfg -rc synapse tests"
skip_install = True
deps = towncrier>=18.6.0rc1
commands =
- python -m towncrier.check --compare-with=origin/develop
+ python -m towncrier.check --compare-with=origin/dinsic
basepython = python3.6
[testenv:check-sampleconfig]
commands = {toxinidir}/scripts-dev/generate_sample_config --check
-[testenv:codecov]
+[testenv:combine]
skip_install = True
deps =
coverage
- codecov
-commands =
+commands=
coverage combine
- coverage xml
- codecov -X gcov
+ coverage report
+
+[testenv:cov-erase]
+skip_install = True
+deps =
+ coverage
+commands=
+ coverage erase
+
+[testenv:cov-html]
+skip_install = True
+deps =
+ coverage
+commands=
+ coverage html
+
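+# Type-checks the packages listed below; run with `tox -e mypy`.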
+[testenv:mypy]
+basepython = python3.7
+skip_install = True
+deps =
+ {[base]deps}
+ mypy
+ mypy-zope
+ typeshed
+setenv =
+ MYPYPATH = stubs/
+extras = all
+commands = mypy --show-traceback \
+ synapse/logging/ \
+ synapse/config/