From 95e02b856b2ac1409dc0d5575ee0b7be55105e9a Mon Sep 17 00:00:00 2001
From: Robert Swain
Date: Thu, 28 Sep 2017 12:12:47 +0200
Subject: docker: Initial Dockerfile and docker-compose.yaml

---
 Dockerfile | 58 ++++++++++++++++++++++++
 docker/README.md | 70 +++++++++++++++++++++++++++++
 docker/docker-compose.yaml | 39 +++++++++++++++++
 docker/rootfs/etc/service/synapse/finish | 17 ++++++++
 docker/rootfs/etc/service/synapse/run | 75 ++++++++++++++++++++++++++++++++
 5 files changed, 259 insertions(+)
 create mode 100644 Dockerfile
 create mode 100644 docker/README.md
 create mode 100644 docker/docker-compose.yaml
 create mode 100755 docker/rootfs/etc/service/synapse/finish
 create mode 100755 docker/rootfs/etc/service/synapse/run

diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000000..9b11a143f6
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,58 @@
+# Copyright 2017 Vector Creations Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM phusion/baseimage:0.9.22
+
+COPY ./ /synapse/source/
+
+RUN apt-get update -y \
+ && DEBIAN_FRONTEND=noninteractive apt-get upgrade -y \
+ && DEBIAN_FRONTEND=noninteractive apt-get install -y \
+ build-essential \
+ libffi-dev \
+ libjpeg-dev \
+ libpq-dev \
+ libssl-dev \
+ libxslt1-dev \
+ python-pip \
+ python-setuptools \
+ python-virtualenv \
+ python2.7-dev \
+ sqlite3 \
+ && virtualenv -p python2.7 /synapse \
+ && . /synapse/bin/activate \
+ && pip install --upgrade pip \
+ && pip install --upgrade setuptools \
+ && pip install --upgrade psycopg2 \
+ && cd /synapse/source \
+ && pip install --upgrade ./ \
+ && cd / \
+ && rm -rf /synapse/source \
+ && apt-get autoremove -y \
+ build-essential \
+ libffi-dev \
+ libjpeg-dev \
+ libpq-dev \
+ libssl-dev \
+ libxslt1-dev \
+ python2.7-dev \
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
+
+COPY docker/rootfs/ /
+
+VOLUME /synapse/config/
+VOLUME /synapse/data/
+
+CMD ["/sbin/my_init"]
diff --git a/docker/README.md b/docker/README.md
new file mode 100644
index 0000000000..c9e6fd216d
--- /dev/null
+++ b/docker/README.md
@@ -0,0 +1,70 @@
+# Synapse Docker
+
+## Build
+
+Build the docker image with the `docker build` command from the root of the synapse repository.
+
+```
+docker build -t matrix-org/synapse:v0.22.1 .
+```
+
+The `-t` option sets the image tag. Official images are tagged `matrix-org/synapse:<tag>` where `<tag>` is the same as the release tag in the synapse git repository.
+
+## Configure
+
+Synapse provides a command for generating homeserver configuration files. These are a good starting point for setting up your own deployment.
+
+The documentation below will refer to a `CONFIG_PATH` shell variable. This is a path to a directory where synapse configuration will be stored. It needs to be mapped into the container as a volume at `/synapse/config/` as can be seen in the example `docker run` command.
+
+Docker container environment variables:
+* `GENERATE_CONFIG` - Set this to any non-empty string, such as `yes`, to trigger generation of configuration files.
Existing files in the `CONFIG_PATH` will **not** be overwritten. +* `POSTGRES_DATABASE` - The database name for the synapse postgres database. [default: `synapse`] +* `POSTGRES_HOST` - The host of the postgres database if you wish to use postgresql instead of sqlite3. [default: `postgres` which is useful when using a container on the same docker network in a compose file where the postgres service is called `postgres`] **NOTE**: `localhost` and `127.0.0.1` refer to the container itself unless running the container with `host` networking. +* `POSTGRES_PASSWORD` - The password for the synapse postgres database. **If this is set then postgres will be used instead of sqlite3.** [default: none] **NOTE**: You are highly encouraged to use postgresql! Please use the compose file to make it easier to deploy. +* `POSTGRES_USER` - The user for the synapse postgres database. [default: `postgres`] +* `REPORT_STATS` - Whether to send anonymous usage statistics back to the Matrix project which helps us to get funding! Must be `yes` or `no`. [default: `yes`] +* `SERVER_NAME` - The domain used for the Matrix homeserver. If you intend to run this synapse instance on a public domain, use that domain. [default: `localhost`] + +``` +CONFIG_PATH=/my/magical/config/path/ +mkdir -p ${CONFIG_PATH} +docker run \ + --rm \ + -e GENERATE_CONFIG=yes \ + -e POSTGRES_PASSWORD=MyVerySecretPassword \ + -e REPORT_STATS=yes \ + -e SERVER_NAME=example.com \ + -v ${CONFIG_PATH}:/synapse/config/ \ + matrix-org/synapse:develop +``` + +This will create a temporary container from the image and use the synapse code for generating configuration files and TLS keys and certificates for the specified `SERVER_NAME` domain. The files are written to `CONFIG_PATH`. + +## Run + +**NOTE**: If you are not using postgresql and are using sqlite3 as your database, you will need to make a directory to store the sqlite3 database file in and then mount this volume into the container at `/synapse/data/`. As it is so easy to use postgresql, when using Docker containers, this is not documented to somewhat discourage it. Choose a `POSTGRES_PASSWORD` instead. + +### Docker Compose + +A `docker-compose.yaml` file is included to ease deployment of the basic synapse and postgres setup. Remember to set a `POSTGRES_PASSWORD` when generating your configuration above. You will need it for running the containers in the composition. + +From the `docker/` subdirectory of the synapse repository: +``` +CONFIG_PATH=/my/magical/config/path/ +POSTGRES_PASSWORD=MyVerySecretPassword \ +docker-compose \ + -p synapse \ + up -d +``` + +### Docker + +Note that the following is just a guideline and you may need to add parameters to the docker run command to account for the network situation with your postgres database. + +``` +docker run \ + -d \ + --name synapse \ + -v ${CONFIG_PATH}:/synapse/config/ \ + matrix-org/synapse:v0.22.1 +``` diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml new file mode 100644 index 0000000000..ff36081a9b --- /dev/null +++ b/docker/docker-compose.yaml @@ -0,0 +1,39 @@ +# Copyright 2017 Vector Creations Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +version: '3' + +services: + postgres: + image: postgres:9.6.5-alpine + environment: + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_DB: synapse + expose: + - 5432 + restart: unless-stopped + volumes: + - postgres-data:/var/lib/postgresql/data/ + + synapse: + image: matrix-org/synapse:develop + ports: + - 8008:8008 + - 8448:8448 + restart: unless-stopped + volumes: + - ${CONFIG_PATH}:/synapse/config/ + +volumes: + postgres-data: diff --git a/docker/rootfs/etc/service/synapse/finish b/docker/rootfs/etc/service/synapse/finish new file mode 100755 index 0000000000..2aace581a1 --- /dev/null +++ b/docker/rootfs/etc/service/synapse/finish @@ -0,0 +1,17 @@ +#!/bin/bash +# +# Copyright 2017 Vector Creations Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +kill -TERM 1 diff --git a/docker/rootfs/etc/service/synapse/run b/docker/rootfs/etc/service/synapse/run new file mode 100755 index 0000000000..dd797d3ef9 --- /dev/null +++ b/docker/rootfs/etc/service/synapse/run @@ -0,0 +1,75 @@ +#!/bin/bash +# +# Copyright 2017 Vector Creations Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e + +: ${CONFIG_PATH:="/synapse/config"} +: ${POSTGRES_DATABASE:="synapse"} +: ${POSTGRES_HOST:="postgres"} +: ${POSTGRES_USER:="postgres"} +: ${REPORT_STATS:="yes"} +: ${SERVER_NAME:="localhost"} + +DATABASE_CONFIG_PATH="${CONFIG_PATH}/database.yaml" +HOMESERVER_CONFIG_PATH="${CONFIG_PATH}/homeserver.yaml" +SYNAPSE_COMMAND="python -m synapse.app.homeserver" + +. /synapse/bin/activate +cd /synapse + +if [[ -n "${GENERATE_CONFIG}" ]]; then + ${SYNAPSE_COMMAND} \ + --server-name ${SERVER_NAME} \ + --config-path ${HOMESERVER_CONFIG_PATH} \ + --generate-config \ + --report-stats=${REPORT_STATS} + + if [[ -f "${DATABASE_CONFIG_PATH}" ]]; then + echo "Config file '${DATABASE_CONFIG_PATH}' already exists. Remove it if you want it to be generated." + else + echo "Generating ${DATABASE_CONFIG_PATH}..." 
+ if [[ -n "${POSTGRES_PASSWORD}" ]]; then
+ (cat > ${DATABASE_CONFIG_PATH}) <<EOF
+database:
+  name: psycopg2
+  args:
+    user: ${POSTGRES_USER}
+    password: ${POSTGRES_PASSWORD}
+    database: ${POSTGRES_DATABASE}
+    host: ${POSTGRES_HOST}
+EOF
+ else
+ (cat > ${DATABASE_CONFIG_PATH}) <<EOF
+database:
+  name: sqlite3
+  args:
+    database: /synapse/data/homeserver.db
+EOF
+ fi
+ fi
+else
+ exec ${SYNAPSE_COMMAND} --config-path ${HOMESERVER_CONFIG_PATH}
+fi
-- cgit 1.4.1


From: Robert Swain
Date: Fri, 29 Sep 2017 11:40:15 +0200
Subject: docker: s/matrix-org/matrixdotorg/g

---
 docker/README.md | 8 ++++----
 docker/docker-compose.yaml | 2 +-
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/docker/README.md b/docker/README.md
index c9e6fd216d..c15517d0e0 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -5,10 +5,10 @@
 Build the docker image with the `docker build` command from the root of the synapse repository.
 
 ```
-docker build -t matrix-org/synapse:v0.22.1 .
+docker build -t matrixdotorg/synapse:v0.22.1 .
 ```
 
-The `-t` option sets the image tag. Official images are tagged `matrix-org/synapse:<tag>` where `<tag>` is the same as the release tag in the synapse git repository.
+The `-t` option sets the image tag. Official images are tagged `matrixdotorg/synapse:<tag>` where `<tag>` is the same as the release tag in the synapse git repository.
 
 ## Configure
 
@@ -35,7 +35,7 @@ docker run \
 -e REPORT_STATS=yes \
 -e SERVER_NAME=example.com \
 -v ${CONFIG_PATH}:/synapse/config/ \
- matrix-org/synapse:develop
+ matrixdotorg/synapse:v0.22.1
 ```
 
 This will create a temporary container from the image and use the synapse code for generating configuration files and TLS keys and certificates for the specified `SERVER_NAME` domain. The files are written to `CONFIG_PATH`.
@@ -66,5 +66,5 @@ docker run \
 -d \
 --name synapse \
 -v ${CONFIG_PATH}:/synapse/config/ \
- matrix-org/synapse:v0.22.1
+ matrixdotorg/synapse:v0.22.1
 ```
diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml
index ff36081a9b..73cc29f8fd 100644
--- a/docker/docker-compose.yaml
+++ b/docker/docker-compose.yaml
@@ -27,7 +27,7 @@ services:
 - postgres-data:/var/lib/postgresql/data/
 
 synapse:
- image: matrix-org/synapse:develop
+ image: matrixdotorg/synapse:v0.22.1
 ports:
 - 8008:8008
 - 8448:8448
-- cgit 1.4.1


From 431476fbc4ef0c740e33e19ccc73996c2412e4f9 Mon Sep 17 00:00:00 2001
From: kaiyou
Date: Sat, 3 Feb 2018 20:18:36 +0100
Subject: Initial commit including a Dockerfile for synapse

---
 Dockerfile | 16 ++++++++++++++++
 contrib/docker/start.py | 29 +++++++++++++++++++++++++++++
 2 files changed, 45 insertions(+)
 create mode 100644 Dockerfile
 create mode 100755 contrib/docker/start.py

diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000000..5f0433004f
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,16 @@
+FROM python:2-alpine
+
+RUN apk add --no-cache --virtual .nacl_deps build-base libffi-dev zlib-dev openssl-dev libjpeg-turbo-dev linux-headers
+
+COPY synapse /usr/local/src/synapse
+COPY setup.py setup.cfg README.rst synctl /usr/local/src/
+
+RUN cd /usr/local/src \
+ && pip install --upgrade --process-dependency-links .
\ + && rm -rf setup.py setup.cfg synapse + +COPY contrib/docker / + +VOLUME ["/data"] + +ENTRYPOINT ["/start.py"] diff --git a/contrib/docker/start.py b/contrib/docker/start.py new file mode 100755 index 0000000000..4f63ea1ad5 --- /dev/null +++ b/contrib/docker/start.py @@ -0,0 +1,29 @@ +#!/usr/local/bin/python + +import jinja2 +import os +import sys +import socket + +convert = lambda src, dst: open(dst, "w").write(jinja2.Template(open(src).read()).render(**os.environ)) +mode = sys.argv[1] if len(sys.argv) > 1 else None + +if "SYNAPSE_SERVER_NAME" not in os.environ: + print("Environment variable SYNAPSE_SERVER_NAME is mandatory, exiting.") + sys.exit(2) + +params = ["--server-name", os.environ.get("SYNAPSE_SERVER_NAME"), + "--report-stats", os.environ.get("SYNAPSE_REPORT_STATS", "no"), + "--config-path", os.environ.get("SYNAPSE_CONFIG_PATH", "/compiled/homeserver.yaml")] + +if mode == "generate": + params.append("--generate-config") + +# Parse the configuration file +if not os.path.exists("/compiled"): + os.mkdir("/compiled") +convert("/conf/homeserver.yaml", "/compiled/homeserver.yaml") +convert("/conf/log.config", "/compiled/%s.log.config" % os.environ.get("SYNAPSE_SERVER_NAME")) + +# TODO, replace with a call to synapse.app.homeserver.run() +os.execv("/usr/local/bin/python", ["python", "-m", "synapse.app.homeserver"] + params) -- cgit 1.4.1 From d434ae33875b4c7d5ee04c45e454ce237ce578f8 Mon Sep 17 00:00:00 2001 From: kaiyou Date: Sat, 3 Feb 2018 20:30:08 +0100 Subject: Add template config files for the Docker image --- contrib/docker/conf/homeserver.yaml | 181 ++++++++++++++++++++++++++++++++++++ contrib/docker/conf/log.config | 36 +++++++ 2 files changed, 217 insertions(+) create mode 100644 contrib/docker/conf/homeserver.yaml create mode 100644 contrib/docker/conf/log.config diff --git a/contrib/docker/conf/homeserver.yaml b/contrib/docker/conf/homeserver.yaml new file mode 100644 index 0000000000..851c389c19 --- /dev/null +++ b/contrib/docker/conf/homeserver.yaml @@ -0,0 +1,181 @@ +# vim:ft=yaml + +## TLS ## + +tls_certificate_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.crt" +tls_private_key_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.key" +tls_dh_params_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.dh" +no_tls: {{ "True" if SYNAPSE_NO_TLS else "False" }} +tls_fingerprints: [] + +## Server ## + +server_name: "{{ SYNAPSE_SERVER_NAME }}" +pid_file: /homeserver.pid +web_client: {{ "True" if SYNAPSE_WEB_CLIENT else "False" }} +soft_file_limit: 0 + +## Ports ## + +listeners: + {% if not SYNAPSE_NO_TLS %} + - + port: 8448 + bind_addresses: ['0.0.0.0'] + type: http + tls: true + x_forwarded: false + resources: + - names: [client, webclient] + compress: true + - names: [federation] # Federation APIs + compress: false + {% endif %} + + - port: 8008 + tls: false + bind_addresses: ['0.0.0.0'] + type: http + x_forwarded: false + + resources: + - names: [client, webclient] + compress: true + - names: [federation] + compress: false + +## Database ## + +{% if SYNAPSE_DB_HOST %} + +{% else %} +database: + name: "sqlite3" + args: + database: "/data/homeserver.db" +{% endif %} + +## Performance ## + +event_cache_size: "{{ SYNAPSE_EVENT_CACHE_SIZE or "10K" }}" +verbose: 0 +log_file: "/data/homeserver.log" +log_config: "/data/{{ SYNAPSE_SERVER_NAME }}.log.config" + +## Ratelimiting ## + +rc_messages_per_second: 0.2 +rc_message_burst_count: 10.0 +federation_rc_window_size: 1000 +federation_rc_sleep_limit: 10 +federation_rc_sleep_delay: 500 +federation_rc_reject_limit: 50 +federation_rc_concurrent: 3 + +## 
Files ## + +media_store_path: "/data/media" +uploads_path: "/data/uploads" +max_upload_size: "10M" +max_image_pixels: "32M" +dynamic_thumbnails: false + +# List of thumbnail to precalculate when an image is uploaded. +thumbnail_sizes: +- width: 32 + height: 32 + method: crop +- width: 96 + height: 96 + method: crop +- width: 320 + height: 240 + method: scale +- width: 640 + height: 480 + method: scale +- width: 800 + height: 600 + method: scale + +url_preview_enabled: False +max_spider_size: "10M" + +## Captcha ## + +recaptcha_public_key: "YOUR_PUBLIC_KEY" +recaptcha_private_key: "YOUR_PRIVATE_KEY" +enable_registration_captcha: False +recaptcha_siteverify_api: "https://www.google.com/recaptcha/api/siteverify" + +## Turn ## + +turn_uris: [] +turn_shared_secret: "YOUR_SHARED_SECRET" +turn_user_lifetime: "1h" +turn_allow_guests: True + +## Registration ## + +enable_registration: {{ "True" if SYNAPSE_ENABLE_REGISTRATION else "False" }} +registration_shared_secret: "{{ SYNAPSE_REGISTRATION_SHARED_SECRET }}" +bcrypt_rounds: 12 +allow_guest_access: {{ "True" if SYNAPSE_ALLOW_GUEST else "False" }} + +# The list of identity servers trusted to verify third party +# identifiers by this server. +trusted_third_party_id_servers: + - matrix.org + - vector.im + - riot.im + +## Metrics ### + +enable_metrics: False +report_stats: False + +## API Configuration ## + +room_invite_state_types: + - "m.room.join_rules" + - "m.room.canonical_alias" + - "m.room.avatar" + - "m.room.name" + +app_service_config_files: [] +macaroon_secret_key: "{{ SYNAPSE_MACAROON_SECRET_KEY }}" +expire_access_token: False + +## Signing Keys ## + +signing_key_path: "/data/{{ SYNAPSE_SERVER_NAME }}.signing.key" +old_signing_keys: {} +key_refresh_interval: "1d" # 1 Day. + +# The trusted servers to download signing keys from. +perspectives: + servers: + "matrix.org": + verify_keys: + "ed25519:auto": + key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw" + +password_config: + enabled: true + +#email: +# enable_notifs: false +# smtp_host: "localhost" +# smtp_port: 25 +# smtp_user: "exampleusername" +# smtp_pass: "examplepassword" +# require_transport_security: False +# notif_from: "Your Friendly %(app)s Home Server " +# app_name: Matrix +# template_dir: res/templates +# notif_template_html: notif_mail.html +# notif_template_text: notif_mail.txt +# notif_for_new_users: True +# riot_base_url: "http://localhost/riot" + +enable_group_creation: true diff --git a/contrib/docker/conf/log.config b/contrib/docker/conf/log.config new file mode 100644 index 0000000000..45e7eef953 --- /dev/null +++ b/contrib/docker/conf/log.config @@ -0,0 +1,36 @@ +version: 1 + +formatters: + precise: + format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s- %(message)s' + +filters: + context: + (): synapse.util.logcontext.LoggingContextFilter + request: "" + +handlers: + file: + class: logging.handlers.RotatingFileHandler + formatter: precise + filename: /data/homeserver.log + maxBytes: 104857600 + backupCount: 10 + filters: [context] + console: + class: logging.StreamHandler + formatter: precise + filters: [context] + +loggers: + synapse: + level: INFO + + synapse.storage.SQL: + # beware: increasing this to DEBUG will make synapse log sensitive + # information such as access tokens. 
+ level: INFO + +root: + level: INFO + handlers: [file, console] -- cgit 1.4.1 From 48bc22f89dadb8278cf2b8c940604534999d246f Mon Sep 17 00:00:00 2001 From: kaiyou Date: Sun, 4 Feb 2018 10:58:07 +0100 Subject: Allow for a wheel cache and include missing files in the build --- .dockerignore | 5 +++++ Dockerfile | 13 +++++++------ contrib/docker/start.py | 1 + 3 files changed, 13 insertions(+), 6 deletions(-) create mode 100644 .dockerignore diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000000..f36f86fbb7 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,5 @@ +Dockerfile +.travis.yml +.gitignore +demo/etc +tox.ini diff --git a/Dockerfile b/Dockerfile index 5f0433004f..277246b697 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,15 +2,16 @@ FROM python:2-alpine RUN apk add --no-cache --virtual .nacl_deps build-base libffi-dev zlib-dev openssl-dev libjpeg-turbo-dev linux-headers -COPY synapse /usr/local/src/synapse -COPY setup.py setup.cfg README.rst synctl /usr/local/src/ +COPY . /synapse -RUN cd /usr/local/src \ - && pip install --upgrade --process-dependency-links . \ +# A wheel cache may be provided in ./cache for faster build +RUN cd /synapse \ + && pip install --upgrade pip setuptools \ + && mkdir -p /synapse/cache \ + && pip install -f /synapse/cache --upgrade --process-dependency-links . \ + && mv /synapse/contrib/docker/* / \ && rm -rf setup.py setup.cfg synapse -COPY contrib/docker / - VOLUME ["/data"] ENTRYPOINT ["/start.py"] diff --git a/contrib/docker/start.py b/contrib/docker/start.py index 4f63ea1ad5..2c427ba1b7 100755 --- a/contrib/docker/start.py +++ b/contrib/docker/start.py @@ -22,6 +22,7 @@ if mode == "generate": # Parse the configuration file if not os.path.exists("/compiled"): os.mkdir("/compiled") + convert("/conf/homeserver.yaml", "/compiled/homeserver.yaml") convert("/conf/log.config", "/compiled/%s.log.config" % os.environ.get("SYNAPSE_SERVER_NAME")) -- cgit 1.4.1 From 6d1e28a8426da9e954a3edec25a8717376c583f3 Mon Sep 17 00:00:00 2001 From: kaiyou Date: Sun, 4 Feb 2018 11:14:06 +0100 Subject: Generate any missing keys before starting synapse --- contrib/docker/start.py | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/contrib/docker/start.py b/contrib/docker/start.py index 2c427ba1b7..e50d23be5f 100755 --- a/contrib/docker/start.py +++ b/contrib/docker/start.py @@ -3,7 +3,7 @@ import jinja2 import os import sys -import socket +import subprocess convert = lambda src, dst: open(dst, "w").write(jinja2.Template(open(src).read()).render(**os.environ)) mode = sys.argv[1] if len(sys.argv) > 1 else None @@ -12,19 +12,20 @@ if "SYNAPSE_SERVER_NAME" not in os.environ: print("Environment variable SYNAPSE_SERVER_NAME is mandatory, exiting.") sys.exit(2) -params = ["--server-name", os.environ.get("SYNAPSE_SERVER_NAME"), - "--report-stats", os.environ.get("SYNAPSE_REPORT_STATS", "no"), - "--config-path", os.environ.get("SYNAPSE_CONFIG_PATH", "/compiled/homeserver.yaml")] - -if mode == "generate": - params.append("--generate-config") +args = ["python", "-m", "synapse.app.homeserver", + "--server-name", os.environ.get("SYNAPSE_SERVER_NAME"), + "--report-stats", os.environ.get("SYNAPSE_REPORT_STATS", "no"), + "--config-path", os.environ.get("SYNAPSE_CONFIG_PATH", "/compiled/homeserver.yaml")] # Parse the configuration file -if not os.path.exists("/compiled"): - os.mkdir("/compiled") - +if not os.path.exists("/compiled"): os.mkdir("/compiled") convert("/conf/homeserver.yaml", "/compiled/homeserver.yaml") 
convert("/conf/log.config", "/compiled/%s.log.config" % os.environ.get("SYNAPSE_SERVER_NAME")) -# TODO, replace with a call to synapse.app.homeserver.run() -os.execv("/usr/local/bin/python", ["python", "-m", "synapse.app.homeserver"] + params) +# In generate mode, generate a configuration, missing keys, then exit +if mode == "generate": + os.execv("/usr/local/bin/python", args + ["--generate-config"]) +# In normal mode, generate missing keys if any, then run synapse +else: + subprocess.check_output(args + ["--generate-keys"]) + os.execv("/usr/local/bin/python", args) -- cgit 1.4.1 From f2bf0cda02fef358172033b28dab5f1805c31cad Mon Sep 17 00:00:00 2001 From: kaiyou Date: Sun, 4 Feb 2018 11:40:20 +0100 Subject: Generate shared secrets if not defined in the environment --- contrib/docker/start.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/contrib/docker/start.py b/contrib/docker/start.py index e50d23be5f..7057f85f61 100755 --- a/contrib/docker/start.py +++ b/contrib/docker/start.py @@ -5,10 +5,11 @@ import os import sys import subprocess -convert = lambda src, dst: open(dst, "w").write(jinja2.Template(open(src).read()).render(**os.environ)) +convert = lambda src, dst, environ: open(dst, "w").write(jinja2.Template(open(src).read()).render(**environ)) mode = sys.argv[1] if len(sys.argv) > 1 else None +environ = os.environ.copy() -if "SYNAPSE_SERVER_NAME" not in os.environ: +if "SYNAPSE_SERVER_NAME" not in environ: print("Environment variable SYNAPSE_SERVER_NAME is mandatory, exiting.") sys.exit(2) @@ -17,10 +18,16 @@ args = ["python", "-m", "synapse.app.homeserver", "--report-stats", os.environ.get("SYNAPSE_REPORT_STATS", "no"), "--config-path", os.environ.get("SYNAPSE_CONFIG_PATH", "/compiled/homeserver.yaml")] +# Generate any missing shared secret +for secret in ("SYNAPSE_REGISTRATION_SHARED_SECRET", "SYNAPSE_MACAROON_SECRET_KEY"): + if secret not in environ: + print("Generating a random secret for {}".format(secret)) + environ[secret] = os.urandom(32).encode("hex") + # Parse the configuration file if not os.path.exists("/compiled"): os.mkdir("/compiled") -convert("/conf/homeserver.yaml", "/compiled/homeserver.yaml") -convert("/conf/log.config", "/compiled/%s.log.config" % os.environ.get("SYNAPSE_SERVER_NAME")) +convert("/conf/homeserver.yaml", "/compiled/homeserver.yaml", environ) +convert("/conf/log.config", "/compiled/%s.log.config" % environ.get("SYNAPSE_SERVER_NAME"), environ) # In generate mode, generate a configuration, missing keys, then exit if mode == "generate": -- cgit 1.4.1 From 886c2d50197bb3558168ce45b6975777780c7aad Mon Sep 17 00:00:00 2001 From: kaiyou Date: Sun, 4 Feb 2018 12:20:29 +0100 Subject: Support an external postgresql config in the Docker image --- contrib/docker/conf/homeserver.yaml | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/contrib/docker/conf/homeserver.yaml b/contrib/docker/conf/homeserver.yaml index 851c389c19..1a685320c0 100644 --- a/contrib/docker/conf/homeserver.yaml +++ b/contrib/docker/conf/homeserver.yaml @@ -46,8 +46,17 @@ listeners: ## Database ## -{% if SYNAPSE_DB_HOST %} - +{% if SYNAPSE_DB_PASSWORD %} +database: + name: "psycopg2" + args: + user: "{{ SYNAPSE_DB_USER or "matrix" }}" + password: "{{ SYNAPSE_DB_PASSWORD }}" + database: "{{ SYNAPSE_DB_DATABASE or "matrix" }}" + host: "{{ SYNAPSE_DB_HOST or "db" }}" + port: "{{ SYNAPSE_DB_PORT or "5432" }}" + cp_min: 5 + cp_max: 10 {% else %} database: name: "sqlite3" -- cgit 1.4.1 From 042757feb25ba9f0cccfdaa6c8775957880c2d2e 
Mon Sep 17 00:00:00 2001 From: kaiyou Date: Sun, 4 Feb 2018 12:28:42 +0100 Subject: Install the postgres dependencies --- Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 277246b697..881c25c243 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,12 +1,12 @@ FROM python:2-alpine -RUN apk add --no-cache --virtual .nacl_deps build-base libffi-dev zlib-dev openssl-dev libjpeg-turbo-dev linux-headers +RUN apk add --no-cache --virtual .nacl_deps build-base libffi-dev zlib-dev openssl-dev libjpeg-turbo-dev linux-headers postgresql-dev COPY . /synapse # A wheel cache may be provided in ./cache for faster build RUN cd /synapse \ - && pip install --upgrade pip setuptools \ + && pip install --upgrade pip setuptools psycopg2 \ && mkdir -p /synapse/cache \ && pip install -f /synapse/cache --upgrade --process-dependency-links . \ && mv /synapse/contrib/docker/* / \ -- cgit 1.4.1 From 1ba2fe114c79dd0f2f2fb8868f8cb0a9f5893652 Mon Sep 17 00:00:00 2001 From: kaiyou Date: Sun, 4 Feb 2018 12:55:20 +0100 Subject: Provide an example docker compose file --- Dockerfile | 2 ++ contrib/docker/docker-compose.yml | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+) create mode 100644 contrib/docker/docker-compose.yml diff --git a/Dockerfile b/Dockerfile index 881c25c243..25f3746303 100644 --- a/Dockerfile +++ b/Dockerfile @@ -14,4 +14,6 @@ RUN cd /synapse \ VOLUME ["/data"] +EXPOSE 8448 + ENTRYPOINT ["/start.py"] diff --git a/contrib/docker/docker-compose.yml b/contrib/docker/docker-compose.yml new file mode 100644 index 0000000000..659bee2979 --- /dev/null +++ b/contrib/docker/docker-compose.yml @@ -0,0 +1,32 @@ +# This compose file is compatible with Copose itself, it might need some +# adjustments to run properly with stack. + +version: '3' + +services: + + synapse: + image: matrix/synapse + # See the readme for a full documentation of the environment settings + environment: + - SYNAPSE_SERVER_NAME=my.matrix.host + - SYNAPSE_DB_PASSWORD=changeme + volumes: + - ./files:/data + # One may either expose ports directly + ports: + - 8448:8448/tcp + # ... or use a reverse proxy, here is an example for traefik + labels: + - traefik.enable=true + - traefik.frontend.rule=Host:my.matrix.Host + - traefik.port=8448 + + db: + image: postgres:latest + # Change that password, of course! 
+ environment: + - POSTGRES_USER=matrix + - POSTGRES_PASSWORD=changeme + volumes: + - ./schemas:/var/lib/postgres -- cgit 1.4.1 From a207cccb059451682564cddf0e39c7c45b06cb72 Mon Sep 17 00:00:00 2001 From: kaiyou Date: Sun, 4 Feb 2018 15:04:26 +0100 Subject: Reuse environment variables of the postgres container --- contrib/docker/conf/homeserver.yaml | 12 ++++++------ contrib/docker/docker-compose.yml | 11 ++++++++--- 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/contrib/docker/conf/homeserver.yaml b/contrib/docker/conf/homeserver.yaml index 1a685320c0..7450cc1228 100644 --- a/contrib/docker/conf/homeserver.yaml +++ b/contrib/docker/conf/homeserver.yaml @@ -46,15 +46,15 @@ listeners: ## Database ## -{% if SYNAPSE_DB_PASSWORD %} +{% if POSTGRES_PASSWORD %} database: name: "psycopg2" args: - user: "{{ SYNAPSE_DB_USER or "matrix" }}" - password: "{{ SYNAPSE_DB_PASSWORD }}" - database: "{{ SYNAPSE_DB_DATABASE or "matrix" }}" - host: "{{ SYNAPSE_DB_HOST or "db" }}" - port: "{{ SYNAPSE_DB_PORT or "5432" }}" + user: "{{ POSTGRES_USER or "matrix" }}" + password: "{{ POSTGRES_PASSWORD }}" + database: "{{ POSTGRES_DB or "matrix" }}" + host: "{{ POSTGRES_HOST or "db" }}" + port: "{{ POSTGRES_PORT or "5432" }}" cp_min: 5 cp_max: 10 {% else %} diff --git a/contrib/docker/docker-compose.yml b/contrib/docker/docker-compose.yml index 659bee2979..b8f9741f05 100644 --- a/contrib/docker/docker-compose.yml +++ b/contrib/docker/docker-compose.yml @@ -6,13 +6,18 @@ version: '3' services: synapse: - image: matrix/synapse + image: synapse + # Since snyapse does not retry to connect to the database, restart upon + # failure + restart: unless-stopped # See the readme for a full documentation of the environment settings environment: - SYNAPSE_SERVER_NAME=my.matrix.host - - SYNAPSE_DB_PASSWORD=changeme + - SYNAPSE_ENABLE_REGISTRATION=yes volumes: - ./files:/data + depends_on: + - db # One may either expose ports directly ports: - 8448:8448/tcp @@ -29,4 +34,4 @@ services: - POSTGRES_USER=matrix - POSTGRES_PASSWORD=changeme volumes: - - ./schemas:/var/lib/postgres + - ./schemas:/var/lib/postgresql/data -- cgit 1.4.1 From 84a9209ba7294243e5bbefede46a3dee7994da9b Mon Sep 17 00:00:00 2001 From: kaiyou Date: Sun, 4 Feb 2018 15:08:43 +0100 Subject: Remove etc/service files from rob's branch --- contrib/docker/rob/docker-compose.yaml | 39 ----------- .../docker/rob/rootfs/etc/service/synapse/finish | 17 ----- contrib/docker/rob/rootfs/etc/service/synapse/run | 75 ---------------------- 3 files changed, 131 deletions(-) delete mode 100644 contrib/docker/rob/docker-compose.yaml delete mode 100755 contrib/docker/rob/rootfs/etc/service/synapse/finish delete mode 100755 contrib/docker/rob/rootfs/etc/service/synapse/run diff --git a/contrib/docker/rob/docker-compose.yaml b/contrib/docker/rob/docker-compose.yaml deleted file mode 100644 index 73cc29f8fd..0000000000 --- a/contrib/docker/rob/docker-compose.yaml +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright 2017 Vector Creations Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -version: '3' - -services: - postgres: - image: postgres:9.6.5-alpine - environment: - POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} - POSTGRES_DB: synapse - expose: - - 5432 - restart: unless-stopped - volumes: - - postgres-data:/var/lib/postgresql/data/ - - synapse: - image: matrixdotorg/synapse:v0.22.1 - ports: - - 8008:8008 - - 8448:8448 - restart: unless-stopped - volumes: - - ${CONFIG_PATH}:/synapse/config/ - -volumes: - postgres-data: diff --git a/contrib/docker/rob/rootfs/etc/service/synapse/finish b/contrib/docker/rob/rootfs/etc/service/synapse/finish deleted file mode 100755 index 2aace581a1..0000000000 --- a/contrib/docker/rob/rootfs/etc/service/synapse/finish +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# -# Copyright 2017 Vector Creations Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -kill -TERM 1 diff --git a/contrib/docker/rob/rootfs/etc/service/synapse/run b/contrib/docker/rob/rootfs/etc/service/synapse/run deleted file mode 100755 index dd797d3ef9..0000000000 --- a/contrib/docker/rob/rootfs/etc/service/synapse/run +++ /dev/null @@ -1,75 +0,0 @@ -#!/bin/bash -# -# Copyright 2017 Vector Creations Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -e - -: ${CONFIG_PATH:="/synapse/config"} -: ${POSTGRES_DATABASE:="synapse"} -: ${POSTGRES_HOST:="postgres"} -: ${POSTGRES_USER:="postgres"} -: ${REPORT_STATS:="yes"} -: ${SERVER_NAME:="localhost"} - -DATABASE_CONFIG_PATH="${CONFIG_PATH}/database.yaml" -HOMESERVER_CONFIG_PATH="${CONFIG_PATH}/homeserver.yaml" -SYNAPSE_COMMAND="python -m synapse.app.homeserver" - -. /synapse/bin/activate -cd /synapse - -if [[ -n "${GENERATE_CONFIG}" ]]; then - ${SYNAPSE_COMMAND} \ - --server-name ${SERVER_NAME} \ - --config-path ${HOMESERVER_CONFIG_PATH} \ - --generate-config \ - --report-stats=${REPORT_STATS} - - if [[ -f "${DATABASE_CONFIG_PATH}" ]]; then - echo "Config file '${DATABASE_CONFIG_PATH}' already exists. Remove it if you want it to be generated." - else - echo "Generating ${DATABASE_CONFIG_PATH}..." 
- if [[ -n "${POSTGRES_PASSWORD}" ]]; then
- (cat > ${DATABASE_CONFIG_PATH}) <<EOF
-database:
-  name: psycopg2
-  args:
-    user: ${POSTGRES_USER}
-    password: ${POSTGRES_PASSWORD}
-    database: ${POSTGRES_DATABASE}
-    host: ${POSTGRES_HOST}
-EOF
- else
- (cat > ${DATABASE_CONFIG_PATH}) <<EOF
-database:
-  name: sqlite3
-  args:
-    database: /synapse/data/homeserver.db
-EOF
- fi
- fi
-else
- exec ${SYNAPSE_COMMAND} --config-path ${HOMESERVER_CONFIG_PATH}
-fi
-- cgit 1.4.1


From: kaiyou
Date: Sun, 4 Feb 2018 15:27:32 +0100
Subject: Update the Docker readme to match this image's properties

---
 contrib/docker/README.md | 102 +++++++++++++++++++++++++++++------------------
 1 file changed, 64 insertions(+), 38 deletions(-)

diff --git a/contrib/docker/README.md b/contrib/docker/README.md
index c15517d0e0..8ba5f79692 100644
--- a/contrib/docker/README.md
+++ b/contrib/docker/README.md
@@ -1,5 +1,12 @@
 # Synapse Docker
 
+This Docker image will run Synapse as a single process. It does not provide any
+database server or TURN server that you should run separately.
+
+If you run a Postgres server, you should simply have it in the same Compose
+project or set the proper environment variables and the image will automatically
+use that server.
+
 ## Build
 
 Build the docker image with the `docker build` command from the root of the synapse repository.
@@ -10,54 +17,29 @@ docker build -t matrixdotorg/synapse:v0.22.1 .
 
 The `-t` option sets the image tag. Official images are tagged `matrixdotorg/synapse:<tag>` where `<tag>` is the same as the release tag in the synapse git repository.
 
-## Configure
+You may have a local Python wheel cache available, in which case copy the relevant packages in the ``cache/`` directory at the root of the project.
 
-Synapse provides a command for generating homeserver configuration files. These are a good starting point for setting up your own deployment.
+## Run
 
-The documentation below will refer to a `CONFIG_PATH` shell variable. This is a path to a directory where synapse configuration will be stored. It needs to be mapped into the container as a volume at `/synapse/config/` as can be seen in the example `docker run` command.
+It is recommended that you use Docker Compose to run your containers, including
+this image and a Postgres server. A sample ``docker-compose.yml`` is provided,
+with example labels for a reverse proxy and other artifacts.
 
-Docker container environment variables:
-* `GENERATE_CONFIG` - Set this to any non-empty string, such as `yes`, to trigger generation of configuration files. Existing files in the `CONFIG_PATH` will **not** be overwritten.
-* `POSTGRES_DATABASE` - The database name for the synapse postgres database. [default: `synapse`]
-* `POSTGRES_HOST` - The host of the postgres database if you wish to use postgresql instead of sqlite3. [default: `postgres` which is useful when using a container on the same docker network in a compose file where the postgres service is called `postgres`] **NOTE**: `localhost` and `127.0.0.1` refer to the container itself unless running the container with `host` networking.
-* `POSTGRES_PASSWORD` - The password for the synapse postgres database. **If this is set then postgres will be used instead of sqlite3.** [default: none] **NOTE**: You are highly encouraged to use postgresql! Please use the compose file to make it easier to deploy.
-* `POSTGRES_USER` - The user for the synapse postgres database. [default: `postgres`]
-* `REPORT_STATS` - Whether to send anonymous usage statistics back to the Matrix project which helps us to get funding! Must be `yes` or `no`. [default: `yes`]
-* `SERVER_NAME` - The domain used for the Matrix homeserver. If you intend to run this synapse instance on a public domain, use that domain.
[default: `localhost`]
+Then, to run the server:
 
 ```
-CONFIG_PATH=/my/magical/config/path/
-mkdir -p ${CONFIG_PATH}
-docker run \
- --rm \
- -e GENERATE_CONFIG=yes \
- -e POSTGRES_PASSWORD=MyVerySecretPassword \
- -e REPORT_STATS=yes \
- -e SERVER_NAME=example.com \
- -v ${CONFIG_PATH}:/synapse/config/ \
- matrixdotorg/synapse:v0.22.1
+docker-compose up -d
 ```
 
-This will create a temporary container from the image and use the synapse code for generating configuration files and TLS keys and certificates for the specified `SERVER_NAME` domain. The files are written to `CONFIG_PATH`.
+In the case you specified a custom path for your configuration file and wish to
+generate a fresh ``homeserver.yaml``, simply run:
 
-## Run
-
-**NOTE**: If you are not using postgresql and are using sqlite3 as your database, you will need to make a directory to store the sqlite3 database file in and then mount this volume into the container at `/synapse/data/`. As it is so easy to use postgresql, when using Docker containers, this is not documented to somewhat discourage it. Choose a `POSTGRES_PASSWORD` instead.
-
-### Docker Compose
-
-A `docker-compose.yaml` file is included to ease deployment of the basic synapse and postgres setup. Remember to set a `POSTGRES_PASSWORD` when generating your configuration above. You will need it for running the containers in the composition.
-
-From the `docker/` subdirectory of the synapse repository:
 ```
-CONFIG_PATH=/my/magical/config/path/
-POSTGRES_PASSWORD=MyVerySecretPassword \
-docker-compose \
- -p synapse \
- up -d
+docker-compose run synapse generate
 ```
 
-### Docker
+If you do not wish to use Compose, you may still run this image using plain
+Docker commands:
 
 Note that the following is just a guideline and you may need to add parameters to the docker run command to account for the network situation with your postgres database.
 
@@ -65,6 +47,50 @@ Note that the following is just a guideline and you may need to add parameters t
 docker run \
 -d \
 --name synapse \
- -v ${CONFIG_PATH}:/synapse/config/ \
+ -v ${DATA_PATH}:/data \
+ -e SYNAPSE_SERVER_NAME=my.matrix.host \
 matrixdotorg/synapse:v0.22.1
 ```
+
+
+## Volumes
+
+The image expects a single volue, located at ``/data``, that will hold:
+
+* temporary files during uploads;
+* uploaded media and thumbnais;
+* the SQLite database if you do not configure postgres.
+
+## Environment
+
+If you do not specify a custom path for the configuration file, a very generic
+file will be generated, based on the following environment settings.
+These are a good starting point for setting up your own deployment.
+
+Synapse specific settings:
+
+* ``SYNAPSE_SERVER_NAME`` (mandatory), the current server public hostname.
+* ``SYNAPSE_CONFIG_PATH``, path to a custom config file (will ignore all
+  other options then).
+* ``SYNAPSE_NO_TLS``, set this variable to disable TLS in Synapse (use this if
+  you run your own TLS-capable reverse proxy).
+* ``SYNAPSE_WEB_CLIENT``, set this variable to enable the embedded Web client.
+* ``SYNAPSE_ENABLE_REGISTRATION``, set this variable to enable registration on
+  the Synapse instance.
+* ``SYNAPSE_ALLOW_GUEST``, set this variable to allow guest joining this server.
+* ``SYNAPSE_EVENT_CACHE_SIZE``, the event cache size [default `10K`].
+* ``SYNAPSE_REPORT_STATS``, set this variable to `yes` to enable anonymous
+  statistics reporting back to the Matrix project which helps us to get funding.
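+
+As an illustration only (these are hypothetical values, not generated
+defaults), a server configured through these variables could be started with:
+
+```
+docker run -d --name synapse \
+  -v ${DATA_PATH}:/data \
+  -e SYNAPSE_SERVER_NAME=my.matrix.host \
+  -e SYNAPSE_REPORT_STATS=yes \
+  -e SYNAPSE_ENABLE_REGISTRATION=yes \
+  matrixdotorg/synapse:v0.22.1
+```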
+
+Shared secrets, these will be initialized to random values if not set:
+
+* ``SYNAPSE_REGISTRATION_SHARED_SECRET``, secret for registering users if
+  registration is disabled.
+* ``SYNAPSE_MACAROON_SECRET_KEY``, the secret used to sign macaroons.
+
+Database specific values (will use SQLite if not set):
+
+* `POSTGRES_DATABASE` - The database name for the synapse postgres database. [default: `matrix`]
+* `POSTGRES_HOST` - The host of the postgres database if you wish to use postgresql instead of sqlite3. [default: `db` which is useful when using a container on the same docker network in a compose file where the postgres service is called `db`]
+* `POSTGRES_PASSWORD` - The password for the synapse postgres database. **If this is set then postgres will be used instead of sqlite3.** [default: none] **NOTE**: You are highly encouraged to use postgresql! Please use the compose file to make it easier to deploy.
+* `POSTGRES_USER` - The user for the synapse postgres database. [default: `matrix`]
-- cgit 1.4.1


From b8ab78b82c60ab9c897891cf9f53c9cc44873a25 Mon Sep 17 00:00:00 2001
From: kaiyou
Date: Sun, 4 Feb 2018 15:41:54 +0100
Subject: Add the build cache/ folder to gitignore

---
 .gitignore | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.gitignore b/.gitignore
index 491047c352..d13017f400 100644
--- a/.gitignore
+++ b/.gitignore
@@ -32,6 +32,7 @@ demo/media_store.*
 demo/etc
 
 uploads
+cache
 
 .idea/
 media_store/
-- cgit 1.4.1


From f72c9c1fb650c7f3bac4d77d4e24fa0469d09ebb Mon Sep 17 00:00:00 2001
From: kaiyou
Date: Sun, 4 Feb 2018 16:18:40 +0100
Subject: Fix multiple typos

---
 MANIFEST.in | 1 +
 contrib/docker/README.md | 19 ++++++++++++-------
 contrib/docker/docker-compose.yml | 7 ++++---
 3 files changed, 17 insertions(+), 10 deletions(-)

diff --git a/MANIFEST.in b/MANIFEST.in
index afb60e12ee..8c9a57a9ca 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -25,6 +25,7 @@ recursive-include synapse/static *.js
 exclude jenkins.sh
 exclude jenkins*.sh
 exclude jenkins*
+exclude Dockerfile
 recursive-exclude jenkins *.sh
 
 prune .github
diff --git a/contrib/docker/README.md b/contrib/docker/README.md
index 8ba5f79692..73e53e4306 100644
--- a/contrib/docker/README.md
+++ b/contrib/docker/README.md
@@ -23,7 +23,7 @@ You may have a local Python wheel cache available, in which case copy the releva
 
 It is recommended that you use Docker Compose to run your containers, including
 this image and a Postgres server. A sample ``docker-compose.yml`` is provided,
-with example labels for a reverse proxy and other artifacts.
+including example labels for reverse proxying and other artifacts.
 
 Then, to run the server:
 
@@ -35,13 +35,13 @@ In the case you specified a custom path for your configuration file and wish to
 generate a fresh ``homeserver.yaml``, simply run:
 
 ```
-docker-compose run synapse generate
+docker-compose run --rm synapse generate
 ```
 
 If you do not wish to use Compose, you may still run this image using plain
-Docker commands:
-
-Note that the following is just a guideline and you may need to add parameters to the docker run command to account for the network situation with your postgres database.
+Docker commands. Note that the following is just a guideline and you may need
+to add parameters to the docker run command to account for the network situation
+with your postgres database.
``` docker run \ @@ -55,7 +55,7 @@ docker run \ ## Volumes -The image expects a single volue, located at ``/data``, that will hold: +The image expects a single volume, located at ``/data``, that will hold: * temporary files during uploads; * uploaded media and thumbnais; @@ -63,10 +63,15 @@ The image expects a single volue, located at ``/data``, that will hold: ## Environment -If you do not specify a custom path for the configuration file, a very generic +Unless you specify a custom path for the configuration file, a very generic file will be generated, based on the following environment settings. These are a good starting point for setting up your own deployment. +Global settings: + +* ``UID``, the user id Synapse will run as [default 991] +* ``GID``, the group id Synapse will run as [default 991] + Synapse specific settings: * ``SYNAPSE_SERVER_NAME`` (mandatory), the current server public hostname. diff --git a/contrib/docker/docker-compose.yml b/contrib/docker/docker-compose.yml index b8f9741f05..727743effa 100644 --- a/contrib/docker/docker-compose.yml +++ b/contrib/docker/docker-compose.yml @@ -1,4 +1,4 @@ -# This compose file is compatible with Copose itself, it might need some +# This compose file is compatible with Compose itself, it might need some # adjustments to run properly with stack. version: '3' @@ -18,10 +18,11 @@ services: - ./files:/data depends_on: - db - # One may either expose ports directly + # In order to expose Synapse, remove one of the following, you might for + # instance expose the TLS port directly: ports: - 8448:8448/tcp - # ... or use a reverse proxy, here is an example for traefik + # ... or use a reverse proxy, here is an example for traefik: labels: - traefik.enable=true - traefik.frontend.rule=Host:my.matrix.Host -- cgit 1.4.1 From e9021e16c49c4224782040449b43fb0015c9f05c Mon Sep 17 00:00:00 2001 From: kaiyou Date: Sun, 4 Feb 2018 23:19:08 +0100 Subject: Run the server as an unprivileged user --- Dockerfile | 2 +- contrib/docker/start.py | 11 +++++++---- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/Dockerfile b/Dockerfile index 25f3746303..f687a4f2bb 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,6 +1,6 @@ FROM python:2-alpine -RUN apk add --no-cache --virtual .nacl_deps build-base libffi-dev zlib-dev openssl-dev libjpeg-turbo-dev linux-headers postgresql-dev +RUN apk add --no-cache --virtual .nacl_deps su-exec build-base libffi-dev zlib-dev openssl-dev libjpeg-turbo-dev linux-headers postgresql-dev COPY . 
/synapse diff --git a/contrib/docker/start.py b/contrib/docker/start.py index 7057f85f61..8bc72bf428 100755 --- a/contrib/docker/start.py +++ b/contrib/docker/start.py @@ -9,14 +9,16 @@ convert = lambda src, dst, environ: open(dst, "w").write(jinja2.Template(open(sr mode = sys.argv[1] if len(sys.argv) > 1 else None environ = os.environ.copy() +# Check mandatory parameters and build the base start arguments if "SYNAPSE_SERVER_NAME" not in environ: print("Environment variable SYNAPSE_SERVER_NAME is mandatory, exiting.") sys.exit(2) +permissions = "{}:{}".format(environ.get("UID", 991), environ.get("GID", 991)) args = ["python", "-m", "synapse.app.homeserver", - "--server-name", os.environ.get("SYNAPSE_SERVER_NAME"), - "--report-stats", os.environ.get("SYNAPSE_REPORT_STATS", "no"), - "--config-path", os.environ.get("SYNAPSE_CONFIG_PATH", "/compiled/homeserver.yaml")] + "--server-name", environ.get("SYNAPSE_SERVER_NAME"), + "--report-stats", environ.get("SYNAPSE_REPORT_STATS", "no"), + "--config-path", environ.get("SYNAPSE_CONFIG_PATH", "/compiled/homeserver.yaml")] # Generate any missing shared secret for secret in ("SYNAPSE_REGISTRATION_SHARED_SECRET", "SYNAPSE_MACAROON_SECRET_KEY"): @@ -35,4 +37,5 @@ if mode == "generate": # In normal mode, generate missing keys if any, then run synapse else: subprocess.check_output(args + ["--generate-keys"]) - os.execv("/usr/local/bin/python", args) + subprocess.check_output(["chown", "-R", permissions, "/data"]) + os.execv("/sbin/su-exec", ["su-exec", permissions] + args) -- cgit 1.4.1 From 8db84e9b2198ad4a80f6e9e66e2fc8c2d44fbeca Mon Sep 17 00:00:00 2001 From: kaiyou Date: Mon, 5 Feb 2018 20:08:35 +0100 Subject: Remove docker related files from the python manifest --- MANIFEST.in | 1 + 1 file changed, 1 insertion(+) diff --git a/MANIFEST.in b/MANIFEST.in index 8c9a57a9ca..e2a6623a63 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -26,6 +26,7 @@ exclude jenkins.sh exclude jenkins*.sh exclude jenkins* exclude Dockerfile +exclude .dockerignore recursive-exclude jenkins *.sh prune .github -- cgit 1.4.1 From 81010a126e9bbcce018b104921ff5221a67f99cc Mon Sep 17 00:00:00 2001 From: kaiyou Date: Mon, 5 Feb 2018 21:28:15 +0100 Subject: Add dynamic recaptcha configuration in the Docker image --- contrib/docker/README.md | 4 ++++ contrib/docker/conf/homeserver.yaml | 7 +++++++ 2 files changed, 11 insertions(+) diff --git a/contrib/docker/README.md b/contrib/docker/README.md index 73e53e4306..f4bc78908d 100644 --- a/contrib/docker/README.md +++ b/contrib/docker/README.md @@ -86,6 +86,10 @@ Synapse specific settings: * ``SYNAPSE_EVENT_CACHE_SIZE``, the event cache size [default `10K`]. * ``SYNAPSE_REPORT_STATS``, set this variable to `yes` to enable anonymous statistics reporting back to the Matrix project which helps us to get funding. 
+* ``SYNAPSE_RECAPTCHA_PUBLIC_KEY``, set this variable to the recaptcha public
+  key in order to enable recaptcha upon registration
+* ``SYNAPSE_RECAPTCHA_PRIVATE_KEY``, set this variable to the recaptcha private
+  key in order to enable recaptcha upon registration
 
 Shared secrets, these will be initialized to random values if not set:
 
diff --git a/contrib/docker/conf/homeserver.yaml b/contrib/docker/conf/homeserver.yaml
index 7450cc1228..6f1de24aad 100644
--- a/contrib/docker/conf/homeserver.yaml
+++ b/contrib/docker/conf/homeserver.yaml
@@ -112,10 +112,17 @@ max_spider_size: "10M"
 
 ## Captcha ##
 
+{% if SYNAPSE_RECAPTCHA_PUBLIC_KEY %}
+recaptcha_public_key: "{{ SYNAPSE_RECAPTCHA_PUBLIC_KEY }}"
+recaptcha_private_key: "{{ SYNAPSE_RECAPTCHA_PRIVATE_KEY }}"
+enable_registration_captcha: True
+recaptcha_siteverify_api: "https://www.google.com/recaptcha/api/siteverify"
+{% else %}
 recaptcha_public_key: "YOUR_PUBLIC_KEY"
 recaptcha_private_key: "YOUR_PRIVATE_KEY"
 enable_registration_captcha: False
 recaptcha_siteverify_api: "https://www.google.com/recaptcha/api/siteverify"
+{% endif %}
 
 ## Turn ##
-- cgit 1.4.1


From cd51931b62aef63dacf4d79cdfa5de56da4eeda6 Mon Sep 17 00:00:00 2001
From: kaiyou
Date: Mon, 5 Feb 2018 21:53:53 +0100
Subject: Add dynamic TURN configuration in the Docker image

---
 contrib/docker/README.md | 7 +++++--
 contrib/docker/conf/homeserver.yaml | 9 +++++++++
 2 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/contrib/docker/README.md b/contrib/docker/README.md
index f4bc78908d..0da7b56628 100644
--- a/contrib/docker/README.md
+++ b/contrib/docker/README.md
@@ -87,9 +87,12 @@ Synapse specific settings:
 * ``SYNAPSE_REPORT_STATS``, set this variable to `yes` to enable anonymous
   statistics reporting back to the Matrix project which helps us to get funding.
 * ``SYNAPSE_RECAPTCHA_PUBLIC_KEY``, set this variable to the recaptcha public
-  key in order to enable recaptcha upon registration
+  key in order to enable recaptcha upon registration.
 * ``SYNAPSE_RECAPTCHA_PRIVATE_KEY``, set this variable to the recaptcha private
-  key in order to enable recaptcha upon registration
+  key in order to enable recaptcha upon registration.
+* ``SYNAPSE_TURN_URIS``, set this variable to the comma-separated list of TURN
+  URIs to enable TURN for this homeserver.
+* ``SYNAPSE_TURN_SECRET``, set this to the TURN shared secret if required.
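+
+For example, with hypothetical TURN server values (shown only to illustrate
+the expected comma-separated format):
+
+```
+-e SYNAPSE_TURN_URIS="turn:turn.example.com:3478?transport=udp,turn:turn.example.com:3478?transport=tcp"
+-e SYNAPSE_TURN_SECRET="your-turn-shared-secret"
+```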
Shared secrets, these will be initialized to random values if not set: diff --git a/contrib/docker/conf/homeserver.yaml b/contrib/docker/conf/homeserver.yaml index 6f1de24aad..6f8fb24e5f 100644 --- a/contrib/docker/conf/homeserver.yaml +++ b/contrib/docker/conf/homeserver.yaml @@ -126,10 +126,19 @@ recaptcha_siteverify_api: "https://www.google.com/recaptcha/api/siteverify" ## Turn ## +{% if SYNAPSE_TURN_URIS %} +turn_uris: +{% for uri in SYNAPSE_TURN_URIS.split(',') %} - {{ uri }} +{% endfor %} +turn_shared_secret: "{{ SYNAPSE_TURN_SECRET }}" +turn_user_lifetime: "1h" +turn_allow_guests: True +{% else %} turn_uris: [] turn_shared_secret: "YOUR_SHARED_SECRET" turn_user_lifetime: "1h" turn_allow_guests: True +{% endif %} ## Registration ## -- cgit 1.4.1 From cf4ef60e287a6a61844ab260606721db2b7ee0cd Mon Sep 17 00:00:00 2001 From: kaiyou Date: Mon, 5 Feb 2018 22:10:03 +0100 Subject: Document the cache factor environment variable for Docker --- contrib/docker/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/contrib/docker/README.md b/contrib/docker/README.md index 0da7b56628..8b1e0afee6 100644 --- a/contrib/docker/README.md +++ b/contrib/docker/README.md @@ -84,6 +84,7 @@ Synapse specific settings: the Synapse instance. * ``SYNAPSE_ALLOW_GUEST``, set this variable to allow guest joining this server. * ``SYNAPSE_EVENT_CACHE_SIZE``, the event cache size [default `10K`]. +* ``SYNAPSE_CACHE_FACTOR``, the cache factor [default `0.5`]. * ``SYNAPSE_REPORT_STATS``, set this variable to `yes` to enable anonymous statistics reporting back to the Matrix project which helps us to get funding. * ``SYNAPSE_RECAPTCHA_PUBLIC_KEY``, set this variable to the recaptcha public -- cgit 1.4.1 From d8c7da5dca907bf65293b6b967200141cad69410 Mon Sep 17 00:00:00 2001 From: kaiyou Date: Mon, 5 Feb 2018 22:12:50 +0100 Subject: Fix a typo in the Docker README --- contrib/docker/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/docker/README.md b/contrib/docker/README.md index 8b1e0afee6..624dca747a 100644 --- a/contrib/docker/README.md +++ b/contrib/docker/README.md @@ -58,7 +58,7 @@ docker run \ The image expects a single volume, located at ``/data``, that will hold: * temporary files during uploads; -* uploaded media and thumbnais; +* uploaded media and thumbnails; * the SQLite database if you do not configure postgres. 
## Environment -- cgit 1.4.1 From f5364b47ec4b67a552976b9dbb9594d448744e30 Mon Sep 17 00:00:00 2001 From: kaiyou Date: Mon, 5 Feb 2018 22:14:40 +0100 Subject: Point to the 'latest' tag in the Docker documentation --- contrib/docker/README.md | 2 +- contrib/docker/docker-compose.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/contrib/docker/README.md b/contrib/docker/README.md index 624dca747a..87354b9bc3 100644 --- a/contrib/docker/README.md +++ b/contrib/docker/README.md @@ -49,7 +49,7 @@ docker run \ --name synapse \ -v ${DATA_PATH}:/data \ -e SYNAPSE_SERVER_NAME=my.matrix.host \ - matrixdotorg/synapse:v0.22.1 + matrixdotorg/synapse:latest ``` diff --git a/contrib/docker/docker-compose.yml b/contrib/docker/docker-compose.yml index 727743effa..3fb156db47 100644 --- a/contrib/docker/docker-compose.yml +++ b/contrib/docker/docker-compose.yml @@ -6,7 +6,7 @@ version: '3' services: synapse: - image: synapse + image: matrixdotorg/synapse:latest # Since snyapse does not retry to connect to the database, restart upon # failure restart: unless-stopped -- cgit 1.4.1 From 630573a9325f826e81f04650fd83e64d1c831035 Mon Sep 17 00:00:00 2001 From: kaiyou Date: Mon, 5 Feb 2018 22:57:22 +0100 Subject: Do not copy documentation files to the Docker root folder --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index f687a4f2bb..2ae503ce79 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,7 +9,7 @@ RUN cd /synapse \ && pip install --upgrade pip setuptools psycopg2 \ && mkdir -p /synapse/cache \ && pip install -f /synapse/cache --upgrade --process-dependency-links . \ - && mv /synapse/contrib/docker/* / \ + && mv /synapse/contrib/docker/start.py /synapse/contrib/docker/conf / \ && rm -rf setup.py setup.cfg synapse VOLUME ["/data"] -- cgit 1.4.1 From ee3b160a2ad375223b4304184304605a35f1b406 Mon Sep 17 00:00:00 2001 From: kaiyou Date: Mon, 5 Feb 2018 22:57:35 +0100 Subject: Only generate configuration files when necessary --- contrib/docker/start.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/contrib/docker/start.py b/contrib/docker/start.py index 8bc72bf428..d3364e4226 100755 --- a/contrib/docker/start.py +++ b/contrib/docker/start.py @@ -26,16 +26,18 @@ for secret in ("SYNAPSE_REGISTRATION_SHARED_SECRET", "SYNAPSE_MACAROON_SECRET_KE print("Generating a random secret for {}".format(secret)) environ[secret] = os.urandom(32).encode("hex") -# Parse the configuration file -if not os.path.exists("/compiled"): os.mkdir("/compiled") -convert("/conf/homeserver.yaml", "/compiled/homeserver.yaml", environ) -convert("/conf/log.config", "/compiled/%s.log.config" % environ.get("SYNAPSE_SERVER_NAME"), environ) - # In generate mode, generate a configuration, missing keys, then exit if mode == "generate": os.execv("/usr/local/bin/python", args + ["--generate-config"]) + # In normal mode, generate missing keys if any, then run synapse else: + # Parse the configuration file + if "SYNAPSE_CONFIG_PATH" not in environ: + if not os.path.exists("/compiled"): os.mkdir("/compiled") + convert("/conf/homeserver.yaml", "/compiled/homeserver.yaml", environ) + convert("/conf/log.config", "/compiled/%s.log.config" % environ.get("SYNAPSE_SERVER_NAME"), environ) + # Generate missing keys and start synapse subprocess.check_output(args + ["--generate-keys"]) subprocess.check_output(["chown", "-R", permissions, "/data"]) os.execv("/sbin/su-exec", ["su-exec", permissions] + args) -- cgit 1.4.1 From 
107a5c94418de55d52a0232115d5c3c7efc6d285 Mon Sep 17 00:00:00 2001 From: kaiyou Date: Mon, 5 Feb 2018 23:02:33 +0100 Subject: Add the non-tls port to the expose list --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 2ae503ce79..bec884ce9b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -14,6 +14,6 @@ RUN cd /synapse \ VOLUME ["/data"] -EXPOSE 8448 +EXPOSE 8008/tcp 8448/tcp ENTRYPOINT ["/start.py"] -- cgit 1.4.1 From 1ffd9cb93617fe9bb2367d575786c0ff222cd415 Mon Sep 17 00:00:00 2001 From: kaiyou Date: Mon, 5 Feb 2018 23:13:27 +0100 Subject: Support loading application service files from /data/appservices/ --- contrib/docker/README.md | 7 ++++++- contrib/docker/conf/homeserver.yaml | 9 ++++++++- contrib/docker/start.py | 4 ++++ 3 files changed, 18 insertions(+), 2 deletions(-) diff --git a/contrib/docker/README.md b/contrib/docker/README.md index 87354b9bc3..c1724fe269 100644 --- a/contrib/docker/README.md +++ b/contrib/docker/README.md @@ -59,7 +59,12 @@ The image expects a single volume, located at ``/data``, that will hold: * temporary files during uploads; * uploaded media and thumbnails; -* the SQLite database if you do not configure postgres. +* the SQLite database if you do not configure postgres; +* the appservices configuration. + +In order to set up an application service, simply create an ``appservices`` +directory in the data volume and write the application service YAML +configuration file there. Multiple application services are supported. ## Environment diff --git a/contrib/docker/conf/homeserver.yaml b/contrib/docker/conf/homeserver.yaml index 6f8fb24e5f..e5d3f965e4 100644 --- a/contrib/docker/conf/homeserver.yaml +++ b/contrib/docker/conf/homeserver.yaml @@ -128,7 +128,7 @@ recaptcha_siteverify_api: "https://www.google.com/recaptcha/api/siteverify" {% if SYNAPSE_TURN_URIS %} turn_uris: -{% for uri in SYNAPSE_TURN_URIS.split(',') %} - {{ uri }} +{% for uri in SYNAPSE_TURN_URIS.split(',') %} - "{{ uri }}" {% endfor %} turn_shared_secret: "{{ SYNAPSE_TURN_SECRET }}" turn_user_lifetime: "1h" @@ -167,7 +167,14 @@ room_invite_state_types: - "m.room.avatar" - "m.room.name" +{% if SYNAPSE_APPSERVICES %} +app_service_config_files: +{% for appservice in SYNAPSE_APPSERVICES %} - "{{ appservice }}" +{% endfor %} +{% else %} app_service_config_files: [] +{% endif %} + macaroon_secret_key: "{{ SYNAPSE_MACAROON_SECRET_KEY }}" expire_access_token: False diff --git a/contrib/docker/start.py b/contrib/docker/start.py index d3364e4226..8ade0f227d 100755 --- a/contrib/docker/start.py +++ b/contrib/docker/start.py @@ -4,6 +4,7 @@ import jinja2 import os import sys import subprocess +import glob convert = lambda src, dst, environ: open(dst, "w").write(jinja2.Template(open(src).read()).render(**environ)) mode = sys.argv[1] if len(sys.argv) > 1 else None @@ -26,6 +27,9 @@ for secret in ("SYNAPSE_REGISTRATION_SHARED_SECRET", "SYNAPSE_MACAROON_SECRET_KE print("Generating a random secret for {}".format(secret)) environ[secret] = os.urandom(32).encode("hex") +# Load appservices configurations +environ["SYNAPSE_APPSERVICES"] = glob.glob("/data/appservices/*.yaml") + # In generate mode, generate a configuration, missing keys, then exit if mode == "generate": os.execv("/usr/local/bin/python", args + ["--generate-config"]) -- cgit 1.4.1 From 63fd148724399d52f3435b1c172435d7cabcde4c Mon Sep 17 00:00:00 2001 From: kaiyou Date: Thu, 8 Feb 2018 19:46:11 +0100 Subject: Make it clear that two modes are available in the documentation, improve the compose file 
--- contrib/docker/README.md | 17 ++++++++++------- contrib/docker/docker-compose.yml | 12 ++++++++++-- 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/contrib/docker/README.md b/contrib/docker/README.md index c1724fe269..197bad103e 100644 --- a/contrib/docker/README.md +++ b/contrib/docker/README.md @@ -12,7 +12,7 @@ use that server. Build the docker image with the `docker build` command from the root of the synapse repository. ``` -docker build -t matrixdotorg/synapse:v0.22.1 . +docker build -t matrixdotorg/synapse . ``` The `-t` option sets the image tag. Official images are tagged `matrixdotorg/synapse:` where `` is the same as the release tag in the synapse git repository. @@ -76,12 +76,17 @@ Global settings: * ``UID``, the user id Synapse will run as [default 991] * ``GID``, the group id Synapse will run as [default 991] +* ``SYNAPSE_CONFIG_PATH``, path to a custom config file -Synapse specific settings: +If ``SYNAPSE_CONFIG_PATH`` is set, you should generate a configuration file, +then customize it manually. No other environment variable is required. + +Otherwise, a dynamic configuration file will be used. The following environment +variables are available for configuration: * ``SYNAPSE_SERVER_NAME`` (mandatory), the current server public hostname. -* ``SYNAPSE_CONFIG_PATH``, path to a custom config file (will ignore all - other options then). +* ``SYNAPSE_REPORT_STATS``, (mandatory, ``yes`` or ``not``), enable anonymous + statistics reporting back to the Matrix project which helps us to get funding. * ``SYNAPSE_NO_TLS``, set this variable to disable TLS in Synapse (use this if you run your own TLS-capable reverse proxy). * ``SYNAPSE_WEB_CLIENT``, set this variable to enable the embedded Web client. @@ -90,8 +95,6 @@ Synapse specific settings: * ``SYNAPSE_ALLOW_GUEST``, set this variable to allow guest joining this server. * ``SYNAPSE_EVENT_CACHE_SIZE``, the event cache size [default `10K`]. * ``SYNAPSE_CACHE_FACTOR``, the cache factor [default `0.5`]. -* ``SYNAPSE_REPORT_STATS``, set this variable to `yes` to enable anonymous - statistics reporting back to the Matrix project which helps us to get funding. * ``SYNAPSE_RECAPTCHA_PUBLIC_KEY``, set this variable to the recaptcha public key in order to enable recaptcha upon registration. * ``SYNAPSE_RECAPTCHA_PRIVATE_KEY``, set this variable to the recaptcha private @@ -100,7 +103,7 @@ Synapse specific settings: uris to enable TURN for this homeserver. * ``SYNAPSE_TURN_SECRET``, set this to the TURN shared secret if required. -Shared secrets, these will be initialized to random values if not set: +Shared secrets, which will be initialized to random values if not set: * ``SYNAPSE_REGISTRATION_SHARED_SECRET``, secret for registering users if registration is disabled. diff --git a/contrib/docker/docker-compose.yml b/contrib/docker/docker-compose.yml index 3fb156db47..b07984ea34 100644 --- a/contrib/docker/docker-compose.yml +++ b/contrib/docker/docker-compose.yml @@ -6,7 +6,7 @@ version: '3' services: synapse: - image: matrixdotorg/synapse:latest + image: docker.io/matrixdotorg/synapse:latest # Since synapse does not retry to connect to the database, restart upon # failure restart: unless-stopped @@ -15,7 +15,12 @@ services: - SYNAPSE_SERVER_NAME=my.matrix.host - SYNAPSE_ENABLE_REGISTRATION=yes volumes: + # You may either store all the files in a local folder - ./files:/data + # .. 
or you may split this between different storage points # - ./files:/data # - /path/to/ssd:/data/uploads # - /path/to/large_hdd:/data/media depends_on: - db # In order to expose Synapse, remove one of the following, you might for @@ -29,10 +34,13 @@ services: - traefik.port=8448 db: - image: postgres:latest + image: postgres:10-alpine # Change that password, of course! environment: - POSTGRES_USER=matrix - POSTGRES_PASSWORD=changeme volumes: + # You may store the database tables in a local folder.. - ./schemas:/var/lib/postgresql/data + # .. or store them on some high performance storage for better results + # - /path/to/ssd/storage:/var/lib/postgresql/data -- cgit 1.4.1 From 58df3a8c5dcf5de12bcf9190551dc8241d32b8a1 Mon Sep 17 00:00:00 2001 From: kaiyou Date: Thu, 8 Feb 2018 19:48:53 +0100 Subject: Add some documentation about high performance storage --- contrib/docker/README.md | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/contrib/docker/README.md b/contrib/docker/README.md index 197bad103e..3710afb0cf 100644 --- a/contrib/docker/README.md +++ b/contrib/docker/README.md @@ -31,11 +31,11 @@ Then, to run the server: docker-compose up -d ``` -In the case you specified a custom path for your configuration file and wish to +In the case you specify a custom path for your configuration file and wish to generate a fresh ``homeserver.yaml``, simply run: ``` -docker-compose run --rm synapse generate +docker-compose run --rm -e SYNAPSE_SERVER_NAME=my.matrix.host synapse generate ``` If you do not wish to use Compose, you may still run this image using plain @@ -62,6 +62,11 @@ The image expects a single volume, located at ``/data``, that will hold: * the SQLite database if you do not configure postgres; * the appservices configuration. +You are free to use separate volumes depending on storage endpoints at your +disposal. For instance, ``/data/media`` could be stored on a large but low +performance HDD storage while other files could be stored on high performance +endpoints. + In order to set up an application service, simply create an ``appservices`` directory in the data volume and write the application service YAML configuration file there. Multiple application services are supported.
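For illustration, a minimal registration file dropped into the data volume, e.g. ``/data/appservices/my-bridge.yaml``, could look like the sketch below. Every identifier and token here is a hypothetical placeholder; the real values come from whichever application service you deploy. The start script simply globs ``/data/appservices/*.yaml`` and renders the resulting list into ``app_service_config_files``.

```yaml
# Hypothetical appservice registration: /data/appservices/my-bridge.yaml
# All names and secrets below are placeholders, not real values.
id: my-bridge
url: "http://my-bridge:9000"        # where synapse can reach the appservice
as_token: "REPLACE_WITH_AS_TOKEN"   # token the appservice presents to synapse
hs_token: "REPLACE_WITH_HS_TOKEN"   # token synapse presents to the appservice
sender_localpart: my-bridge
namespaces:
  users:
    - exclusive: true
      regex: "@bridge_.*"
  aliases: []
  rooms: []
```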
-- cgit 1.4.1 From 084afbb6a06f2661bed503bf49b0291ce999c6c1 Mon Sep 17 00:00:00 2001 From: kaiyou Date: Thu, 8 Feb 2018 19:50:04 +0100 Subject: Rename the permissions variable to avoid confusion --- contrib/docker/start.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/contrib/docker/start.py b/contrib/docker/start.py index 8ade0f227d..13a10a11bb 100755 --- a/contrib/docker/start.py +++ b/contrib/docker/start.py @@ -15,7 +15,7 @@ if "SYNAPSE_SERVER_NAME" not in environ: print("Environment variable SYNAPSE_SERVER_NAME is mandatory, exiting.") sys.exit(2) -permissions = "{}:{}".format(environ.get("UID", 991), environ.get("GID", 991)) +ownership = "{}:{}".format(environ.get("UID", 991), environ.get("GID", 991)) args = ["python", "-m", "synapse.app.homeserver", "--server-name", environ.get("SYNAPSE_SERVER_NAME"), "--report-stats", environ.get("SYNAPSE_REPORT_STATS", "no"), @@ -43,5 +43,5 @@ else: convert("/conf/log.config", "/compiled/%s.log.config" % environ.get("SYNAPSE_SERVER_NAME"), environ) # Generate missing keys and start synapse subprocess.check_output(args + ["--generate-keys"]) - subprocess.check_output(["chown", "-R", permissions, "/data"]) - os.execv("/sbin/su-exec", ["su-exec", permissions] + args) + subprocess.check_output(["chown", "-R", ownership, "/data"]) + os.execv("/sbin/su-exec", ["su-exec", ownership] + args) -- cgit 1.4.1 From b8a4dceb3cee6b69d1b1b882cef1f96a3ff6249f Mon Sep 17 00:00:00 2001 From: kaiyou Date: Thu, 8 Feb 2018 20:41:41 +0100 Subject: Refactor the start script to better handle mandatory parameters --- contrib/docker/README.md | 2 +- contrib/docker/docker-compose.yml | 1 + contrib/docker/start.py | 56 +++++++++++++++++++++++---------------- 3 files changed, 35 insertions(+), 24 deletions(-) diff --git a/contrib/docker/README.md b/contrib/docker/README.md index 3710afb0cf..0493d2ee6e 100644 --- a/contrib/docker/README.md +++ b/contrib/docker/README.md @@ -90,7 +90,7 @@ Otherwise, a dynamic configuration file will be used. The following environment variables are available for configuration: * ``SYNAPSE_SERVER_NAME`` (mandatory), the current server public hostname. -* ``SYNAPSE_REPORT_STATS``, (mandatory, ``yes`` or ``not``), enable anonymous +* ``SYNAPSE_REPORT_STATS``, (mandatory, ``yes`` or ``no``), enable anonymous statistics reporting back to the Matrix project which helps us to get funding. * ``SYNAPSE_NO_TLS``, set this variable to disable TLS in Synapse (use this if you run your own TLS-capable reverse proxy). 
diff --git a/contrib/docker/docker-compose.yml b/contrib/docker/docker-compose.yml index b07984ea34..3d0b3c0ea4 100644 --- a/contrib/docker/docker-compose.yml +++ b/contrib/docker/docker-compose.yml @@ -13,6 +13,7 @@ services: # See the readme for a full documentation of the environment settings environment: - SYNAPSE_SERVER_NAME=my.matrix.host + - SYNAPSE_REPORT_STATS=no - SYNAPSE_ENABLE_REGISTRATION=yes volumes: # You may either store all the files in a local folder diff --git a/contrib/docker/start.py b/contrib/docker/start.py index 13a10a11bb..32142bbe00 100755 --- a/contrib/docker/start.py +++ b/contrib/docker/start.py @@ -6,42 +6,52 @@ import sys import subprocess import glob +# Utility functions convert = lambda src, dst, environ: open(dst, "w").write(jinja2.Template(open(src).read()).render(**environ)) -mode = sys.argv[1] if len(sys.argv) > 1 else None -environ = os.environ.copy() -# Check mandatory parameters and build the base start arguments -if "SYNAPSE_SERVER_NAME" not in environ: - print("Environment variable SYNAPSE_SERVER_NAME is mandatory, exiting.") - sys.exit(2) - -ownership = "{}:{}".format(environ.get("UID", 991), environ.get("GID", 991)) -args = ["python", "-m", "synapse.app.homeserver", - "--server-name", environ.get("SYNAPSE_SERVER_NAME"), - "--report-stats", environ.get("SYNAPSE_REPORT_STATS", "no"), - "--config-path", environ.get("SYNAPSE_CONFIG_PATH", "/compiled/homeserver.yaml")] +def check_arguments(environ, args): + for argument in args: + if argument not in environ: + print("Environment variable %s is mandatory, exiting." % argument) + sys.exit(2) -# Generate any missing shared secret -for secret in ("SYNAPSE_REGISTRATION_SHARED_SECRET", "SYNAPSE_MACAROON_SECRET_KEY"): - if secret not in environ: - print("Generating a random secret for {}".format(secret)) - environ[secret] = os.urandom(32).encode("hex") +def generate_secrets(environ, secrets): + for secret in secrets: + if secret not in environ: + print("Generating a random secret for {}".format(secret)) + environ[secret] = os.urandom(32).encode("hex") -# Load appservices configurations -environ["SYNAPSE_APPSERVICES"] = glob.glob("/data/appservices/*.yaml") +# Prepare the configuration +mode = sys.argv[1] if len(sys.argv) > 1 else None +environ = os.environ.copy() +ownership = "{}:{}".format(environ.get("UID", 991), environ.get("GID", 991)) +args = ["python", "-m", "synapse.app.homeserver"] # In generate mode, generate a configuration, missing keys, then exit if mode == "generate": - os.execv("/usr/local/bin/python", args + ["--generate-config"]) + check_arguments(environ, ("SYNAPSE_SERVER_NAME", "SYNAPSE_REPORT_STATS", "SYNAPSE_CONFIG_PATH")) + args += [ + "--server-name", environ["SYNAPSE_SERVER_NAME"], + "--report-stats", environ["SYNAPSE_REPORT_STATS"], + "--config-path", environ["SYNAPSE_CONFIG_PATH"], + "--generate-config" + ] + os.execv("/usr/local/bin/python", args) # In normal mode, generate missing keys if any, then run synapse else: # Parse the configuration file - if "SYNAPSE_CONFIG_PATH" not in environ: + if "SYNAPSE_CONFIG_PATH" in environ: + args += ["--config-path", environ["SYNAPSE_CONFIG_PATH"]] + else: + check_arguments(environ, ("SYNAPSE_SERVER_NAME", "SYNAPSE_REPORT_STATS")) + generate_secrets(environ, ("SYNAPSE_REGISTRATION_SHARED_SECRET", "SYNAPSE_MACAROON_SECRET_KEY")) + environ["SYNAPSE_APPSERVICES"] = glob.glob("/data/appservices/*.yaml") if not os.path.exists("/compiled"): os.mkdir("/compiled") convert("/conf/homeserver.yaml", "/compiled/homeserver.yaml", environ) - 
convert("/conf/log.config", "/compiled/%s.log.config" % environ.get("SYNAPSE_SERVER_NAME"), environ) + convert("/conf/log.config", "/compiled/%s.log.config" % environ["SYNAPSE_SERVER_NAME"], environ) + subprocess.check_output(["chown", "-R", ownership, "/data"]) + args += ["--config-path", "/compiled/homeserver.yaml"] # Generate missing keys and start synapse subprocess.check_output(args + ["--generate-keys"]) - subprocess.check_output(["chown", "-R", ownership, "/data"]) os.execv("/sbin/su-exec", ["su-exec", ownership] + args) -- cgit 1.4.1 From e174c46a295ca6e06b217b5dcbcf995b890e6d07 Mon Sep 17 00:00:00 2001 From: kaiyou Date: Thu, 8 Feb 2018 20:42:57 +0100 Subject: Use 'synapse' as a default postgres user in Docker examples --- contrib/docker/conf/homeserver.yaml | 4 ++-- contrib/docker/docker-compose.yml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/contrib/docker/conf/homeserver.yaml b/contrib/docker/conf/homeserver.yaml index e5d3f965e4..1ca1fe991f 100644 --- a/contrib/docker/conf/homeserver.yaml +++ b/contrib/docker/conf/homeserver.yaml @@ -50,9 +50,9 @@ listeners: database: name: "psycopg2" args: - user: "{{ POSTGRES_USER or "matrix" }}" + user: "{{ POSTGRES_USER or "synapse" }}" password: "{{ POSTGRES_PASSWORD }}" - database: "{{ POSTGRES_DB or "matrix" }}" + database: "{{ POSTGRES_DB or "synapse" }}" host: "{{ POSTGRES_HOST or "db" }}" port: "{{ POSTGRES_PORT or "5432" }}" cp_min: 5 diff --git a/contrib/docker/docker-compose.yml b/contrib/docker/docker-compose.yml index 3d0b3c0ea4..e447bf1212 100644 --- a/contrib/docker/docker-compose.yml +++ b/contrib/docker/docker-compose.yml @@ -38,7 +38,7 @@ services: image: postgres:10-alpine # Change that password, of course! environment: - - POSTGRES_USER=matrix + - POSTGRES_USER=synapse - POSTGRES_PASSWORD=changeme volumes: # You may store the database tables in a local folder.. -- cgit 1.4.1 From 914a59cb8c12b25d77ed9a81e4543c23d7e10b5e Mon Sep 17 00:00:00 2001 From: kaiyou Date: Thu, 8 Feb 2018 20:43:45 +0100 Subject: Disable the Web client in the Docker image --- contrib/docker/README.md | 1 - contrib/docker/conf/homeserver.yaml | 6 +++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/contrib/docker/README.md b/contrib/docker/README.md index 0493d2ee6e..9f40dc0d58 100644 --- a/contrib/docker/README.md +++ b/contrib/docker/README.md @@ -94,7 +94,6 @@ variables are available for configuration: statistics reporting back to the Matrix project which helps us to get funding. * ``SYNAPSE_NO_TLS``, set this variable to disable TLS in Synapse (use this if you run your own TLS-capable reverse proxy). -* ``SYNAPSE_WEB_CLIENT``, set this variable to enable the embedded Web client. * ``SYNAPSE_ENABLE_REGISTRATION``, set this variable to enable registration on the Synapse instance. * ``SYNAPSE_ALLOW_GUEST``, set this variable to allow guest joining this server. 
diff --git a/contrib/docker/conf/homeserver.yaml b/contrib/docker/conf/homeserver.yaml index 1ca1fe991f..19a2cbad29 100644 --- a/contrib/docker/conf/homeserver.yaml +++ b/contrib/docker/conf/homeserver.yaml @@ -12,7 +12,7 @@ tls_fingerprints: [] server_name: "{{ SYNAPSE_SERVER_NAME }}" pid_file: /homeserver.pid -web_client: {{ "True" if SYNAPSE_WEB_CLIENT else "False" }} +web_client: False soft_file_limit: 0 ## Ports ## @@ -26,7 +26,7 @@ listeners: tls: true x_forwarded: false resources: - - names: [client, webclient] + - names: [client] compress: true - names: [federation] # Federation APIs compress: false @@ -39,7 +39,7 @@ listeners: x_forwarded: false resources: - - names: [client, webclient] + - names: [client] compress: true - names: [federation] compress: false -- cgit 1.4.1 From a0af0054ec91e92a6843c121cd27e92ea63c1034 Mon Sep 17 00:00:00 2001 From: kaiyou Date: Thu, 8 Feb 2018 20:46:11 +0100 Subject: Honor the SYNAPSE_REPORT_STATS parameter in the Docker image --- contrib/docker/conf/homeserver.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/contrib/docker/conf/homeserver.yaml b/contrib/docker/conf/homeserver.yaml index 19a2cbad29..3b57f7174d 100644 --- a/contrib/docker/conf/homeserver.yaml +++ b/contrib/docker/conf/homeserver.yaml @@ -156,8 +156,13 @@ trusted_third_party_id_servers: ## Metrics ### +{% if SYNAPSE_REPORT_STATS.lower() == "yes" %} +enable_metrics: True +report_stats: True +{% else %} enable_metrics: False report_stats: False +{% endif %} ## API Configuration ## -- cgit 1.4.1 From ef1f8d4be6e970043b8283f5caa1ca764652ad56 Mon Sep 17 00:00:00 2001 From: kaiyou Date: Thu, 8 Feb 2018 20:53:12 +0100 Subject: Enable email server configuration from environment variables --- contrib/docker/README.md | 7 +++++++ contrib/docker/conf/homeserver.yaml | 33 +++++++++++++++++---------------- 2 files changed, 24 insertions(+), 16 deletions(-) diff --git a/contrib/docker/README.md b/contrib/docker/README.md index 9f40dc0d58..b74c72698c 100644 --- a/contrib/docker/README.md +++ b/contrib/docker/README.md @@ -119,3 +119,10 @@ Database specific values (will use SQLite if not set): * `POSTGRES_HOST` - The host of the postgres database if you wish to use postgresql instead of sqlite3. [default: `db` which is useful when using a container on the same docker network in a compose file where the postgres service is called `db`] * `POSTGRES_PASSWORD` - The password for the synapse postgres database. **If this is set then postgres will be used instead of sqlite3.** [default: none] **NOTE**: You are highly encouraged to use postgresql! Please use the compose file to make it easier to deploy. * `POSTGRES_USER` - The user for the synapse postgres database. [default: `matrix`] + +Mail server specific values (will not send emails if not set): + +* ``SYNAPSE_SMTP_HOST``, hostname of the mail server. +* ``SYNAPSE_SMTP_PORT``, TCP port for accessing the mail server [default ``25``]. +* ``SYNAPSE_SMTP_USER``, username for authenticating against the mail server if any. +* ``SYNAPSE_SMTP_PASSWORD``, password for authenticating against the mail server if any. 
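As a sketch, these variables could be wired into the ``synapse`` service of the example ``docker-compose.yml`` as follows; the host and credentials are placeholders to replace with your own mail server settings:

```yaml
# Hypothetical additions to the synapse service environment in
# docker-compose.yml; all values below are placeholders.
environment:
  - SYNAPSE_SMTP_HOST=smtp.example.com
  - SYNAPSE_SMTP_PORT=587
  - SYNAPSE_SMTP_USER=synapse
  - SYNAPSE_SMTP_PASSWORD=changeme
```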
diff --git a/contrib/docker/conf/homeserver.yaml b/contrib/docker/conf/homeserver.yaml index 3b57f7174d..198b8ddee7 100644 --- a/contrib/docker/conf/homeserver.yaml +++ b/contrib/docker/conf/homeserver.yaml @@ -146,6 +146,7 @@ enable_registration: {{ "True" if SYNAPSE_ENABLE_REGISTRATION else "False" }} registration_shared_secret: "{{ SYNAPSE_REGISTRATION_SHARED_SECRET }}" bcrypt_rounds: 12 allow_guest_access: {{ "True" if SYNAPSE_ALLOW_GUEST else "False" }} +enable_group_creation: true # The list of identity servers trusted to verify third party # identifiers by this server. @@ -200,19 +201,19 @@ perspectives: password_config: enabled: true -#email: -# enable_notifs: false -# smtp_host: "localhost" -# smtp_port: 25 -# smtp_user: "exampleusername" -# smtp_pass: "examplepassword" -# require_transport_security: False -# notif_from: "Your Friendly %(app)s Home Server " -# app_name: Matrix -# template_dir: res/templates -# notif_template_html: notif_mail.html -# notif_template_text: notif_mail.txt -# notif_for_new_users: True -# riot_base_url: "http://localhost/riot" - -enable_group_creation: true +{% if SYNAPSE_SMTP_HOST %} +email: + enable_notifs: false + smtp_host: "{{ SYNAPSE_SMTP_HOST }}" + smtp_port: {{ SYNAPSE_SMTP_PORT or "25" }} + smtp_user: "{{ SYNAPSE_SMTP_USER }}" + smtp_pass: "{{ SYNAPSE_SMTP_PASSWORD }}" + require_transport_security: False + notif_from: "{{ SYNAPSE_SMTP_FROM or "hostmaster@" + SYNAPSE_SERVER_NAME }}" + app_name: Matrix + template_dir: res/templates + notif_template_html: notif_mail.html + notif_template_text: notif_mail.txt + notif_for_new_users: True + riot_base_url: "https://{{ SYNAPSE_SERVER_NAME }}" +{% endif %} -- cgit 1.4.1 From b9b668e4bb6ec3c0ae6c696a4b7abf6f537e0504 Mon Sep 17 00:00:00 2001 From: kaiyou Date: Thu, 8 Feb 2018 21:39:36 +0100 Subject: Update to Alpine 3.7 and switch to libressl --- Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index bec884ce9b..8085f3d354 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,6 +1,6 @@ -FROM python:2-alpine +FROM docker.io/python:2-alpine3.7 -RUN apk add --no-cache --virtual .nacl_deps su-exec build-base libffi-dev zlib-dev openssl-dev libjpeg-turbo-dev linux-headers postgresql-dev +RUN apk add --no-cache --virtual .nacl_deps su-exec build-base libffi-dev zlib-dev libressl-dev libjpeg-turbo-dev linux-headers postgresql-dev COPY . /synapse -- cgit 1.4.1 From d8680c969bb2e8248436d7352ebf0f5cd1daea7b Mon Sep 17 00:00:00 2001 From: kaiyou Date: Thu, 8 Feb 2018 21:55:35 +0100 Subject: Make it clear that the image has two modes of operation --- contrib/docker/README.md | 29 ++++++++++++++++++++++----- 1 file changed, 24 insertions(+), 5 deletions(-) diff --git a/contrib/docker/README.md b/contrib/docker/README.md index b74c72698c..27e25afcef 100644 --- a/contrib/docker/README.md +++ b/contrib/docker/README.md @@ -21,23 +21,42 @@ You may have a local Python wheel cache available, in which case copy the releva ## Run +This image is designed to run either with an automatically generated configuration +file or with a custom configuration that requires manual editing. + +### Automated configuration + It is recommended that you use Docker Compose to run your containers, including this image and a Postgres server. A sample ``docker-compose.yml`` is provided, including example labels for reverse proxying and other artifacts. 
-Then, to run the server: +Read the section about environment variables and set at least the mandatory variables, +then run the server: ``` docker-compose up -d ``` +### Manual configuration + +A sample ``docker-compose.yml`` is provided, including example labels for +reverse proxying and other artifacts. + +Specify a ``SYNAPSE_CONFIG_PATH``, preferably to a persistent path, +to use manual configuration. To generate a fresh ``homeserver.yaml``, simply run: -In the case you specify a custom path for your configuration file and wish to -generate a fresh ``homeserver.yaml``, simply run: ``` docker-compose run --rm -e SYNAPSE_SERVER_NAME=my.matrix.host synapse generate ``` +Then, customize your configuration and run the server: + +``` +docker-compose up -d +``` + +### Without Compose + If you do not wish to use Compose, you may still run this image using plain Docker commands. Note that the following is just a guideline and you may need to add parameters to the docker run command to account for the network situation @@ -49,10 +68,10 @@ docker run \ -d \ --name synapse \ -v ${DATA_PATH}:/data \ -e SYNAPSE_SERVER_NAME=my.matrix.host \ - matrixdotorg/synapse:latest + -e SYNAPSE_REPORT_STATS=yes \ + docker.io/matrixdotorg/synapse:latest ``` - ## Volumes The image expects a single volume, located at ``/data``, that will hold: -- cgit 1.4.1 From 48e2c641b80ac57d16701e4a27f348ea4f2c66cc Mon Sep 17 00:00:00 2001 From: kaiyou Date: Thu, 8 Feb 2018 21:58:12 +0100 Subject: Specify the Docker registry in the build tag --- contrib/docker/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/docker/README.md b/contrib/docker/README.md index 27e25afcef..221d9c53b5 100644 --- a/contrib/docker/README.md +++ b/contrib/docker/README.md @@ -12,7 +12,7 @@ use that server. Build the docker image with the `docker build` command from the root of the synapse repository. ``` -docker build -t matrixdotorg/synapse . +docker build -t docker.io/matrixdotorg/synapse . ``` The `-t` option sets the image tag. Official images are tagged `matrixdotorg/synapse:` where `` is the same as the release tag in the synapse git repository. -- cgit 1.4.1 From a03c382966a3219acd26851db9cc6558e5c53310 Mon Sep 17 00:00:00 2001 From: kaiyou Date: Thu, 8 Feb 2018 22:00:43 +0100 Subject: Specify the Docker registry for the postgres image --- contrib/docker/docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/docker/docker-compose.yml b/contrib/docker/docker-compose.yml index b07984ea34..1d2aebbcd3 100644 --- a/contrib/docker/docker-compose.yml +++ b/contrib/docker/docker-compose.yml @@ -35,7 +35,7 @@ services: - traefik.port=8448 db: - image: postgres:10-alpine + image: docker.io/postgres:10-alpine # Change that password, of course! environment: - POSTGRES_USER=synapse - POSTGRES_PASSWORD=changeme volumes: -- cgit 1.4.1 From e511979fe6c4a03da3e9c1d16672e263f54ee2d3 Mon Sep 17 00:00:00 2001 From: kaiyou Date: Fri, 9 Feb 2018 00:13:26 +0100 Subject: Make SYNAPSE_MACAROON_SECRET_KEY a mandatory option --- contrib/docker/README.md | 3 ++- contrib/docker/start.py | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/contrib/docker/README.md b/contrib/docker/README.md index 221d9c53b5..25c358c847 100644 --- a/contrib/docker/README.md +++ b/contrib/docker/README.md @@ -111,6 +111,8 @@ variables are available for configuration: * ``SYNAPSE_SERVER_NAME`` (mandatory), the current server public hostname. 
* ``SYNAPSE_REPORT_STATS``, (mandatory, ``yes`` or ``no``), enable anonymous statistics reporting back to the Matrix project which helps us to get funding. +* ``SYNAPSE_MACAROON_SECRET_KEY`` (mandatory), secret for signing access tokens + to the server, set this to a proper random key. * ``SYNAPSE_NO_TLS``, set this variable to disable TLS in Synapse (use this if you run your own TLS-capable reverse proxy). * ``SYNAPSE_ENABLE_REGISTRATION``, set this variable to enable registration on @@ -130,7 +132,6 @@ Shared secrets, which will be initialized to random values if not set: * ``SYNAPSE_REGISTRATION_SHARED_SECRET``, secret for registering users if registration is disabled. -* ``SYNAPSE_MACAROON_SECRET_KEY``, secret for Macaroon. Database specific values (will use SQLite if not set): diff --git a/contrib/docker/start.py b/contrib/docker/start.py index 32142bbe00..d4c1140b1d 100755 --- a/contrib/docker/start.py +++ b/contrib/docker/start.py @@ -44,8 +44,8 @@ else: if "SYNAPSE_CONFIG_PATH" in environ: args += ["--config-path", environ["SYNAPSE_CONFIG_PATH"]] else: - check_arguments(environ, ("SYNAPSE_SERVER_NAME", "SYNAPSE_REPORT_STATS")) - generate_secrets(environ, ("SYNAPSE_REGISTRATION_SHARED_SECRET",)) + check_arguments(environ, ("SYNAPSE_SERVER_NAME", "SYNAPSE_REPORT_STATS", "SYNAPSE_MACAROON_SECRET_KEY")) + generate_secrets(environ, ("SYNAPSE_REGISTRATION_SHARED_SECRET",)) environ["SYNAPSE_APPSERVICES"] = glob.glob("/data/appservices/*.yaml") if not os.path.exists("/compiled"): os.mkdir("/compiled") convert("/conf/homeserver.yaml", "/compiled/homeserver.yaml", environ) -- cgit 1.4.1 From ca70148c0569295a2b9ecdd1cd9cd85a203f20e7 Mon Sep 17 00:00:00 2001 From: kaiyou Date: Fri, 9 Feb 2018 00:23:19 +0100 Subject: Fix the path to the log config file --- contrib/docker/conf/homeserver.yaml | 2 +- contrib/docker/start.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/contrib/docker/conf/homeserver.yaml b/contrib/docker/conf/homeserver.yaml index 198b8ddee7..6bc25bb45f 100644 --- a/contrib/docker/conf/homeserver.yaml +++ b/contrib/docker/conf/homeserver.yaml @@ -69,7 +69,7 @@ database: event_cache_size: "{{ SYNAPSE_EVENT_CACHE_SIZE or "10K" }}" verbose: 0 log_file: "/data/homeserver.log" -log_config: "/data/{{ SYNAPSE_SERVER_NAME }}.log.config" +log_config: "/compiled/log.config" ## Ratelimiting ## diff --git a/contrib/docker/start.py b/contrib/docker/start.py index d4c1140b1d..75c30b8ac0 100755 --- a/contrib/docker/start.py +++ b/contrib/docker/start.py @@ -49,7 +49,7 @@ else: environ["SYNAPSE_APPSERVICES"] = glob.glob("/data/appservices/*.yaml") if not os.path.exists("/compiled"): os.mkdir("/compiled") convert("/conf/homeserver.yaml", "/compiled/homeserver.yaml", environ) - convert("/conf/log.config", "/compiled/%s.log.config" % environ["SYNAPSE_SERVER_NAME"], environ) + convert("/conf/log.config", "/compiled/log.config", environ) subprocess.check_output(["chown", "-R", ownership, "/data"]) args += ["--config-path", "/compiled/homeserver.yaml"] # Generate missing keys and start synapse subprocess.check_output(args + ["--generate-keys"]) os.execv("/sbin/su-exec", ["su-exec", ownership] + args) -- cgit 1.4.1 From 6f0b1f85f9f34401219eab4b4977a63c698ce987 Mon Sep 17 00:00:00 2001 From: kaiyou Date: Sat, 10 Feb 2018 00:05:03 +0100 Subject: Generate macaroon and registration secrets, then store the results to the data dir --- contrib/docker/docker-compose.yml | 2 +- contrib/docker/start.py | 19 ++++++++++++++----- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/contrib/docker/docker-compose.yml b/contrib/docker/docker-compose.yml index 
1d2aebbcd3..9e32dd87de 100644 --- a/contrib/docker/docker-compose.yml +++ b/contrib/docker/docker-compose.yml @@ -6,7 +6,7 @@ version: '3' services: synapse: - image: docker.io/matrixdotorg/synapse:latest + image: synapse #docker.io/matrixdotorg/synapse:latest # Since synapse does not retry to connect to the database, restart upon # failure restart: unless-stopped diff --git a/contrib/docker/start.py b/contrib/docker/start.py index 75c30b8ac0..90e8b9c51a 100755 --- a/contrib/docker/start.py +++ b/contrib/docker/start.py @@ -16,10 +16,16 @@ def check_arguments(environ, args): sys.exit(2) def generate_secrets(environ, secrets): - for secret in secrets: + for name, secret in secrets.items(): if secret not in environ: - print("Generating a random secret for {}".format(secret)) - environ[secret] = os.urandom(32).encode("hex") + filename = "/data/%s.%s.key" % (environ["SYNAPSE_SERVER_NAME"], name) + if os.path.exists(filename): + with open(filename) as handle: value = handle.read() + else: + print("Generating a random secret for {}".format(name)) + value = os.urandom(32).encode("hex") + with open(filename, "w") as handle: handle.write(value) + environ[secret] = value # Prepare the configuration mode = sys.argv[1] if len(sys.argv) > 1 else None @@ -44,8 +50,11 @@ else: if "SYNAPSE_CONFIG_PATH" in environ: args += ["--config-path", environ["SYNAPSE_CONFIG_PATH"]] else: - check_arguments(environ, ("SYNAPSE_SERVER_NAME", "SYNAPSE_REPORT_STATS", "SYNAPSE_MACAROON_SECRET_KEY")) - generate_secrets(environ, ("SYNAPSE_REGISTRATION_SHARED_SECRET",)) + check_arguments(environ, ("SYNAPSE_SERVER_NAME", "SYNAPSE_REPORT_STATS")) + generate_secrets(environ, { + "registration": "SYNAPSE_REGISTRATION_SHARED_SECRET", + "macaroon": "SYNAPSE_MACAROON_SECRET_KEY" + }) environ["SYNAPSE_APPSERVICES"] = glob.glob("/data/appservices/*.yaml") if not os.path.exists("/compiled"): os.mkdir("/compiled") convert("/conf/homeserver.yaml", "/compiled/homeserver.yaml", environ) -- cgit 1.4.1 From b815aa0e2db8f50116b1443b559ca13fe6ad1750 Mon Sep 17 00:00:00 2001 From: kaiyou Date: Sat, 10 Feb 2018 21:59:58 +0100 Subject: Remove an accidentally committed test configuration --- contrib/docker/docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/docker/docker-compose.yml b/contrib/docker/docker-compose.yml index 9e32dd87de..1d2aebbcd3 100644 --- a/contrib/docker/docker-compose.yml +++ b/contrib/docker/docker-compose.yml @@ -6,7 +6,7 @@ version: '3' services: synapse: - image: synapse #docker.io/matrixdotorg/synapse:latest + image: docker.io/matrixdotorg/synapse:latest # Since synapse does not retry to connect to the database, restart upon # failure restart: unless-stopped -- cgit 1.4.1 From 07f1b7181997dca91b67dca7561ce4c532caf253 Mon Sep 17 00:00:00 2001 From: kaiyou Date: Sat, 10 Feb 2018 23:57:36 +0100 Subject: Explicitly provide the postgres password to synapse in the Compose example --- contrib/docker/docker-compose.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/contrib/docker/docker-compose.yml b/contrib/docker/docker-compose.yml index 1d2aebbcd3..46e72601d3 100644 --- a/contrib/docker/docker-compose.yml +++ b/contrib/docker/docker-compose.yml @@ -15,6 +15,7 @@ services: - SYNAPSE_SERVER_NAME=my.matrix.host - SYNAPSE_REPORT_STATS=no - SYNAPSE_ENABLE_REGISTRATION=yes + - POSTGRES_PASSWORD=changeme volumes: # You may either store all the files in a local folder -- cgit 1.4.1 From f44b7c022f6bc8b30cb8c446e0922b26b8b8eb5a Mon Sep 17 00:00:00 2001 From: kaiyou Date: Sat, 
10 Feb 2018 23:57:51 +0100 Subject: Disable logging to file and rely on the console when using Docker --- contrib/docker/conf/log.config | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/contrib/docker/conf/log.config b/contrib/docker/conf/log.config index 45e7eef953..b5c907c4f9 100644 --- a/contrib/docker/conf/log.config +++ b/contrib/docker/conf/log.config @@ -10,13 +10,6 @@ filters: request: "" handlers: - file: - class: logging.handlers.RotatingFileHandler - formatter: precise - filename: /data/homeserver.log - maxBytes: 104857600 - backupCount: 10 - filters: [context] console: class: logging.StreamHandler formatter: precise @@ -33,4 +26,4 @@ loggers: root: level: INFO - handlers: [file, console] + handlers: [console] -- cgit 1.4.1 From 5a6e54264d3a9f8b9b1f2e99db94e31ea3c21d24 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 9 Mar 2018 13:56:26 +0000 Subject: Make 'unexpected logging context' into warnings I think we've now fixed enough of these that the rest can be logged at warning. --- synapse/util/logcontext.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index d660ec785b..fdd4a93d07 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -163,7 +163,7 @@ class LoggingContext(object): current = self.set_current_context(self.previous_context) if current is not self: if current is self.sentinel: - logger.debug("Expected logging context %s has been lost", self) + logger.warn("Expected logging context %s has been lost", self) else: logger.warn( "Current logging context %s is not expected context %s", @@ -278,7 +278,7 @@ class PreserveLoggingContext(object): context = LoggingContext.set_current_context(self.current_context) if context != self.new_context: - logger.debug( + logger.warn( "Unexpected logging context: %s is not %s", context, self.new_context, ) -- cgit 1.4.1 From 7f8eebc8ee801467b0a04aa0fb07b1ba58866f22 Mon Sep 17 00:00:00 2001 From: Adrian Tschira Date: Sat, 7 Apr 2018 01:39:45 +0200 Subject: Open config file in non-bytes mode Nothing written into it is encoded, so it makes little sense, but it does break in python3 the way it was before. The variable names were adjusted to be less misleading. 
Signed-off-by: Adrian Tschira --- synapse/config/_base.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/synapse/config/_base.py b/synapse/config/_base.py index 32b439d20a..b748ed2b0a 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -281,15 +281,15 @@ class Config(object): ) if not cls.path_exists(config_dir_path): os.makedirs(config_dir_path) - with open(config_path, "wb") as config_file: - config_bytes, config = obj.generate_config( + with open(config_path, "w") as config_file: + config_str, config = obj.generate_config( config_dir_path=config_dir_path, server_name=server_name, report_stats=(config_args.report_stats == "yes"), is_generating_file=True ) obj.invoke_all("generate_files", config) - config_file.write(config_bytes) + config_file.write(config_str) print(( "A config file has been generated in %r for server name" " %r with corresponding SSL keys and self-signed" -- cgit 1.4.1 From a3f9ddbeded33c61a715c86ba5029954ba22c22d Mon Sep 17 00:00:00 2001 From: Adrian Tschira Date: Tue, 10 Apr 2018 17:22:52 +0200 Subject: Open certificate files as bytes That's what pyOpenSSL expects on python3 Signed-off-by: Adrian Tschira --- synapse/config/tls.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/config/tls.py b/synapse/config/tls.py index 29eb012ddb..b66154bc7c 100644 --- a/synapse/config/tls.py +++ b/synapse/config/tls.py @@ -133,7 +133,7 @@ class TlsConfig(Config): tls_dh_params_path = config["tls_dh_params_path"] if not self.path_exists(tls_private_key_path): - with open(tls_private_key_path, "w") as private_key_file: + with open(tls_private_key_path, "wb") as private_key_file: tls_private_key = crypto.PKey() tls_private_key.generate_key(crypto.TYPE_RSA, 2048) private_key_pem = crypto.dump_privatekey( @@ -148,7 +148,7 @@ class TlsConfig(Config): ) if not self.path_exists(tls_certificate_path): - with open(tls_certificate_path, "w") as certificate_file: + with open(tls_certificate_path, "wb") as certificate_file: cert = crypto.X509() subject = cert.get_subject() subject.CN = config["server_name"] -- cgit 1.4.1 From bfc2ade9b3784469a5f7b80119fbd1679b6268c8 Mon Sep 17 00:00:00 2001 From: Adrian Tschira Date: Sun, 15 Apr 2018 17:24:25 +0200 Subject: Make event properties raise AttributeError instead They raised KeyError before. I'm changing this because the code uses hasattr() to check for the presence of a key. This worked accidentally before, because hasattr() silences all exceptions in python 2. However, in python3, this isn't the case anymore. I had a look around to see if anything depended on this raising a KeyError and I couldn't find anything. Of course, I could have simply missed it. 
Signed-off-by: Adrian Tschira --- synapse/events/__init__.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index e673e96cc0..d4d1b92f7a 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -48,13 +48,22 @@ class _EventInternalMetadata(object): def _event_dict_property(key): def getter(self): - return self._event_dict[key] + try: + return self._event_dict[key] + except KeyError: + raise AttributeError(key) def setter(self, v): - self._event_dict[key] = v + try: + self._event_dict[key] = v + except KeyError: + raise AttributeError(key) def delete(self): - del self._event_dict[key] + try: + del self._event_dict[key] + except KeyError: + raise AttributeError(key) return property( getter, -- cgit 1.4.1 From 2a3c33ff03aa88317c30da43cd3773c2789f0fcf Mon Sep 17 00:00:00 2001 From: Adrian Tschira Date: Sun, 15 Apr 2018 17:15:16 +0200 Subject: Use six.moves.urlparse The imports were shuffled around a bunch in py3 Signed-off-by: Adrian Tschira --- synapse/config/appservice.py | 4 ++-- synapse/http/matrixfederationclient.py | 3 +-- synapse/rest/client/v1/login.py | 2 +- synapse/rest/client/v1/room.py | 9 +++++---- synapse/rest/media/v1/_base.py | 2 +- synapse/rest/media/v1/media_repository.py | 2 +- tests/rest/client/v1/test_rooms.py | 14 +++++++------- tests/utils.py | 5 ++--- 8 files changed, 20 insertions(+), 21 deletions(-) diff --git a/synapse/config/appservice.py b/synapse/config/appservice.py index 9a2359b6fd..277305e184 100644 --- a/synapse/config/appservice.py +++ b/synapse/config/appservice.py @@ -17,11 +17,11 @@ from ._base import Config, ConfigError from synapse.appservice import ApplicationService from synapse.types import UserID -import urllib import yaml import logging from six import string_types +from six.moves.urllib import parse as urlparse logger = logging.getLogger(__name__) @@ -105,7 +105,7 @@ def _load_appservice(hostname, as_info, config_filename): ) localpart = as_info["sender_localpart"] - if urllib.quote(localpart) != localpart: + if urlparse.quote(localpart) != localpart: raise ValueError( "sender_localpart needs characters which are not URL encoded." 
) diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 60a29081e8..c2e5610f50 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -38,8 +38,7 @@ import logging import random import sys import urllib -import urlparse - +from six.moves.urllib import parse as urlparse logger = logging.getLogger(__name__) outbound_logger = logging.getLogger("synapse.http.outbound") diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index 45844aa2d2..34df5be4e9 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -25,7 +25,7 @@ from .base import ClientV1RestServlet, client_path_patterns import simplejson as json import urllib -import urlparse +from six.moves.urllib import parse as urlparse import logging from saml2 import BINDING_HTTP_POST diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 2ad0e5943b..fcf9c9ab44 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -28,8 +28,9 @@ from synapse.http.servlet import ( parse_json_object_from_request, parse_string, parse_integer ) +from six.moves.urllib import parse as urlparse + import logging -import urllib import simplejson as json logger = logging.getLogger(__name__) @@ -433,7 +434,7 @@ class RoomMessageListRestServlet(ClientV1RestServlet): as_client_event = "raw" not in request.args filter_bytes = request.args.get("filter", None) if filter_bytes: - filter_json = urllib.unquote(filter_bytes[-1]).decode("UTF-8") + filter_json = urlparse.unquote(filter_bytes[-1]).decode("UTF-8") event_filter = Filter(json.loads(filter_json)) else: event_filter = None @@ -718,8 +719,8 @@ class RoomTypingRestServlet(ClientV1RestServlet): def on_PUT(self, request, room_id, user_id): requester = yield self.auth.get_user_by_req(request) - room_id = urllib.unquote(room_id) - target_user = UserID.from_string(urllib.unquote(user_id)) + room_id = urlparse.unquote(room_id) + target_user = UserID.from_string(urlparse.unquote(user_id)) content = parse_json_object_from_request(request) diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py index e7ac01da01..d9c4af9389 100644 --- a/synapse/rest/media/v1/_base.py +++ b/synapse/rest/media/v1/_base.py @@ -28,7 +28,7 @@ import os import logging import urllib -import urlparse +from six.moves.urllib import parse as urlparse logger = logging.getLogger(__name__) diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index bb79599379..9800ce7581 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -47,7 +47,7 @@ import shutil import cgi import logging -import urlparse +from six.moves.urllib import parse as urlparse logger = logging.getLogger(__name__) diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index 7e8966a1a8..d763400eaf 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -24,7 +24,7 @@ from synapse.api.constants import Membership from synapse.types import UserID import json -import urllib +from six.moves.urllib import parse as urlparse from ....utils import MockHttpResource, setup_test_homeserver from .utils import RestTestCase @@ -766,7 +766,7 @@ class RoomMemberStateTestCase(RestTestCase): @defer.inlineCallbacks def test_rooms_members_self(self): path = "/rooms/%s/state/m.room.member/%s" % ( - urllib.quote(self.room_id), self.user_id + 
urlparse.quote(self.room_id), self.user_id ) # valid join message (NOOP since we made the room) @@ -786,7 +786,7 @@ class RoomMemberStateTestCase(RestTestCase): def test_rooms_members_other(self): self.other_id = "@zzsid1:red" path = "/rooms/%s/state/m.room.member/%s" % ( - urllib.quote(self.room_id), self.other_id + urlparse.quote(self.room_id), self.other_id ) # valid invite message @@ -802,7 +802,7 @@ class RoomMemberStateTestCase(RestTestCase): def test_rooms_members_other_custom_keys(self): self.other_id = "@zzsid1:red" path = "/rooms/%s/state/m.room.member/%s" % ( - urllib.quote(self.room_id), self.other_id + urlparse.quote(self.room_id), self.other_id ) # valid invite message with custom key @@ -859,7 +859,7 @@ class RoomMessagesTestCase(RestTestCase): @defer.inlineCallbacks def test_invalid_puts(self): path = "/rooms/%s/send/m.room.message/mid1" % ( - urllib.quote(self.room_id)) + urlparse.quote(self.room_id)) # missing keys or invalid json (code, response) = yield self.mock_resource.trigger( "PUT", path, '{}' @@ -894,7 +894,7 @@ class RoomMessagesTestCase(RestTestCase): @defer.inlineCallbacks def test_rooms_messages_sent(self): path = "/rooms/%s/send/m.room.message/mid1" % ( - urllib.quote(self.room_id)) + urlparse.quote(self.room_id)) content = '{"body":"test","msgtype":{"type":"a"}}' (code, response) = yield self.mock_resource.trigger("PUT", path, content) @@ -911,7 +911,7 @@ class RoomMessagesTestCase(RestTestCase): # m.text message type path = "/rooms/%s/send/m.room.message/mid2" % ( - urllib.quote(self.room_id)) + urlparse.quote(self.room_id)) content = '{"body":"test2","msgtype":"m.text"}' (code, response) = yield self.mock_resource.trigger("PUT", path, content) self.assertEquals(200, code, msg=str(response)) diff --git a/tests/utils.py b/tests/utils.py index f15317d27b..9e815d8643 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -15,8 +15,7 @@ import hashlib from inspect import getcallargs -import urllib -import urlparse +from six.moves.urllib import parse as urlparse from mock import Mock, patch from twisted.internet import defer, reactor @@ -234,7 +233,7 @@ class MockHttpResource(HttpServer): if matcher: try: args = [ - urllib.unquote(u).decode("UTF-8") + urlparse.unquote(u).decode("UTF-8") for u in matcher.groups() ] -- cgit 1.4.1 From 1ea904b9f09ea6a9df7792f61029d0250602d46b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 23 Apr 2018 00:53:18 +0100 Subject: Use deferred.addTimeout instead of time_bound_deferred This doesn't feel like a wheel we need to reinvent. --- synapse/http/__init__.py | 22 +++++++++++++ synapse/http/client.py | 20 +++++------- synapse/http/matrixfederationclient.py | 35 ++++++++++----------- synapse/notifier.py | 23 +++++++------- synapse/util/__init__.py | 56 ---------------------------------- tests/util/test_clock.py | 33 -------------------- 6 files changed, 59 insertions(+), 130 deletions(-) delete mode 100644 tests/util/test_clock.py diff --git a/synapse/http/__init__.py b/synapse/http/__init__.py index bfebb0f644..20e568bc43 100644 --- a/synapse/http/__init__.py +++ b/synapse/http/__init__.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,3 +13,24 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. +from twisted.internet.defer import CancelledError +from twisted.python import failure + +from synapse.api.errors import SynapseError + + +class RequestTimedOutError(SynapseError): + """Exception representing timeout of an outbound request""" + def __init__(self): + super(RequestTimedOutError, self).__init__(504, "Timed out") + + +def cancelled_to_request_timed_out_error(value): + """Turns CancelledErrors into RequestTimedOutErrors. + + For use with deferred.addTimeout() + """ + if isinstance(value, failure.Failure): + value.trap(CancelledError) + raise RequestTimedOutError() + return value diff --git a/synapse/http/client.py b/synapse/http/client.py index f3e4973c2e..35c8d51e71 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -18,9 +19,9 @@ from OpenSSL.SSL import VERIFY_NONE from synapse.api.errors import ( CodeMessageException, MatrixCodeMessageException, SynapseError, Codes, ) +from synapse.http import cancelled_to_request_timed_out_error from synapse.util.caches import CACHE_SIZE_FACTOR from synapse.util.logcontext import make_deferred_yieldable -from synapse.util import logcontext import synapse.metrics from synapse.http.endpoint import SpiderEndpoint @@ -95,21 +96,16 @@ class SimpleHttpClient(object): # counters to it outgoing_requests_counter.inc(method) - def send_request(): + logger.info("Sending request %s %s", method, uri) + + try: request_deferred = self.agent.request( method, uri, *args, **kwargs ) - - return self.clock.time_bound_deferred( - request_deferred, - time_out=60, + request_deferred.addTimeout( + 60, reactor, cancelled_to_request_timed_out_error, ) - - logger.info("Sending request %s %s", method, uri) - - try: - with logcontext.PreserveLoggingContext(): - response = yield send_request() + response = yield make_deferred_yieldable(request_deferred) incoming_responses_counter.inc(method, response.code) logger.info( diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 60a29081e8..fe4b1636a6 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,17 +13,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-import synapse.util.retryutils from twisted.internet import defer, reactor, protocol from twisted.internet.error import DNSLookupError from twisted.web.client import readBody, HTTPConnectionPool, Agent from twisted.web.http_headers import Headers from twisted.web._newclient import ResponseDone +from synapse.http import cancelled_to_request_timed_out_error from synapse.http.endpoint import matrix_federation_endpoint +import synapse.metrics from synapse.util.async import sleep from synapse.util import logcontext -import synapse.metrics +from synapse.util.logcontext import make_deferred_yieldable +import synapse.util.retryutils from canonicaljson import encode_canonical_json @@ -184,21 +187,19 @@ class MatrixFederationHttpClient(object): producer = body_callback(method, http_url_bytes, headers_dict) try: - def send_request(): - request_deferred = self.agent.request( - method, - url_bytes, - Headers(headers_dict), - producer - ) - - return self.clock.time_bound_deferred( - request_deferred, - time_out=timeout / 1000. if timeout else 60, - ) - - with logcontext.PreserveLoggingContext(): - response = yield send_request() + request_deferred = self.agent.request( + method, + url_bytes, + Headers(headers_dict), + producer + ) + request_deferred.addTimeout( + timeout / 1000. if timeout else 60, + reactor, cancelled_to_request_timed_out_error, + ) + response = yield make_deferred_yieldable( + request_deferred, + ) log_result = "%d %s" % (response.code, response.phrase,) break diff --git a/synapse/notifier.py b/synapse/notifier.py index 0e40a4aad6..1e4f78b993 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -13,12 +13,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -from twisted.internet import defer +from twisted.internet import defer, reactor +from twisted.internet.defer import TimeoutError + from synapse.api.constants import EventTypes, Membership from synapse.api.errors import AuthError from synapse.handlers.presence import format_user_presence_state -from synapse.util import DeferredTimedOutError from synapse.util.logutils import log_function from synapse.util.async import ObservableDeferred from synapse.util.logcontext import PreserveLoggingContext, preserve_fn @@ -331,11 +332,11 @@ class Notifier(object): # Now we wait for the _NotifierUserStream to be told there # is a new token. listener = user_stream.new_listener(prev_token) + listener.deferred.addTimeout( + (end_time - now) / 1000., reactor, + ) with PreserveLoggingContext(): - yield self.clock.time_bound_deferred( - listener.deferred, - time_out=(end_time - now) / 1000. - ) + yield listener.deferred current_token = user_stream.current_token @@ -346,7 +347,7 @@ class Notifier(object): # Update the prev_token to the current_token since nothing # has happened between the old prev_token and the current_token prev_token = current_token - except DeferredTimedOutError: + except TimeoutError: break except defer.CancelledError: break @@ -551,13 +552,11 @@ class Notifier(object): if end_time <= now: break + listener.deferred.addTimeout((end_time - now) / 1000., reactor) try: with PreserveLoggingContext(): - yield self.clock.time_bound_deferred( - listener.deferred, - time_out=(end_time - now) / 1000. 
- ) - except DeferredTimedOutError: + yield listener.deferred + except TimeoutError: break except defer.CancelledError: break diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 756d8ffa32..814a7bf71b 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -13,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.api.errors import SynapseError from synapse.util.logcontext import PreserveLoggingContext from twisted.internet import defer, reactor, task @@ -24,11 +23,6 @@ import logging logger = logging.getLogger(__name__) -class DeferredTimedOutError(SynapseError): - def __init__(self): - super(DeferredTimedOutError, self).__init__(504, "Timed out") - - def unwrapFirstError(failure): # defer.gatherResults and DeferredLists wrap failures. failure.trap(defer.FirstError) @@ -85,53 +79,3 @@ class Clock(object): except Exception: if not ignore_errs: raise - - def time_bound_deferred(self, given_deferred, time_out): - if given_deferred.called: - return given_deferred - - ret_deferred = defer.Deferred() - - def timed_out_fn(): - e = DeferredTimedOutError() - - try: - ret_deferred.errback(e) - except Exception: - pass - - try: - given_deferred.cancel() - except Exception: - pass - - timer = None - - def cancel(res): - try: - self.cancel_call_later(timer) - except Exception: - pass - return res - - ret_deferred.addBoth(cancel) - - def success(res): - try: - ret_deferred.callback(res) - except Exception: - pass - - return res - - def err(res): - try: - ret_deferred.errback(res) - except Exception: - pass - - given_deferred.addCallbacks(callback=success, errback=err) - - timer = self.call_later(time_out, timed_out_fn) - - return ret_deferred diff --git a/tests/util/test_clock.py b/tests/util/test_clock.py deleted file mode 100644 index 9672603579..0000000000 --- a/tests/util/test_clock.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2017 Vector Creations Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from synapse import util -from twisted.internet import defer -from tests import unittest - - -class ClockTestCase(unittest.TestCase): - @defer.inlineCallbacks - def test_time_bound_deferred(self): - # just a deferred which never resolves - slow_deferred = defer.Deferred() - - clock = util.Clock() - time_bound = clock.time_bound_deferred(slow_deferred, 0.001) - - try: - yield time_bound - self.fail("Expected timedout error, but got nothing") - except util.DeferredTimedOutError: - pass -- cgit 1.4.1 From 13a2beabca14d31d6808022d4ca45dbe4f95d2d9 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Tue, 24 Apr 2018 15:43:30 +0100 Subject: Update CHANGES.rst fix formatting on line break --- CHANGES.rst | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index cc2f4676ff..b7da58edbb 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -16,8 +16,7 @@ Changes: * move handling of auto_join_rooms to RegisterHandler (PR #2996) Thanks to @krombel! 
* Improve handling of SRV records for federation connections (PR #3016) Thanks to @silkeh! * Document the behaviour of ResponseCache (PR #3059) -* Preparation for py3 (PR #3061, #3073, #3074, #3075, #3103, #3104, #3106, #3107 -#3109, #3110) Thanks to @NotAFile! +* Preparation for py3 (PR #3061, #3073, #3074, #3075, #3103, #3104, #3106, #3107, #3109, #3110) Thanks to @NotAFile! * update prometheus dashboard to use new metric names (PR #3069) Thanks to @krombel! * use python3-compatible prints (PR #3074) Thanks to @NotAFile! * Send federation events concurrently (PR #3078) -- cgit 1.4.1 From 6ab3b9c743753a9673192ef6ce41cfd4db443ca9 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Tue, 24 Apr 2018 16:39:20 +0100 Subject: Update CHANGES.rst Rephrase v0.28.0-rc1 summary --- CHANGES.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index cc2f4676ff..f4b63a512b 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -3,7 +3,8 @@ Changes in synapse v0.28.0-rc1 (2018-04-24) Minor performance improvement to federation sending and bug fixes. -(Note: This release does not include state resolutions discussed in matrix live) +(Note: This release does not include the delta state resolution implementation discussed in matrix live) + Features: -- cgit 1.4.1 From e3a373f0023e99bdd2ff9bd37ded3571904b07cb Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Wed, 25 Apr 2018 14:58:43 +0100 Subject: remove duplicates from groups tables and rename inconsistently named indexes. Based on https://github.com/matrix-org/synapse/pull/3128 - thanks @vurpo\! --- .../schema/delta/48/group_unique_indexes.sql | 34 ++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 synapse/storage/schema/delta/48/group_unique_indexes.sql diff --git a/synapse/storage/schema/delta/48/group_unique_indexes.sql b/synapse/storage/schema/delta/48/group_unique_indexes.sql new file mode 100644 index 0000000000..9ea7a8f2e6 --- /dev/null +++ b/synapse/storage/schema/delta/48/group_unique_indexes.sql @@ -0,0 +1,34 @@ +/* Copyright 2018 New Vector Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- remove duplicates from group_users table +DELETE FROM group_users WHERE ctid NOT IN ( + SELECT min(ctid) FROM group_users GROUP BY group_id, user_id +); +DROP INDEX groups_users_g_idx; +CREATE UNIQUE INDEX group_users_g_idx ON group_users(group_id, user_id); + +-- remove duplicates from group_invites table +DELETE FROM group_invites WHERE ctid NOT IN ( + SELECT min(ctid) FROM group_invites GROUP BY group_id, user_id +); +DROP INDEX groups_invites_g_idx; +CREATE UNIQUE INDEX group_invites_g_idx ON group_invites(group_id, user_id); + +-- rename other indexes to actually match their table names... 
+ALTER INDEX groups_users_u_idx RENAME TO group_users_u_idx; +ALTER INDEX groups_invites_u_idx RENAME TO group_invites_u_idx; +ALTER INDEX groups_rooms_g_idx RENAME TO group_rooms_g_idx; +ALTER INDEX groups_rooms_r_idx RENAME TO group_rooms_r_idx; -- cgit 1.4.1 From ba3166743c50cb85c9ab2d35eb31968f892260bc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 25 Apr 2018 15:11:18 +0100 Subject: Fix quarantine media admin API --- synapse/storage/room.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/room.py b/synapse/storage/room.py index 740c036975..c0c01bcfbe 100644 --- a/synapse/storage/room.py +++ b/synapse/storage/room.py @@ -595,7 +595,7 @@ class RoomStore(RoomWorkerStore, SearchStore): while next_token: sql = """ SELECT stream_ordering, json FROM events - JOIN event_json USING (event_id) + JOIN event_json USING (room_id, event_id) WHERE room_id = ? AND stream_ordering < ? AND contains_url = ? AND outlier = ? -- cgit 1.4.1 From 22881b3d692f44696189a1e0eeb6e83999dae10c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 25 Apr 2018 15:32:04 +0100 Subject: Also fix reindexing of search --- synapse/storage/search.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/search.py b/synapse/storage/search.py index 426cbe6e1a..6ba3e59889 100644 --- a/synapse/storage/search.py +++ b/synapse/storage/search.py @@ -77,7 +77,7 @@ class SearchStore(BackgroundUpdateStore): sql = ( "SELECT stream_ordering, event_id, room_id, type, json, " " origin_server_ts FROM events" - " JOIN event_json USING (event_id)" + " JOIN event_json USING (room_id, event_id)" " WHERE ? <= stream_ordering AND stream_ordering < ?" " AND (%s)" " ORDER BY stream_ordering DESC" -- cgit 1.4.1 From 7ec8e798b4bc67b7365be3fc743e236b9f3c76ae Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 26 Apr 2018 11:31:22 +0100 Subject: Fix media admin APIs --- synapse/storage/room.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/storage/room.py b/synapse/storage/room.py index c0c01bcfbe..ea6a189185 100644 --- a/synapse/storage/room.py +++ b/synapse/storage/room.py @@ -530,7 +530,7 @@ class RoomStore(RoomWorkerStore, SearchStore): # Convert the IDs to MXC URIs for media_id in local_mxcs: - local_media_mxcs.append("mxc://%s/%s" % (self.hostname, media_id)) + local_media_mxcs.append("mxc://%s/%s" % (self.hs.hostname, media_id)) for hostname, media_id in remote_mxcs: remote_media_mxcs.append("mxc://%s/%s" % (hostname, media_id)) @@ -619,7 +619,7 @@ class RoomStore(RoomWorkerStore, SearchStore): if matches: hostname = matches.group(1) media_id = matches.group(2) - if hostname == self.hostname: + if hostname == self.hs.hostname: local_media_mxcs.append(media_id) else: remote_media_mxcs.append((hostname, media_id)) -- cgit 1.4.1 From d78ada31662e0aeeb5d13ddd16aabf432574fffd Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 26 Apr 2018 12:34:40 +0100 Subject: Miscellaneous fixes to python_dependencies * add some doc about wtf this thing does * pin Twisted to < 18.4 * add explicit dep on six (fixes #3089) --- synapse/python_dependencies.py | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 5cabf7dabe..711cbb6c50 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -1,5 +1,6 @@ # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2017 Vector Creations Ltd +# Copyright 2018 New Vector Ltd # # Licensed 
under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -18,6 +19,18 @@ from distutils.version import LooseVersion logger = logging.getLogger(__name__) +# this dict maps from python package name to a list of modules we expect it to +# provide. +# +# the key is a "requirement specifier", as used as a parameter to `pip +# install`[1], or an `install_requires` argument to `setuptools.setup` [2]. +# +# the value is a sequence of strings; each entry should be the name of the +# python module, optionally followed by a version assertion which can be either +# ">=" or "==". +# +# [1] https://pip.pypa.io/en/stable/reference/pip_install/#requirement-specifiers. +# [2] https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-dependencies REQUIREMENTS = { "jsonschema>=2.5.1": ["jsonschema>=2.5.1"], "frozendict>=0.4": ["frozendict"], @@ -26,7 +39,11 @@ REQUIREMENTS = { "signedjson>=1.0.0": ["signedjson>=1.0.0"], "pynacl>=1.2.1": ["nacl>=1.2.1", "nacl.bindings"], "service_identity>=1.0.0": ["service_identity>=1.0.0"], - "Twisted>=16.0.0": ["twisted>=16.0.0"], + + # we break under Twisted 18.4 + # (https://github.com/matrix-org/synapse/issues/3135) + "Twisted>=16.0.0,<18.4": ["twisted>=16.0.0"], + "pyopenssl>=0.14": ["OpenSSL>=0.14"], "pyyaml": ["yaml"], "pyasn1": ["pyasn1"], @@ -39,6 +56,7 @@ REQUIREMENTS = { "pymacaroons-pynacl": ["pymacaroons"], "msgpack-python>=0.3.0": ["msgpack"], "phonenumbers>=8.2.0": ["phonenumbers"], + "six": ["six"], } CONDITIONAL_REQUIREMENTS = { "web_client": { -- cgit 1.4.1 From dbf76fd4b9aa555ef9ab11455a9ed77014d991bd Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 26 Apr 2018 13:05:30 +0100 Subject: jenkins build: make sure we have a recent setuptools --- jenkins/prepare_synapse.sh | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/jenkins/prepare_synapse.sh b/jenkins/prepare_synapse.sh index ffcb1cfab9..219828fa7b 100755 --- a/jenkins/prepare_synapse.sh +++ b/jenkins/prepare_synapse.sh @@ -1,5 +1,7 @@ #! /bin/bash +set -eux + cd "`dirname $0`/.." TOX_DIR=$WORKSPACE/.tox @@ -14,7 +16,16 @@ fi tox -e py27 --notest -v TOX_BIN=$TOX_DIR/py27/bin -$TOX_BIN/pip install setuptools + +# cryptography 2.2 requires setuptools >= 18.5. +# +# older versions of virtualenv (?) give us a virtualenv with the same version +# of setuptools as is installed on the system python (and tox runs virtualenv +# under python3, so we get the version of setuptools that is installed on that). +# +# anyway, make sure that we have a recent enough setuptools. 
+$TOX_BIN/pip install 'setuptools>=18.5' + { python synapse/python_dependencies.py echo lxml psycopg2 } | xargs $TOX_BIN/pip install -- cgit 1.4.1 From 28dd536e80167677ce45bd8eeee95c6ff6eff66f Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Thu, 26 Apr 2018 15:51:39 +0100 Subject: update changelog and bump version to 0.28.0 --- CHANGES.rst | 9 +++++++++ synapse/__init__.py | 2 +- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index b7da58edbb..74f454cb5b 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,12 @@ +Changes in synapse v0.28.0-rc1 (2018-04-26) +=========================================== + +Bug Fixes: + +* Fix quarantine media admin API and search reindex (PR #3130) +* Fix media admin APIs (PR #3134) + + Changes in synapse v0.28.0-rc1 (2018-04-24) =========================================== diff --git a/synapse/__init__.py b/synapse/__init__.py index 2b2c440eb8..4924f44d4e 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -16,4 +16,4 @@ """ This is a reference implementation of a Matrix home server. """ -__version__ = "0.28.0-rc1" +__version__ = "0.28.0" -- cgit 1.4.1 From 31c8be956f9addc801808f9b22108326287f7143 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 27 Apr 2018 01:56:58 +0100 Subject: also upgrade pip when installing --- jenkins/prepare_synapse.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/jenkins/prepare_synapse.sh b/jenkins/prepare_synapse.sh index 219828fa7b..a30179f2aa 100755 --- a/jenkins/prepare_synapse.sh +++ b/jenkins/prepare_synapse.sh @@ -26,6 +26,10 @@ TOX_BIN=$TOX_DIR/py27/bin # anyway, make sure that we have a recent enough setuptools. $TOX_BIN/pip install 'setuptools>=18.5' +# we also need a semi-recent version of pip, because old ones fail to install +# the "enum34" dependency of cryptography. +$TOX_BIN/pip install 'pip>=10' + { python synapse/python_dependencies.py echo lxml psycopg2 } | xargs $TOX_BIN/pip install -- cgit 1.4.1 From 9255a6cb17716c022ebae1dbe9c142b78ca86ea7 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 27 Apr 2018 11:07:40 +0100 Subject: Improve exception handling for background processes There were a bunch of places where we fire off a process to happen in the background, but don't have any exception handling on it - instead relying on the unhandled error being logged when the relevant deferred gets garbage-collected. This is unsatisfactory for a number of reasons: - logging on garbage collection is best-effort and may happen some time after the error, if at all - it can be hard to figure out where the error actually happened. - it is logged as a scary CRITICAL error which (a) I always forget to grep for and (b) it's not really CRITICAL if a background process we don't care about fails. So this is an attempt to add exception handling to everything we fire off into the background.
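For illustration, the pattern this commit applies throughout is roughly the following - a minimal sketch, not Synapse's actual code; `do_work` and its argument are invented stand-ins:

```
import logging

from twisted.internet import defer

from synapse.util.logcontext import run_in_background

logger = logging.getLogger(__name__)


@defer.inlineCallbacks
def do_work(arg):
    # invented stand-in for some asynchronous background operation
    yield defer.succeed(None)
    raise RuntimeError("boom: %s" % (arg,))


@defer.inlineCallbacks
def do_work_and_log_failures(arg):
    # wrap the real work in try/except so that a failure is logged where
    # it happens, instead of surfacing much later as an unhandled
    # CRITICAL error when the deferred is garbage-collected
    try:
        yield do_work(arg)
    except Exception:
        logger.exception("Error doing background work")


# fire-and-forget: nothing ever yields on the returned deferred
run_in_background(do_work_and_log_failures, "some-arg")
```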
--- synapse/app/appservice.py | 15 +++-- synapse/app/federation_sender.py | 27 +++++---- synapse/app/pusher.py | 31 +++++----- synapse/app/synchrotron.py | 95 ++++++++++++++++--------------- synapse/app/user_dir.py | 13 ++++- synapse/appservice/scheduler.py | 25 ++++---- synapse/crypto/keyring.py | 93 +++++++++++++++--------------- synapse/federation/transaction_queue.py | 2 + synapse/federation/transport/server.py | 13 ++++- synapse/groups/attestations.py | 44 +++++++------- synapse/handlers/message.py | 22 +++++-- synapse/handlers/presence.py | 19 +++++-- synapse/handlers/receipts.py | 61 ++++++++++---------- synapse/handlers/typing.py | 43 +++++++------- synapse/notifier.py | 13 +++-- synapse/push/emailpusher.py | 11 ++-- synapse/push/httppusher.py | 5 +- synapse/rest/media/v1/storage_provider.py | 9 ++- synapse/storage/event_push_actions.py | 24 +++++--- synapse/util/logcontext.py | 7 ++- 20 files changed, 335 insertions(+), 237 deletions(-) diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py index f2540023a7..58f2c9d68c 100644 --- a/synapse/app/appservice.py +++ b/synapse/app/appservice.py @@ -32,10 +32,10 @@ from synapse.replication.tcp.client import ReplicationClientHandler from synapse.server import HomeServer from synapse.storage.engines import create_engine from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext, preserve_fn +from synapse.util.logcontext import LoggingContext, run_in_background from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string -from twisted.internet import reactor +from twisted.internet import reactor, defer from twisted.web.resource import NoResource logger = logging.getLogger("synapse.app.appservice") @@ -112,9 +112,14 @@ class ASReplicationHandler(ReplicationClientHandler): if stream_name == "events": max_stream_id = self.store.get_room_max_stream_ordering() - preserve_fn( - self.appservice_handler.notify_interested_services - )(max_stream_id) + run_in_background(self._notify_app_services, max_stream_id) + + @defer.inlineCallbacks + def _notify_app_services(self, room_stream_id): + try: + yield self.appservice_handler.notify_interested_services(room_stream_id) + except Exception: + logger.exception("Error notifying application services of event") def start(config_options): diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py index 0cc3331519..4f2a9ca21a 100644 --- a/synapse/app/federation_sender.py +++ b/synapse/app/federation_sender.py @@ -237,19 +237,22 @@ class FederationSenderHandler(object): @defer.inlineCallbacks def update_token(self, token): - self.federation_position = token - - # We linearize here to ensure we don't have races updating the token - with (yield self._fed_position_linearizer.queue(None)): - if self._last_ack < self.federation_position: - yield self.store.update_federation_out_pos( - "federation", self.federation_position - ) + try: + self.federation_position = token + + # We linearize here to ensure we don't have races updating the token + with (yield self._fed_position_linearizer.queue(None)): + if self._last_ack < self.federation_position: + yield self.store.update_federation_out_pos( + "federation", self.federation_position + ) - # We ACK this token over replication so that the master can drop - # its in memory queues - self.replication_client.send_federation_ack(self.federation_position) - self._last_ack = self.federation_position + # We ACK this token over replication so that the 
master can drop + # its in memory queues + self.replication_client.send_federation_ack(self.federation_position) + self._last_ack = self.federation_position + except Exception: + logger.exception("Error updating federation stream position") if __name__ == '__main__': diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index d5c3a85195..739d113ad5 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -144,20 +144,23 @@ class PusherReplicationHandler(ReplicationClientHandler): @defer.inlineCallbacks def poke_pushers(self, stream_name, token, rows): - if stream_name == "pushers": - for row in rows: - if row.deleted: - yield self.stop_pusher(row.user_id, row.app_id, row.pushkey) - else: - yield self.start_pusher(row.user_id, row.app_id, row.pushkey) - elif stream_name == "events": - yield self.pusher_pool.on_new_notifications( - token, token, - ) - elif stream_name == "receipts": - yield self.pusher_pool.on_new_receipts( - token, token, set(row.room_id for row in rows) - ) + try: + if stream_name == "pushers": + for row in rows: + if row.deleted: + yield self.stop_pusher(row.user_id, row.app_id, row.pushkey) + else: + yield self.start_pusher(row.user_id, row.app_id, row.pushkey) + elif stream_name == "events": + yield self.pusher_pool.on_new_notifications( + token, token, + ) + elif stream_name == "receipts": + yield self.pusher_pool.on_new_receipts( + token, token, set(row.room_id for row in rows) + ) + except Exception: + logger.exception("Error poking pushers") def stop_pusher(self, user_id, app_id, pushkey): key = "%s:%s" % (app_id, pushkey) diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index 2fddcd935a..777da564d7 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -340,55 +340,58 @@ class SyncReplicationHandler(ReplicationClientHandler): @defer.inlineCallbacks def process_and_notify(self, stream_name, token, rows): - if stream_name == "events": - # We shouldn't get multiple rows per token for events stream, so - # we don't need to optimise this for multiple rows. - for row in rows: - event = yield self.store.get_event(row.event_id) - extra_users = () - if event.type == EventTypes.Member: - extra_users = (event.state_key,) - max_token = self.store.get_room_max_stream_ordering() - self.notifier.on_new_room_event( - event, token, max_token, extra_users + try: + if stream_name == "events": + # We shouldn't get multiple rows per token for events stream, so + # we don't need to optimise this for multiple rows. 
+ for row in rows: + event = yield self.store.get_event(row.event_id) + extra_users = () + if event.type == EventTypes.Member: + extra_users = (event.state_key,) + max_token = self.store.get_room_max_stream_ordering() + self.notifier.on_new_room_event( + event, token, max_token, extra_users + ) + elif stream_name == "push_rules": + self.notifier.on_new_event( + "push_rules_key", token, users=[row.user_id for row in rows], ) - elif stream_name == "push_rules": - self.notifier.on_new_event( - "push_rules_key", token, users=[row.user_id for row in rows], - ) - elif stream_name in ("account_data", "tag_account_data",): - self.notifier.on_new_event( - "account_data_key", token, users=[row.user_id for row in rows], - ) - elif stream_name == "receipts": - self.notifier.on_new_event( - "receipt_key", token, rooms=[row.room_id for row in rows], - ) - elif stream_name == "typing": - self.typing_handler.process_replication_rows(token, rows) - self.notifier.on_new_event( - "typing_key", token, rooms=[row.room_id for row in rows], - ) - elif stream_name == "to_device": - entities = [row.entity for row in rows if row.entity.startswith("@")] - if entities: + elif stream_name in ("account_data", "tag_account_data",): self.notifier.on_new_event( - "to_device_key", token, users=entities, + "account_data_key", token, users=[row.user_id for row in rows], ) - elif stream_name == "device_lists": - all_room_ids = set() - for row in rows: - room_ids = yield self.store.get_rooms_for_user(row.user_id) - all_room_ids.update(room_ids) - self.notifier.on_new_event( - "device_list_key", token, rooms=all_room_ids, - ) - elif stream_name == "presence": - yield self.presence_handler.process_replication_rows(token, rows) - elif stream_name == "receipts": - self.notifier.on_new_event( - "groups_key", token, users=[row.user_id for row in rows], - ) + elif stream_name == "receipts": + self.notifier.on_new_event( + "receipt_key", token, rooms=[row.room_id for row in rows], + ) + elif stream_name == "typing": + self.typing_handler.process_replication_rows(token, rows) + self.notifier.on_new_event( + "typing_key", token, rooms=[row.room_id for row in rows], + ) + elif stream_name == "to_device": + entities = [row.entity for row in rows if row.entity.startswith("@")] + if entities: + self.notifier.on_new_event( + "to_device_key", token, users=entities, + ) + elif stream_name == "device_lists": + all_room_ids = set() + for row in rows: + room_ids = yield self.store.get_rooms_for_user(row.user_id) + all_room_ids.update(room_ids) + self.notifier.on_new_event( + "device_list_key", token, rooms=all_room_ids, + ) + elif stream_name == "presence": + yield self.presence_handler.process_replication_rows(token, rows) + elif stream_name == "groups": + self.notifier.on_new_event( + "groups_key", token, users=[row.user_id for row in rows], + ) + except Exception: + logger.exception("Error processing replication") def start(config_options): diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py index 5f845e80d1..5ba7e9b416 100644 --- a/synapse/app/user_dir.py +++ b/synapse/app/user_dir.py @@ -39,10 +39,10 @@ from synapse.storage.engines import create_engine from synapse.storage.user_directory import UserDirectoryStore from synapse.util.caches.stream_change_cache import StreamChangeCache from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext, preserve_fn +from synapse.util.logcontext import LoggingContext, run_in_background from synapse.util.manhole import manhole from 
synapse.util.versionstring import get_version_string -from twisted.internet import reactor +from twisted.internet import reactor, defer from twisted.web.resource import NoResource logger = logging.getLogger("synapse.app.user_dir") @@ -164,7 +164,14 @@ class UserDirectoryReplicationHandler(ReplicationClientHandler): stream_name, token, rows ) if stream_name == "current_state_deltas": - preserve_fn(self.user_directory.notify_new_event)() + run_in_background(self._notify_directory) + + @defer.inlineCallbacks + def _notify_directory(self): + try: + yield self.user_directory.notify_new_event() + except Exception: + logger.exception("Error notifying user directory of state update") def start(config_options): diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py index 6da315473d..dfc8d1b42e 100644 --- a/synapse/appservice/scheduler.py +++ b/synapse/appservice/scheduler.py @@ -176,17 +176,20 @@ class _TransactionController(object): @defer.inlineCallbacks def _start_recoverer(self, service): - yield self.store.set_appservice_state( - service, - ApplicationServiceState.DOWN - ) - logger.info( - "Application service falling behind. Starting recoverer. AS ID %s", - service.id - ) - recoverer = self.recoverer_fn(service, self.on_recovered) - self.add_recoverers([recoverer]) - recoverer.recover() + try: + yield self.store.set_appservice_state( + service, + ApplicationServiceState.DOWN + ) + logger.info( + "Application service falling behind. Starting recoverer. AS ID %s", + service.id + ) + recoverer = self.recoverer_fn(service, self.on_recovered) + self.add_recoverers([recoverer]) + recoverer.recover() + except Exception: + logger.exception("Error starting AS recoverer") @defer.inlineCallbacks def _is_service_up(self, service): diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index fce83d445f..32cbddbc53 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -146,53 +146,56 @@ class Keyring(object): verify_requests (List[VerifyKeyRequest]): """ - # create a deferred for each server we're going to look up the keys - # for; we'll resolve them once we have completed our lookups. - # These will be passed into wait_for_previous_lookups to block - # any other lookups until we have finished. - # The deferreds are called with no logcontext. - server_to_deferred = { - rq.server_name: defer.Deferred() - for rq in verify_requests - } - - # We want to wait for any previous lookups to complete before - # proceeding. - yield self.wait_for_previous_lookups( - [rq.server_name for rq in verify_requests], - server_to_deferred, - ) - - # Actually start fetching keys. - self._get_server_verify_keys(verify_requests) - - # When we've finished fetching all the keys for a given server_name, - # resolve the deferred passed to `wait_for_previous_lookups` so that - # any lookups waiting will proceed. 
- # - # map from server name to a set of request ids - server_to_request_ids = {} - - for verify_request in verify_requests: - server_name = verify_request.server_name - request_id = id(verify_request) - server_to_request_ids.setdefault(server_name, set()).add(request_id) - - def remove_deferreds(res, verify_request): - server_name = verify_request.server_name - request_id = id(verify_request) - server_to_request_ids[server_name].discard(request_id) - if not server_to_request_ids[server_name]: - d = server_to_deferred.pop(server_name, None) - if d: - d.callback(None) - return res - - for verify_request in verify_requests: - verify_request.deferred.addBoth( - remove_deferreds, verify_request, + try: + # create a deferred for each server we're going to look up the keys + # for; we'll resolve them once we have completed our lookups. + # These will be passed into wait_for_previous_lookups to block + # any other lookups until we have finished. + # The deferreds are called with no logcontext. + server_to_deferred = { + rq.server_name: defer.Deferred() + for rq in verify_requests + } + + # We want to wait for any previous lookups to complete before + # proceeding. + yield self.wait_for_previous_lookups( + [rq.server_name for rq in verify_requests], + server_to_deferred, ) + # Actually start fetching keys. + self._get_server_verify_keys(verify_requests) + + # When we've finished fetching all the keys for a given server_name, + # resolve the deferred passed to `wait_for_previous_lookups` so that + # any lookups waiting will proceed. + # + # map from server name to a set of request ids + server_to_request_ids = {} + + for verify_request in verify_requests: + server_name = verify_request.server_name + request_id = id(verify_request) + server_to_request_ids.setdefault(server_name, set()).add(request_id) + + def remove_deferreds(res, verify_request): + server_name = verify_request.server_name + request_id = id(verify_request) + server_to_request_ids[server_name].discard(request_id) + if not server_to_request_ids[server_name]: + d = server_to_deferred.pop(server_name, None) + if d: + d.callback(None) + return res + + for verify_request in verify_requests: + verify_request.deferred.addBoth( + remove_deferreds, verify_request, + ) + except Exception: + logger.exception("Error starting key lookups") + @defer.inlineCallbacks def wait_for_previous_lookups(self, server_names, server_to_deferred): """Waits for any previous key lookups for the given servers to finish. 
diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py index 963d938edd..ded2b1871a 100644 --- a/synapse/federation/transaction_queue.py +++ b/synapse/federation/transaction_queue.py @@ -323,6 +323,8 @@ class TransactionQueue(object): break yield self._process_presence_inner(states_map.values()) + except Exception: + logger.exception("Error sending presence states to servers") finally: self._processing_pending_presence = False diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index ff0656df3e..19d09f5422 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -25,7 +25,7 @@ from synapse.http.servlet import ( ) from synapse.util.ratelimitutils import FederationRateLimiter from synapse.util.versionstring import get_version_string -from synapse.util.logcontext import preserve_fn +from synapse.util.logcontext import run_in_background from synapse.types import ThirdPartyInstanceID, get_domain_from_id import functools @@ -152,11 +152,18 @@ class Authenticator(object): # alive retry_timings = yield self.store.get_destination_retry_timings(origin) if retry_timings and retry_timings["retry_last_ts"]: - logger.info("Marking origin %r as up", origin) - preserve_fn(self.store.set_destination_retry_timings)(origin, 0, 0) + run_in_background(self._reset_retry_timings, origin) defer.returnValue(origin) + @defer.inlineCallbacks + def _reset_retry_timings(self, origin): + try: + logger.info("Marking origin %r as up", origin) + yield self.store.set_destination_retry_timings(origin, 0, 0) + except Exception: + logger.exception("Error resetting retry timings on %s", origin) + class BaseFederationServlet(object): REQUIRE_AUTH = True diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py index 1fb709e6c3..7187df2508 100644 --- a/synapse/groups/attestations.py +++ b/synapse/groups/attestations.py @@ -165,28 +165,32 @@ class GroupAttestionRenewer(object): @defer.inlineCallbacks def _renew_attestation(group_id, user_id): - if not self.is_mine_id(group_id): - destination = get_domain_from_id(group_id) - elif not self.is_mine_id(user_id): - destination = get_domain_from_id(user_id) - else: - logger.warn( - "Incorrectly trying to do attestations for user: %r in %r", - user_id, group_id, + try: + if not self.is_mine_id(group_id): + destination = get_domain_from_id(group_id) + elif not self.is_mine_id(user_id): + destination = get_domain_from_id(user_id) + else: + logger.warn( + "Incorrectly trying to do attestations for user: %r in %r", + user_id, group_id, + ) + yield self.store.remove_attestation_renewal(group_id, user_id) + return + + attestation = self.attestations.create_attestation(group_id, user_id) + + yield self.transport_client.renew_group_attestation( + destination, group_id, user_id, + content={"attestation": attestation}, ) - yield self.store.remove_attestation_renewal(group_id, user_id) - return - - attestation = self.attestations.create_attestation(group_id, user_id) - yield self.transport_client.renew_group_attestation( - destination, group_id, user_id, - content={"attestation": attestation}, - ) - - yield self.store.update_attestation_renewal( - group_id, user_id, attestation - ) + yield self.store.update_attestation_renewal( + group_id, user_id, attestation + ) + except Exception: + logger.exception("Error renewing attestation of %r in %r", + user_id, group_id) for row in rows: group_id = row["group_id"] diff --git a/synapse/handlers/message.py 
b/synapse/handlers/message.py index 21628a8540..d168ff5b86 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -857,15 +857,25 @@ class EventCreationHandler(object): @defer.inlineCallbacks def _notify(): yield run_on_reactor() - self.notifier.on_new_room_event( - event, event_stream_id, max_stream_id, - extra_users=extra_users - ) + try: + self.notifier.on_new_room_event( + event, event_stream_id, max_stream_id, + extra_users=extra_users + ) + except Exception: + logger.exception("Error notifying about new room event") preserve_fn(_notify)() if event.type == EventTypes.Message: - presence = self.hs.get_presence_handler() # We don't want to block sending messages on any presence code. This # matters as sometimes presence code can take a while. - preserve_fn(presence.bump_presence_active_time)(requester.user) + run_in_background(self._bump_active_time, requester.user) + + @defer.inlineCallbacks + def _bump_active_time(self, user): + try: + presence = self.hs.get_presence_handler() + yield presence.bump_presence_active_time(user) + except Exception: + logger.exception("Error bumping presence active time") diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index a5e501897c..585f3e4da2 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -31,7 +31,7 @@ from synapse.storage.presence import UserPresenceState from synapse.util.caches.descriptors import cachedInlineCallbacks from synapse.util.async import Linearizer -from synapse.util.logcontext import preserve_fn +from synapse.util.logcontext import run_in_background from synapse.util.logutils import log_function from synapse.util.metrics import Measure from synapse.util.wheel_timer import WheelTimer @@ -254,6 +254,14 @@ class PresenceHandler(object): logger.info("Finished _persist_unpersisted_changes") + @defer.inlineCallbacks + def _update_states_and_catch_exception(self, new_states): + try: + res = yield self._update_states(new_states) + defer.returnValue(res) + except Exception: + logger.exception("Error updating presence") + @defer.inlineCallbacks def _update_states(self, new_states): """Updates presence of users. Sets the appropriate timeouts. Pokes @@ -364,7 +372,7 @@ class PresenceHandler(object): now=now, ) - preserve_fn(self._update_states)(changes) + run_in_background(self._update_states_and_catch_exception, changes) except Exception: logger.exception("Exception in _handle_timeouts loop") @@ -422,20 +430,23 @@ class PresenceHandler(object): @defer.inlineCallbacks def _end(): - if affect_presence: + try: self.user_to_num_current_syncs[user_id] -= 1 prev_state = yield self.current_state_for_user(user_id) yield self._update_states([prev_state.copy_and_replace( last_user_sync_ts=self.clock.time_msec(), )]) + except Exception: + logger.exception("Error updating presence after sync") @contextmanager def _user_syncing(): try: yield finally: - preserve_fn(_end)() + if affect_presence: + run_in_background(_end) defer.returnValue(_user_syncing()) diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index 3f215c2b4e..2e0672161c 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -135,37 +135,40 @@ class ReceiptsHandler(BaseHandler): """Given a list of receipts, works out which remote servers should be poked and pokes them. """ - # TODO: Some of this stuff should be coallesced. 
- for receipt in receipts: - room_id = receipt["room_id"] - receipt_type = receipt["receipt_type"] - user_id = receipt["user_id"] - event_ids = receipt["event_ids"] - data = receipt["data"] - - users = yield self.state.get_current_user_in_room(room_id) - remotedomains = set(get_domain_from_id(u) for u in users) - remotedomains = remotedomains.copy() - remotedomains.discard(self.server_name) - - logger.debug("Sending receipt to: %r", remotedomains) - - for domain in remotedomains: - self.federation.send_edu( - destination=domain, - edu_type="m.receipt", - content={ - room_id: { - receipt_type: { - user_id: { - "event_ids": event_ids, - "data": data, + try: + # TODO: Some of this stuff should be coalesced. + for receipt in receipts: + room_id = receipt["room_id"] + receipt_type = receipt["receipt_type"] + user_id = receipt["user_id"] + event_ids = receipt["event_ids"] + data = receipt["data"] + + users = yield self.state.get_current_user_in_room(room_id) + remotedomains = set(get_domain_from_id(u) for u in users) + remotedomains = remotedomains.copy() + remotedomains.discard(self.server_name) + + logger.debug("Sending receipt to: %r", remotedomains) + + for domain in remotedomains: + self.federation.send_edu( + destination=domain, + edu_type="m.receipt", + content={ + room_id: { + receipt_type: { + user_id: { + "event_ids": event_ids, + "data": data, + } } - } + }, }, - }, - key=(room_id, receipt_type, user_id), - ) + key=(room_id, receipt_type, user_id), + ) + except Exception: + logger.exception("Error pushing receipts to remote servers") @defer.inlineCallbacks def get_receipts_for_room(self, room_id, to_key): diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index 77c0cf146f..823e2e27e1 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -205,28 +205,31 @@ class TypingHandler(object): @defer.inlineCallbacks def _push_remote(self, member, typing): - users = yield self.state.get_current_user_in_room(member.room_id) - self._member_last_federation_poke[member] = self.clock.time_msec() + try: + users = yield self.state.get_current_user_in_room(member.room_id) + self._member_last_federation_poke[member] = self.clock.time_msec() - now = self.clock.time_msec() - self.wheel_timer.insert( - now=now, - obj=member, - then=now + FEDERATION_PING_INTERVAL, - ) + now = self.clock.time_msec() + self.wheel_timer.insert( + now=now, + obj=member, + then=now + FEDERATION_PING_INTERVAL, + ) - for domain in set(get_domain_from_id(u) for u in users): - if domain != self.server_name: - self.federation.send_edu( - destination=domain, - edu_type="m.typing", - content={ - "room_id": member.room_id, - "user_id": member.user_id, - "typing": typing, - }, - key=member, - ) + for domain in set(get_domain_from_id(u) for u in users): + if domain != self.server_name: + self.federation.send_edu( + destination=domain, + edu_type="m.typing", + content={ + "room_id": member.room_id, + "user_id": member.user_id, + "typing": typing, + }, + key=member, + ) + except Exception: + logger.exception("Error pushing typing notif to remotes") @defer.inlineCallbacks def _recv_edu(self, origin, content): diff --git a/synapse/notifier.py b/synapse/notifier.py index 0e40a4aad6..939723a404 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -21,7 +21,7 @@ from synapse.handlers.presence import format_user_presence_state from synapse.util import DeferredTimedOutError from synapse.util.logutils import log_function from synapse.util.async import ObservableDeferred -from synapse.util.logcontext 
import PreserveLoggingContext, preserve_fn +from synapse.util.logcontext import PreserveLoggingContext, run_in_background from synapse.util.metrics import Measure from synapse.types import StreamToken from synapse.visibility import filter_events_for_client @@ -251,9 +251,7 @@ class Notifier(object): def _on_new_room_event(self, event, room_stream_id, extra_users=[]): """Notify any user streams that are interested in this room event""" # poke any interested application service. - preserve_fn(self.appservice_handler.notify_interested_services)( - room_stream_id - ) + run_in_background(self._notify_app_services, room_stream_id) if self.federation_sender: self.federation_sender.notify_new_events(room_stream_id) @@ -267,6 +265,13 @@ class Notifier(object): rooms=[event.room_id], ) + @defer.inlineCallbacks + def _notify_app_services(self, room_stream_id): + try: + yield self.appservice_handler.notify_interested_services(room_stream_id) + except Exception: + logger.exception("Error notifying application services of event") + def on_new_event(self, stream_key, new_token, users=[], rooms=[]): """ Used to inform listeners that something has happend event wise. diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index 58df98a793..ba7286cb72 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -77,10 +77,13 @@ class EmailPusher(object): @defer.inlineCallbacks def on_started(self): if self.mailer is not None: - self.throttle_params = yield self.store.get_throttle_params_by_room( - self.pusher_id - ) - yield self._process() + try: + self.throttle_params = yield self.store.get_throttle_params_by_room( + self.pusher_id + ) + yield self._process() + except Exception: + logger.exception("Error starting email pusher") def on_stop(self): if self.timed_call: diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 2cbac571b8..1420d378ef 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -94,7 +94,10 @@ class HttpPusher(object): @defer.inlineCallbacks def on_started(self): - yield self._process() + try: + yield self._process() + except Exception: + logger.exception("Error starting http pusher") @defer.inlineCallbacks def on_new_notifications(self, min_stream_ordering, max_stream_ordering): diff --git a/synapse/rest/media/v1/storage_provider.py b/synapse/rest/media/v1/storage_provider.py index c188192f2b..0252afd9d3 100644 --- a/synapse/rest/media/v1/storage_provider.py +++ b/synapse/rest/media/v1/storage_provider.py @@ -18,7 +18,7 @@ from twisted.internet import defer, threads from .media_storage import FileResponder from synapse.config._base import Config -from synapse.util.logcontext import preserve_fn +from synapse.util.logcontext import run_in_background import logging import os @@ -87,7 +87,12 @@ class StorageProviderWrapper(StorageProvider): return self.backend.store_file(path, file_info) else: # TODO: Handle errors. 
- preserve_fn(self.backend.store_file)(path, file_info) + def store(): + try: + return self.backend.store_file(path, file_info) + except Exception: + logger.exception("Error storing file") + run_in_background(store) return defer.succeed(None) def fetch(self, path, file_info): diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index e78f8d0114..c22762eb5c 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -448,6 +448,7 @@ class EventPushActionsWorkerStore(SQLBaseStore): "add_push_actions_to_staging", _add_push_actions_to_staging_txn ) + @defer.inlineCallbacks def remove_push_actions_from_staging(self, event_id): """Called if we failed to persist the event to ensure that stale push actions don't build up in the DB @@ -456,13 +457,22 @@ class EventPushActionsWorkerStore(SQLBaseStore): event_id (str) """ - return self._simple_delete( - table="event_push_actions_staging", - keyvalues={ - "event_id": event_id, - }, - desc="remove_push_actions_from_staging", - ) + try: + res = yield self._simple_delete( + table="event_push_actions_staging", + keyvalues={ + "event_id": event_id, + }, + desc="remove_push_actions_from_staging", + ) + defer.returnValue(res) + except Exception: + # this method is called from an exception handler, so propagating + # another exception here really isn't helpful - there's nothing + # the caller can do about it. Just log the exception and move on. + logger.exception( + "Error removing push actions after event persistence failure", + ) @defer.inlineCallbacks def _find_stream_orderings_for_times(self): diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index d59adc236e..d6587e4409 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -305,7 +305,12 @@ def run_in_background(f, *args, **kwargs): deferred returned by the funtion completes. Useful for wrapping functions that return a deferred which you don't yield - on. + on (for instance because you want to pass it to deferred.gatherResults()). + + Note that if you completely discard the result, you should make sure that + `f` doesn't raise any deferred exceptions, otherwise a scary-looking + CRITICAL error about an unhandled error will be logged without much + indication about where it came from. """ current = LoggingContext.current_context() res = f(*args, **kwargs) -- cgit 1.4.1 From 605defb9e4c274d61d9da86546e6fd6c78f3f6cf Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 27 Apr 2018 11:16:28 +0100 Subject: Add missing consumeErrors In general we want defer.gatherResults to consumeErrors, rather than having exceptions hanging around and getting logged as CRITICAL unhandled errors. 
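To illustrate why `consumeErrors` matters here, a hedged sketch (`fetch` is an invented stand-in): without `consumeErrors=True`, each child failure is delivered twice - once wrapped in the `FirstError` fired by `gatherResults`, and once left in the child deferred, where it eventually gets logged as an unhandled CRITICAL error on garbage collection.

```
from twisted.internet import defer


def fetch(destination):
    # invented stand-in for a per-destination request which may fail
    return defer.fail(RuntimeError("unreachable: %s" % (destination,)))


deferreds = [fetch(d) for d in ("a.example.com", "b.example.com")]

# consumeErrors=True marks the child failures as handled, so the only
# report of each error is the FirstError fired by gatherResults itself
d = defer.gatherResults(deferreds, consumeErrors=True)
d.addErrback(lambda f: f.trap(defer.FirstError))
```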
--- synapse/handlers/e2e_keys.py | 4 ++-- synapse/push/pusherpool.py | 8 ++++++-- synapse/storage/stream.py | 2 +- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 325c0c4a9f..7eb03ad32e 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -141,7 +141,7 @@ class E2eKeysHandler(object): yield make_deferred_yieldable(defer.gatherResults([ preserve_fn(do_remote_query)(destination) for destination in remote_queries_not_in_cache - ])) + ], consumeErrors=True)) defer.returnValue({ "device_keys": results, "failures": failures, @@ -244,7 +244,7 @@ class E2eKeysHandler(object): yield make_deferred_yieldable(defer.gatherResults([ preserve_fn(claim_client_keys)(destination) for destination in remote_queries - ])) + ], consumeErrors=True)) logger.info( "Claimed one-time-keys: %s", diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 134e89b371..2f467d1f9c 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -142,7 +142,9 @@ class PusherPool: ) ) - yield make_deferred_yieldable(defer.gatherResults(deferreds)) + yield make_deferred_yieldable( + defer.gatherResults(deferreds, consumeErrors=True), + ) except Exception: logger.exception("Exception in pusher on_new_notifications") @@ -167,7 +169,9 @@ class PusherPool: preserve_fn(p.on_new_receipts)(min_stream_id, max_stream_id) ) - yield make_deferred_yieldable(defer.gatherResults(deferreds)) + yield make_deferred_yieldable( + defer.gatherResults(deferreds, consumeErrors=True), + ) except Exception: logger.exception("Exception in pusher on_new_receipts") diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 2956c3b3e0..3b8b539993 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -202,7 +202,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): room_id, from_key, to_key, limit, order=order, ) for room_id in rm_ids - ])) + ], consumeErrors=True)) results.update(dict(zip(rm_ids, res))) defer.returnValue(results) -- cgit 1.4.1 From 6e10eed28e5da3700b6f545ad122dad1407cfb2c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 16 Feb 2018 11:16:29 +0000 Subject: Refactor event storage to not require state This is in preparation for using contexts that may or may not have the current_state_ids set. This will allow us to avoid unnecessarily pulling out state for an event on the master process when using workers. We also add a check to see if the state groups of the old extremities are the same as the new ones. 
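As a toy illustration of the new short-circuit - plain dicts and sets standing in for the storage layer, with invented values:

```
# invented mapping from forward-extremity event_id to its state group
event_id_to_state_group = {
    "$old1": 1, "$old2": 2,
    "$new1": 1, "$new2": 2,
}

old_latest_event_ids = {"$old1", "$old2"}
new_latest_event_ids = {"$new1", "$new2"}

old_state_groups = set(
    event_id_to_state_group[e] for e in old_latest_event_ids
)
new_state_groups = set(
    event_id_to_state_group[e] for e in new_latest_event_ids
)

# the extremities changed, but they resolve to the same state groups,
# so the room's current state is unchanged and resolution can be skipped
if old_state_groups == new_state_groups:
    print("state groups unchanged; skipping state resolution")
```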
--- synapse/storage/events.py | 104 +++++++++++++++++++++++++++++----------------- 1 file changed, 67 insertions(+), 37 deletions(-) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 5fe4a0e56c..a9269707df 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -22,7 +22,6 @@ import logging import simplejson as json from twisted.internet import defer - from synapse.storage.events_worker import EventsWorkerStore from synapse.util.async import ObservableDeferred from synapse.util.frozenutils import frozendict_json_encoder @@ -425,7 +424,9 @@ class EventsStore(EventsWorkerStore): ) current_state = yield self._get_new_state_after_events( room_id, - ev_ctx_rm, new_latest_event_ids, + ev_ctx_rm, + latest_event_ids, + new_latest_event_ids, ) if current_state is not None: current_state_for_room[room_id] = current_state @@ -513,7 +514,8 @@ class EventsStore(EventsWorkerStore): defer.returnValue(new_latest_event_ids) @defer.inlineCallbacks - def _get_new_state_after_events(self, room_id, events_context, new_latest_event_ids): + def _get_new_state_after_events(self, room_id, events_context, old_latest_event_ids, + new_latest_event_ids): """Calculate the current state dict after adding some new events to a room @@ -524,6 +526,9 @@ class EventsStore(EventsWorkerStore): events_context (list[(EventBase, EventContext)]): events and contexts which are being added to the room + old_latest_event_ids (iterable[str]): + the old forward extremities for the room. + new_latest_event_ids (iterable[str]): the new forward extremities for the room. @@ -534,64 +539,89 @@ class EventsStore(EventsWorkerStore): """ if not new_latest_event_ids: - defer.returnValue({}) + return # map from state_group to ((type, key) -> event_id) state map - state_groups = {} - missing_event_ids = [] - was_updated = False + state_groups_map = {} + for ev, ctx in events_context: + if ctx.state_group is None: + # I don't think this can happen, but let's double-check + raise Exception( + "Context for new extremity event %s has no state " + "group" % (ev.event_id, ), + ) + + if ctx.state_group in state_groups_map: + continue + + state_groups_map[ctx.state_group] = ctx.current_state_ids + + # We need to map the event_ids to their state groups. First, lets + # check if the event is one we're persisting and then we can pull the + # state group from its context. + # Otherwise we need to pull the state group from the database. + + # Set of events we need to fetch groups for. (We know none of the old + # extremities are going to be in events_context). + missing_event_ids = set(old_latest_event_ids) + + event_id_to_state_group = {} for event_id in new_latest_event_ids: - # First search in the list of new events we're adding, - # and then use the current state from that + # First search in the list of new events we're adding. 
for ev, ctx in events_context: if event_id == ev.event_id: - if ctx.current_state_ids is None: - raise Exception("Unknown current state") - - if ctx.state_group is None: - # I don't think this can happen, but let's double-check - raise Exception( - "Context for new extremity event %s has no state " - "group" % (event_id, ), - ) - - # If we've already seen the state group don't bother adding - # it to the state sets again - if ctx.state_group not in state_groups: - state_groups[ctx.state_group] = ctx.current_state_ids - if ctx.delta_ids or hasattr(ev, "state_key"): - was_updated = True + event_id_to_state_group[event_id] = ctx.state_group break else: # If we couldn't find it, then we'll need to pull # the state from the database - was_updated = True - missing_event_ids.append(event_id) - - if not was_updated: - return + missing_event_ids.add(event_id) if missing_event_ids: - # Now pull out the state for any missing events from DB + # Now pull out the state groups for any missing events from DB event_to_groups = yield self._get_state_group_for_events( missing_event_ids, ) + event_id_to_state_group.update(event_to_groups) + + # State groups of old_latest_event_ids + old_state_groups = set( + event_id_to_state_group[evid] for evid in old_latest_event_ids + ) + + # State groups of new_latest_event_ids + new_state_groups = set( + event_id_to_state_group[evid] for evid in new_latest_event_ids + ) - groups = set(event_to_groups.itervalues()) - set(state_groups.iterkeys()) + # If the old and new groups are the same then we don't need to do + # anything. + if old_state_groups == new_state_groups: + return - if groups: - group_to_state = yield self._get_state_for_groups(groups) - state_groups.update(group_to_state) + # Now that we have calculated new_state_groups we need to get + # their state IDs so we can resolve to a single state set. + missing_state = new_state_groups - set(state_groups_map) + if missing_state: + group_to_state = yield self._get_state_for_groups(missing_state) + state_groups_map.update(group_to_state) - if len(state_groups) == 1: + if len(new_state_groups) == 1: # If there is only one state group, then we know what the current # state is. - defer.returnValue(state_groups.values()[0]) + defer.returnValue(state_groups_map[new_state_groups.pop()]) + + # Ok, we need to defer to the state handler to resolve our state sets. def get_events(ev_ids): return self.get_events( ev_ids, get_prev_content=False, check_redacted=False, ) + + state_groups = { + sg: state_groups_map[sg] for sg in new_state_groups + } + events_map = {ev.event_id: ev for ev, _ in events_context} logger.debug("calling resolve_state_groups from preserve_events") res = yield self._state_resolution_handler.resolve_state_groups( -- cgit 1.4.1 From 6493b22b42685a6cc06c1113196d305e4d52deed Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 27 Apr 2018 11:40:06 +0100 Subject: reraise exceptions more carefully We need to be careful (under python 2, at least) that when we reraise an exception after doing some error handling, we actually reraise the original exception rather than anything that might have been raised (and handled) during the error handling. 
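The idiom being introduced looks roughly like this - a minimal sketch, with `cleanup` as an invented stand-in for the error handling:

```
import sys

import six


def cleanup():
    # invented stand-in: error handling which raises and handles its own
    # exception, clobbering the thread's exception state under Python 2
    try:
        raise KeyError("problem during cleanup")
    except KeyError:
        pass


try:
    raise ValueError("the real problem")
except Exception:
    # capture the original exception info *before* the cleanup runs...
    tp, value, tb = sys.exc_info()
    cleanup()
    # ...then reraise exactly what was captured; under Python 2 a bare
    # `raise` here could re-raise the cleanup's KeyError instead
    six.reraise(tp, value, tb)
```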
--- synapse/handlers/federation.py | 16 ++++++++++------ synapse/handlers/message.py | 21 ++++++++++++++------- 2 files changed, 24 insertions(+), 13 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index ae7e0d6da2..260df025f9 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -19,9 +19,11 @@ import httplib import itertools import logging +import sys from signedjson.key import decode_verify_key_bytes from signedjson.sign import verify_signed_json +import six from twisted.internet import defer from unpaddedbase64 import decode_base64 @@ -1513,12 +1515,14 @@ class FederationHandler(BaseHandler): backfilled=backfilled, ) except: # noqa: E722, as we reraise the exception this is fine. - # Ensure that we actually remove the entries in the push actions - # staging area - logcontext.preserve_fn( - self.store.remove_push_actions_from_staging - )(event.event_id) - raise + tp, value, tb = sys.exc_info() + + logcontext.run_in_background( + self.store.remove_push_actions_from_staging, + event.event_id, + ) + + six.reraise(tp, value, tb) if not backfilled: # this intentionally does not yield: we don't care about the result diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 21628a8540..8e2e44bdcd 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -13,6 +13,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import logging +import simplejson +import sys + +from canonicaljson import encode_canonical_json +import six from twisted.internet import defer, reactor from twisted.python.failure import Failure @@ -34,11 +40,6 @@ from synapse.replication.http.send_event import send_event_to_master from ._base import BaseHandler -from canonicaljson import encode_canonical_json - -import logging -import simplejson - logger = logging.getLogger(__name__) @@ -729,8 +730,14 @@ class EventCreationHandler(object): except: # noqa: E722, as we reraise the exception this is fine. # Ensure that we actually remove the entries in the push actions # staging area, if we calculated them. 
- preserve_fn(self.store.remove_push_actions_from_staging)(event.event_id) - raise + tp, value, tb = sys.exc_info() + + run_in_background( + self.store.remove_push_actions_from_staging, + event.event_id, + ) + + six.reraise(tp, value, tb) @defer.inlineCallbacks def persist_and_notify_client_event( -- cgit 1.4.1 From 53849ea9d396874ae132ae3cd0a9e8d3ecb7e6d2 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Fri, 27 Apr 2018 12:11:39 +0100 Subject: Update CHANGES.rst --- CHANGES.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 0c8307e439..40d13c6484 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,4 +1,4 @@ -Changes in synapse v0.28.0-rc1 (2018-04-26) +Changes in synapse v0.28.0 (2018-04-26) =========================================== Bug Fixes: -- cgit 1.4.1 From 05ba7e3a44a689c67b67c617a9a23ce44da5a15e Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Fri, 27 Apr 2018 12:13:12 +0100 Subject: Update CHANGES.rst --- CHANGES.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 0c8307e439..40d13c6484 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,4 +1,4 @@ -Changes in synapse v0.28.0-rc1 (2018-04-26) +Changes in synapse v0.28.0 (2018-04-26) =========================================== Bug Fixes: -- cgit 1.4.1 From 13843f771ea64cfd4fb7e2f03854b9506f63fcfd Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 27 Apr 2018 12:17:13 +0100 Subject: Trap exceptions thrown within run_in_background Turn any exceptions that get thrown synchronously within run_in_background into Failures instead. --- synapse/util/logcontext.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index d59adc236e..127581a0b4 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -308,7 +308,13 @@ def run_in_background(f, *args, **kwargs): on. """ current = LoggingContext.current_context() - res = f(*args, **kwargs) + try: + res = f(*args, **kwargs) + except: # noqa: E722 + # the assumption here is that the caller doesn't want to be disturbed + # by synchronous exceptions, so let's turn them into Failures. + return defer.fail() + if isinstance(res, defer.Deferred) and not res.called: # The function will have reset the context before returning, so # we need to restore it now. -- cgit 1.4.1 From 9d2c1b8429996cb9766ac636034485e2a1d685cc Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 27 Apr 2018 12:52:30 +0100 Subject: Backport deferred.addTimeout Twisted 16.0 doesn't have addTimeout, so let's backport it. --- synapse/http/__init__.py | 2 +- synapse/http/client.py | 6 ++- synapse/http/matrixfederationclient.py | 7 ++-- synapse/notifier.py | 22 +++++++---- synapse/util/async.py | 67 ++++++++++++++++++++++++++++++++++ 5 files changed, 90 insertions(+), 14 deletions(-) diff --git a/synapse/http/__init__.py b/synapse/http/__init__.py index 20e568bc43..0d47ccdb59 100644 --- a/synapse/http/__init__.py +++ b/synapse/http/__init__.py @@ -28,7 +28,7 @@ class RequestTimedOutError(SynapseError): def cancelled_to_request_timed_out_error(value): """Turns CancelledErrors into RequestTimedOutErrors. 
- For use with deferred.addTimeout() + For use with async.add_timeout_to_deferred """ if isinstance(value, failure.Failure): value.trap(CancelledError) diff --git a/synapse/http/client.py b/synapse/http/client.py index 35c8d51e71..62309c3365 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -20,6 +20,7 @@ from synapse.api.errors import ( CodeMessageException, MatrixCodeMessageException, SynapseError, Codes, ) from synapse.http import cancelled_to_request_timed_out_error +from synapse.util.async import add_timeout_to_deferred from synapse.util.caches import CACHE_SIZE_FACTOR from synapse.util.logcontext import make_deferred_yieldable import synapse.metrics @@ -102,8 +103,9 @@ class SimpleHttpClient(object): request_deferred = self.agent.request( method, uri, *args, **kwargs ) - request_deferred.addTimeout( - 60, reactor, cancelled_to_request_timed_out_error, + add_timeout_to_deferred( + request_deferred, + 60, cancelled_to_request_timed_out_error, ) response = yield make_deferred_yieldable(request_deferred) diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index fe4b1636a6..30036fe81c 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -22,7 +22,7 @@ from twisted.web._newclient import ResponseDone from synapse.http import cancelled_to_request_timed_out_error from synapse.http.endpoint import matrix_federation_endpoint import synapse.metrics -from synapse.util.async import sleep +from synapse.util.async import sleep, add_timeout_to_deferred from synapse.util import logcontext from synapse.util.logcontext import make_deferred_yieldable import synapse.util.retryutils @@ -193,9 +193,10 @@ class MatrixFederationHttpClient(object): Headers(headers_dict), producer ) - request_deferred.addTimeout( + add_timeout_to_deferred( + request_deferred, timeout / 1000. if timeout else 60, - reactor, cancelled_to_request_timed_out_error, + cancelled_to_request_timed_out_error, ) response = yield make_deferred_yieldable( request_deferred, diff --git a/synapse/notifier.py b/synapse/notifier.py index 1e4f78b993..a1c06e8fca 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -13,15 +13,17 @@ # See the License for the specific language governing permissions and # limitations under the License. -from twisted.internet import defer, reactor -from twisted.internet.defer import TimeoutError +from twisted.internet import defer from synapse.api.constants import EventTypes, Membership from synapse.api.errors import AuthError from synapse.handlers.presence import format_user_presence_state from synapse.util.logutils import log_function -from synapse.util.async import ObservableDeferred +from synapse.util.async import ( + ObservableDeferred, add_timeout_to_deferred, + DeferredTimeoutError, +) from synapse.util.logcontext import PreserveLoggingContext, preserve_fn from synapse.util.metrics import Measure from synapse.types import StreamToken @@ -332,8 +334,9 @@ class Notifier(object): # Now we wait for the _NotifierUserStream to be told there # is a new token. 
listener = user_stream.new_listener(prev_token) - listener.deferred.addTimeout( - (end_time - now) / 1000., reactor, + add_timeout_to_deferred( + listener.deferred, + (end_time - now) / 1000., + ) with PreserveLoggingContext(): yield listener.deferred @@ -347,7 +350,7 @@ class Notifier(object): # Update the prev_token to the current_token since nothing # has happened between the old prev_token and the current_token prev_token = current_token - except TimeoutError: + except DeferredTimeoutError: break except defer.CancelledError: break @@ -552,11 +555,14 @@ class Notifier(object): if end_time <= now: break - listener.deferred.addTimeout((end_time - now) / 1000., reactor) + add_timeout_to_deferred( + listener.deferred, + (end_time - now) / 1000., + ) try: with PreserveLoggingContext(): yield listener.deferred - except TimeoutError: + except DeferredTimeoutError: break except defer.CancelledError: break
diff --git a/synapse/util/async.py b/synapse/util/async.py index 0729bb2863..1df5c5600c 100644 --- a/synapse/util/async.py +++ b/synapse/util/async.py @@ -15,6 +15,8 @@ from twisted.internet import defer, reactor +from twisted.internet.defer import CancelledError +from twisted.python import failure from .logcontext import ( PreserveLoggingContext, make_deferred_yieldable, preserve_fn ) @@ -392,3 +394,68 @@ class ReadWriteLock(object): self.key_to_current_writer.pop(key) defer.returnValue(_ctx_manager()) + + +class DeferredTimeoutError(Exception): + """ + This error is raised by default when a L{Deferred} times out. + """ + + +def add_timeout_to_deferred(deferred, timeout, on_timeout_cancel=None): + """ + Add a timeout to a deferred by scheduling it to be cancelled after + timeout seconds. + + This is essentially a backport of deferred.addTimeout, which was introduced + in twisted 16.5. + + If the deferred gets timed out, it errbacks with a DeferredTimeoutError, + unless a cancelable function was passed to its initialization or unless + a different on_timeout_cancel callable is provided. + + Args: + deferred (defer.Deferred): deferred to be timed out + timeout (Number): seconds to time out after + + on_timeout_cancel (callable): A callable which is called immediately + after the deferred times out, and not if this deferred is + otherwise cancelled before the timeout. + + It takes an arbitrary value, which is the value of the deferred at + that exact point in time (probably a CancelledError Failure), and + the timeout. + + The default callable (if none is provided) will translate a + CancelledError Failure into a DeferredTimeoutError.
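+
+    Example (an illustrative sketch only; ``do_request`` is a hypothetical
+    function returning a Deferred)::
+
+        d = do_request()
+        add_timeout_to_deferred(d, 30)
+        # d now errbacks with a DeferredTimeoutError if it has not
+        # completed within 30 seconds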
+ """ + timed_out = [False] + + def time_it_out(): + timed_out[0] = True + deferred.cancel() + + delayed_call = reactor.callLater(timeout, time_it_out) + + def convert_cancelled(value): + if timed_out[0]: + to_call = on_timeout_cancel or _cancelled_to_timed_out_error + return to_call(value, timeout) + return value + + deferred.addBoth(convert_cancelled) + + def cancel_timeout(result): + # stop the pending call to cancel the deferred if it's been fired + if delayed_call.active(): + delayed_call.cancel() + return result + + deferred.addBoth(cancel_timeout) + + +def _cancelled_to_timed_out_error(value, timeout): + if isinstance(value, failure.Failure): + value.trap(CancelledError) + raise DeferredTimeoutError(timeout, "Deferred") + return value -- cgit 1.4.1 From 2a13af23bc0561ab48e0a90528231c40ee209724 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 27 Apr 2018 11:29:27 +0100 Subject: Use run_in_background in preference to preserve_fn While I was going through uses of preserve_fn for other PRs, I converted places which only use the wrapped function once to use run_in_background, to avoid creating the function object. --- synapse/app/federation_sender.py | 4 ++-- synapse/app/pusher.py | 4 ++-- synapse/app/synchrotron.py | 5 ++--- synapse/appservice/scheduler.py | 12 ++++++------ synapse/crypto/keyring.py | 28 ++++++++++++++++----------- synapse/federation/federation_client.py | 5 +++-- synapse/groups/attestations.py | 4 ++-- synapse/handlers/appservice.py | 5 ++++- synapse/handlers/e2e_keys.py | 6 +++--- synapse/handlers/federation.py | 16 +++++++++------ synapse/handlers/initial_sync.py | 12 +++++++----- synapse/handlers/message.py | 5 +++-- synapse/handlers/typing.py | 7 ++++--- synapse/push/pusherpool.py | 20 +++++++++++-------- synapse/rest/media/v1/preview_url_resource.py | 5 +++-- synapse/storage/events_worker.py | 5 +++-- synapse/storage/stream.py | 5 +++-- synapse/util/async.py | 4 ++-- synapse/util/file_consumer.py | 6 ++++-- synapse/util/logcontext.py | 2 +- synapse/util/ratelimitutils.py | 4 ++-- synapse/util/retryutils.py | 4 ++-- 22 files changed, 97 insertions(+), 71 deletions(-) diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py index 0cc3331519..c6daa0d43f 100644 --- a/synapse/app/federation_sender.py +++ b/synapse/app/federation_sender.py @@ -38,7 +38,7 @@ from synapse.server import HomeServer from synapse.storage.engines import create_engine from synapse.util.async import Linearizer from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext, preserve_fn +from synapse.util.logcontext import LoggingContext, run_in_background from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string from twisted.internet import defer, reactor @@ -229,7 +229,7 @@ class FederationSenderHandler(object): # presence, typing, etc. 
if stream_name == "federation": send_queue.process_rows_for_federation(self.federation_sender, rows) - preserve_fn(self.update_token)(token) + run_in_background(self.update_token, token) # We also need to poke the federation sender when new events happen elif stream_name == "events": diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index d5c3a85195..8bd5c0c2b7 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -33,7 +33,7 @@ from synapse.server import HomeServer from synapse.storage import DataStore from synapse.storage.engines import create_engine from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext, preserve_fn +from synapse.util.logcontext import LoggingContext, run_in_background from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string from twisted.internet import defer, reactor @@ -140,7 +140,7 @@ class PusherReplicationHandler(ReplicationClientHandler): def on_rdata(self, stream_name, token, rows): super(PusherReplicationHandler, self).on_rdata(stream_name, token, rows) - preserve_fn(self.poke_pushers)(stream_name, token, rows) + run_in_background(self.poke_pushers, stream_name, token, rows) @defer.inlineCallbacks def poke_pushers(self, stream_name, token, rows): diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index 2fddcd935a..0c4ccc58bc 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -51,7 +51,7 @@ from synapse.storage.engines import create_engine from synapse.storage.presence import UserPresenceState from synapse.storage.roommember import RoomMemberStore from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext, preserve_fn +from synapse.util.logcontext import LoggingContext, run_in_background from synapse.util.manhole import manhole from synapse.util.stringutils import random_string from synapse.util.versionstring import get_version_string @@ -327,8 +327,7 @@ class SyncReplicationHandler(ReplicationClientHandler): def on_rdata(self, stream_name, token, rows): super(SyncReplicationHandler, self).on_rdata(stream_name, token, rows) - - preserve_fn(self.process_and_notify)(stream_name, token, rows) + run_in_background(self.process_and_notify, stream_name, token, rows) def get_streams_to_replicate(self): args = super(SyncReplicationHandler, self).get_streams_to_replicate() diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py index 6da315473d..ba1631b5c8 100644 --- a/synapse/appservice/scheduler.py +++ b/synapse/appservice/scheduler.py @@ -51,7 +51,7 @@ components. 
from twisted.internet import defer from synapse.appservice import ApplicationServiceState -from synapse.util.logcontext import preserve_fn +from synapse.util.logcontext import run_in_background from synapse.util.metrics import Measure import logging @@ -106,7 +106,7 @@ class _ServiceQueuer(object): def enqueue(self, service, event): # if this service isn't being sent something self.queued_events.setdefault(service.id, []).append(event) - preserve_fn(self._send_request)(service) + run_in_background(self._send_request, service) @defer.inlineCallbacks def _send_request(self, service): @@ -152,10 +152,10 @@ class _TransactionController(object): if sent: yield txn.complete(self.store) else: - preserve_fn(self._start_recoverer)(service) - except Exception as e: - logger.exception(e) - preserve_fn(self._start_recoverer)(service) + run_in_background(self._start_recoverer, service) + except Exception: + logger.exception("Error creating appservice transaction") + run_in_background(self._start_recoverer, service) @defer.inlineCallbacks def on_recovered(self, recoverer): diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index fce83d445f..38944a7326 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -19,7 +19,8 @@ from synapse.api.errors import SynapseError, Codes from synapse.util import unwrapFirstError, logcontext from synapse.util.logcontext import ( PreserveLoggingContext, - preserve_fn + preserve_fn, + run_in_background, ) from synapse.util.metrics import Measure @@ -127,7 +128,7 @@ class Keyring(object): verify_requests.append(verify_request) - preserve_fn(self._start_key_lookups)(verify_requests) + run_in_background(self._start_key_lookups, verify_requests) # Pass those keys to handle_key_deferred so that the json object # signatures can be verified @@ -313,7 +314,7 @@ class Keyring(object): if not verify_request.deferred.called: verify_request.deferred.errback(err) - preserve_fn(do_iterations)().addErrback(on_err) + run_in_background(do_iterations).addErrback(on_err) @defer.inlineCallbacks def get_keys_from_store(self, server_name_and_key_ids): @@ -329,8 +330,9 @@ class Keyring(object): """ res = yield logcontext.make_deferred_yieldable(defer.gatherResults( [ - preserve_fn(self.store.get_server_verify_keys)( - server_name, key_ids + run_in_background( + self.store.get_server_verify_keys, + server_name, key_ids, ).addCallback(lambda ks, server: (server, ks), server_name) for server_name, key_ids in server_name_and_key_ids ], @@ -358,7 +360,7 @@ class Keyring(object): results = yield logcontext.make_deferred_yieldable(defer.gatherResults( [ - preserve_fn(get_key)(p_name, p_keys) + run_in_background(get_key, p_name, p_keys) for p_name, p_keys in self.perspective_servers.items() ], consumeErrors=True, @@ -398,7 +400,7 @@ class Keyring(object): results = yield logcontext.make_deferred_yieldable(defer.gatherResults( [ - preserve_fn(get_key)(server_name, key_ids) + run_in_background(get_key, server_name, key_ids) for server_name, key_ids in server_name_and_key_ids ], consumeErrors=True, @@ -481,7 +483,8 @@ class Keyring(object): yield logcontext.make_deferred_yieldable(defer.gatherResults( [ - preserve_fn(self.store_keys)( + run_in_background( + self.store_keys, server_name=server_name, from_server=perspective_name, verify_keys=response_keys, @@ -539,7 +542,8 @@ class Keyring(object): yield logcontext.make_deferred_yieldable(defer.gatherResults( [ - preserve_fn(self.store_keys)( + run_in_background( + self.store_keys, server_name=key_server_name, 
from_server=server_name, verify_keys=verify_keys, @@ -615,7 +619,8 @@ class Keyring(object): yield logcontext.make_deferred_yieldable(defer.gatherResults( [ - preserve_fn(self.store.store_server_keys_json)( + run_in_background( + self.store.store_server_keys_json, server_name=server_name, key_id=key_id, from_server=server_name, @@ -716,7 +721,8 @@ class Keyring(object): # TODO(markjh): Store whether the keys have expired. return logcontext.make_deferred_yieldable(defer.gatherResults( [ - preserve_fn(self.store.store_server_verify_key)( + run_in_background( + self.store.store_server_verify_key, server_name, server_name, key.time_added, key ) for key_id, key in verify_keys.items() diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 8e2c0c4cd2..8adc60863e 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -33,7 +33,7 @@ from synapse.federation.federation_base import ( import synapse.metrics from synapse.util import logcontext, unwrapFirstError from synapse.util.caches.expiringcache import ExpiringCache -from synapse.util.logcontext import make_deferred_yieldable, preserve_fn +from synapse.util.logcontext import make_deferred_yieldable, run_in_background from synapse.util.logutils import log_function from synapse.util.retryutils import NotRetryingDestination @@ -417,7 +417,8 @@ class FederationClient(FederationBase): batch = set(missing_events[i:i + batch_size]) deferreds = [ - preserve_fn(self.get_pdu)( + run_in_background( + self.get_pdu, destinations=random_server_list(), event_id=e_id, ) diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py index 1fb709e6c3..5f53f17954 100644 --- a/synapse/groups/attestations.py +++ b/synapse/groups/attestations.py @@ -42,7 +42,7 @@ from twisted.internet import defer from synapse.api.errors import SynapseError from synapse.types import get_domain_from_id -from synapse.util.logcontext import preserve_fn +from synapse.util.logcontext import run_in_background from signedjson.sign import sign_json @@ -192,4 +192,4 @@ class GroupAttestionRenewer(object): group_id = row["group_id"] user_id = row["user_id"] - preserve_fn(_renew_attestation)(group_id, user_id) + run_in_background(_renew_attestation, group_id, user_id) diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 0245197c02..6cc2388306 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -198,7 +198,10 @@ class ApplicationServicesHandler(object): services = yield self._get_services_for_3pn(protocol) results = yield make_deferred_yieldable(defer.DeferredList([ - preserve_fn(self.appservice_api.query_3pe)(service, kind, protocol, fields) + run_in_background( + self.appservice_api.query_3pe, + service, kind, protocol, fields, + ) for service in services ], consumeErrors=True)) diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 325c0c4a9f..fc958404a1 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -24,7 +24,7 @@ from synapse.api.errors import ( SynapseError, CodeMessageException, FederationDeniedError, ) from synapse.types import get_domain_from_id, UserID -from synapse.util.logcontext import preserve_fn, make_deferred_yieldable +from synapse.util.logcontext import make_deferred_yieldable, run_in_background from synapse.util.retryutils import NotRetryingDestination logger = logging.getLogger(__name__) @@ -139,7 +139,7 @@ class E2eKeysHandler(object): failures[destination] 
= _exception_to_failure(e) yield make_deferred_yieldable(defer.gatherResults([ - preserve_fn(do_remote_query)(destination) + run_in_background(do_remote_query, destination) for destination in remote_queries_not_in_cache ])) @@ -242,7 +242,7 @@ class E2eKeysHandler(object): failures[destination] = _exception_to_failure(e) yield make_deferred_yieldable(defer.gatherResults([ - preserve_fn(claim_client_keys)(destination) + run_in_background(claim_client_keys, destination) for destination in remote_queries ])) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index ae7e0d6da2..c66ca0f381 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -637,7 +637,8 @@ class FederationHandler(BaseHandler): results = yield logcontext.make_deferred_yieldable(defer.gatherResults( [ - logcontext.preserve_fn(self.replication_layer.get_pdu)( + logcontext.run_in_background( + self.replication_layer.get_pdu, [dest], event_id, outlier=True, @@ -1023,7 +1024,7 @@ class FederationHandler(BaseHandler): # lots of requests for missing prev_events which we do actually # have. Hence we fire off the deferred, but don't wait for it. - logcontext.preserve_fn(self._handle_queued_pdus)(room_queue) + logcontext.run_in_background(self._handle_queued_pdus, room_queue) defer.returnValue(True) @@ -1523,8 +1524,9 @@ class FederationHandler(BaseHandler): if not backfilled: # this intentionally does not yield: we don't care about the result # and don't need to wait for it. - logcontext.preserve_fn(self.pusher_pool.on_new_notifications)( - event_stream_id, max_stream_id + logcontext.run_in_background( + self.pusher_pool.on_new_notifications, + event_stream_id, max_stream_id, ) defer.returnValue((context, event_stream_id, max_stream_id)) @@ -1538,7 +1540,8 @@ class FederationHandler(BaseHandler): """ contexts = yield logcontext.make_deferred_yieldable(defer.gatherResults( [ - logcontext.preserve_fn(self._prep_event)( + logcontext.run_in_background( + self._prep_event, origin, ev_info["event"], state=ev_info.get("state"), @@ -1867,7 +1870,8 @@ class FederationHandler(BaseHandler): different_events = yield logcontext.make_deferred_yieldable( defer.gatherResults([ - logcontext.preserve_fn(self.store.get_event)( + logcontext.run_in_background( + self.store.get_event, d, allow_none=True, allow_rejected=False, diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index c5267b4b84..cd33a86599 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -27,7 +27,7 @@ from synapse.types import ( from synapse.util import unwrapFirstError from synapse.util.async import concurrently_execute from synapse.util.caches.snapshot_cache import SnapshotCache -from synapse.util.logcontext import make_deferred_yieldable, preserve_fn +from synapse.util.logcontext import make_deferred_yieldable, run_in_background from synapse.visibility import filter_events_for_client from ._base import BaseHandler @@ -166,7 +166,8 @@ class InitialSyncHandler(BaseHandler): (messages, token), current_state = yield make_deferred_yieldable( defer.gatherResults( [ - preserve_fn(self.store.get_recent_events_for_room)( + run_in_background( + self.store.get_recent_events_for_room, event.room_id, limit=limit, end_token=room_end_token, @@ -391,9 +392,10 @@ class InitialSyncHandler(BaseHandler): presence, receipts, (messages, token) = yield defer.gatherResults( [ - preserve_fn(get_presence)(), - preserve_fn(get_receipts)(), - preserve_fn(self.store.get_recent_events_for_room)( 
+ run_in_background(get_presence), + run_in_background(get_receipts), + run_in_background( + self.store.get_recent_events_for_room, room_id, limit=limit, end_token=now_token.room_key, diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 21628a8540..244b98dd8d 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -850,7 +850,8 @@ class EventCreationHandler(object): # this intentionally does not yield: we don't care about the result # and don't need to wait for it. - preserve_fn(self.pusher_pool.on_new_notifications)( + run_in_background( + self.pusher_pool.on_new_notifications, event_stream_id, max_stream_id ) @@ -862,7 +863,7 @@ class EventCreationHandler(object): extra_users=extra_users ) - preserve_fn(_notify)() + run_in_background(_notify) if event.type == EventTypes.Message: presence = self.hs.get_presence_handler() diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index 77c0cf146f..19cde70adf 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -16,7 +16,7 @@ from twisted.internet import defer from synapse.api.errors import SynapseError, AuthError -from synapse.util.logcontext import preserve_fn +from synapse.util.logcontext import run_in_background from synapse.util.metrics import Measure from synapse.util.wheel_timer import WheelTimer from synapse.types import UserID, get_domain_from_id @@ -97,7 +97,8 @@ class TypingHandler(object): if self.hs.is_mine_id(member.user_id): last_fed_poke = self._member_last_federation_poke.get(member, None) if not last_fed_poke or last_fed_poke + FEDERATION_PING_INTERVAL <= now: - preserve_fn(self._push_remote)( + run_in_background( + self._push_remote, member=member, typing=True ) @@ -196,7 +197,7 @@ class TypingHandler(object): def _push_update(self, member, typing): if self.hs.is_mine_id(member.user_id): # Only send updates for changes to our own users. - preserve_fn(self._push_remote)(member, typing) + run_in_background(self._push_remote, member, typing) self._push_update_local( member=member, diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 134e89b371..7bb5733090 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -14,13 +14,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import logging + from twisted.internet import defer -from .pusher import PusherFactory -from synapse.util.logcontext import make_deferred_yieldable, preserve_fn +from synapse.push.pusher import PusherFactory from synapse.util.async import run_on_reactor - -import logging +from synapse.util.logcontext import make_deferred_yieldable, run_in_background logger = logging.getLogger(__name__) @@ -137,8 +137,9 @@ class PusherPool: if u in self.pushers: for p in self.pushers[u].values(): deferreds.append( - preserve_fn(p.on_new_notifications)( - min_stream_id, max_stream_id + run_in_background( + p.on_new_notifications, + min_stream_id, max_stream_id, ) ) @@ -164,7 +165,10 @@ class PusherPool: if u in self.pushers: for p in self.pushers[u].values(): deferreds.append( - preserve_fn(p.on_new_receipts)(min_stream_id, max_stream_id) + run_in_background( + p.on_new_receipts, + min_stream_id, max_stream_id, + ) ) yield make_deferred_yieldable(defer.gatherResults(deferreds)) @@ -207,7 +211,7 @@ class PusherPool: if appid_pushkey in byuser: byuser[appid_pushkey].on_stop() byuser[appid_pushkey] = p - preserve_fn(p.on_started)() + run_in_background(p.on_started) logger.info("Started pushers") diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 0fc21540c6..9290d7946f 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -35,7 +35,7 @@ from ._base import FileInfo from synapse.api.errors import ( SynapseError, Codes, ) -from synapse.util.logcontext import preserve_fn, make_deferred_yieldable +from synapse.util.logcontext import make_deferred_yieldable, run_in_background from synapse.util.stringutils import random_string from synapse.util.caches.expiringcache import ExpiringCache from synapse.http.client import SpiderHttpClient @@ -144,7 +144,8 @@ class PreviewUrlResource(Resource): observable = self._cache.get(url) if not observable: - download = preserve_fn(self._do_preview)( + download = run_in_background( + self._do_preview, url, requester.user, ts, ) observable = ObservableDeferred( diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index a937b9bceb..ba834854e1 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -20,7 +20,7 @@ from synapse.events import FrozenEvent from synapse.events.utils import prune_event from synapse.util.logcontext import ( - preserve_fn, PreserveLoggingContext, make_deferred_yieldable + PreserveLoggingContext, make_deferred_yieldable, run_in_background, ) from synapse.util.metrics import Measure from synapse.api.errors import SynapseError @@ -319,7 +319,8 @@ class EventsWorkerStore(SQLBaseStore): res = yield make_deferred_yieldable(defer.gatherResults( [ - preserve_fn(self._get_event_from_row)( + run_in_background( + self._get_event_from_row, row["internal_metadata"], row["json"], row["redacts"], rejected_reason=row["rejects"], ) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 2956c3b3e0..5b245a936c 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -41,7 +41,7 @@ from synapse.storage.events import EventsWorkerStore from synapse.util.caches.descriptors import cached from synapse.types import RoomStreamToken from synapse.util.caches.stream_change_cache import StreamChangeCache -from synapse.util.logcontext import make_deferred_yieldable, preserve_fn +from synapse.util.logcontext import make_deferred_yieldable, run_in_background from 
synapse.storage.engines import PostgresEngine, Sqlite3Engine import abc @@ -198,7 +198,8 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): room_ids = list(room_ids) for rm_ids in (room_ids[i:i + 20] for i in xrange(0, len(room_ids), 20)): res = yield make_deferred_yieldable(defer.gatherResults([ - preserve_fn(self.get_room_events_stream_for_room)( + run_in_background( + self.get_room_events_stream_for_room, room_id, from_key, to_key, limit, order=order, ) for room_id in rm_ids diff --git a/synapse/util/async.py b/synapse/util/async.py index 0729bb2863..bd07067328 100644 --- a/synapse/util/async.py +++ b/synapse/util/async.py @@ -17,7 +17,7 @@ from twisted.internet import defer, reactor from .logcontext import ( - PreserveLoggingContext, make_deferred_yieldable, preserve_fn + PreserveLoggingContext, make_deferred_yieldable, run_in_background ) from synapse.util import logcontext, unwrapFirstError @@ -161,7 +161,7 @@ def concurrently_execute(func, args, limit): pass return logcontext.make_deferred_yieldable(defer.gatherResults([ - preserve_fn(_concurrently_execute_inner)() + run_in_background(_concurrently_execute_inner) for _ in xrange(limit) ], consumeErrors=True)).addErrback(unwrapFirstError) diff --git a/synapse/util/file_consumer.py b/synapse/util/file_consumer.py index 3c8a165331..3380970e4e 100644 --- a/synapse/util/file_consumer.py +++ b/synapse/util/file_consumer.py @@ -15,7 +15,7 @@ from twisted.internet import threads, reactor -from synapse.util.logcontext import make_deferred_yieldable, preserve_fn +from synapse.util.logcontext import make_deferred_yieldable, run_in_background from six.moves import queue @@ -70,7 +70,9 @@ class BackgroundFileConsumer(object): self._producer = producer self.streaming = streaming - self._finished_deferred = preserve_fn(threads.deferToThread)(self._writer) + self._finished_deferred = run_in_background( + threads.deferToThread, self._writer + ) if not streaming: self._producer.resumeProducing() diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index d59adc236e..c2edf87e58 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -341,7 +341,7 @@ def make_deferred_yieldable(deferred): returning a deferred. Then, when the deferred completes, restores the current logcontext before running callbacks/errbacks. - (This is more-or-less the opposite operation to preserve_fn.) + (This is more-or-less the opposite operation to run_in_background.) 
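+
+    Illustrative sketch of the intended pairing (``do_lookup`` is a
+    hypothetical function returning a Deferred, called from inside an
+    ``inlineCallbacks`` generator)::
+
+        d = run_in_background(do_lookup)
+        result = yield make_deferred_yieldable(d)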
""" if isinstance(deferred, defer.Deferred) and not deferred.called: prev_context = LoggingContext.set_current_context(LoggingContext.sentinel) diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py index 1101881a2d..18424f6c36 100644 --- a/synapse/util/ratelimitutils.py +++ b/synapse/util/ratelimitutils.py @@ -18,7 +18,7 @@ from twisted.internet import defer from synapse.api.errors import LimitExceededError from synapse.util.async import sleep -from synapse.util.logcontext import preserve_fn +from synapse.util.logcontext import run_in_background import collections import contextlib @@ -150,7 +150,7 @@ class _PerHostRatelimiter(object): "Ratelimit [%s]: sleeping req", id(request_id), ) - ret_defer = preserve_fn(sleep)(self.sleep_msec / 1000.0) + ret_defer = run_in_background(sleep, self.sleep_msec / 1000.0) self.sleeping_requests.add(request_id) diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py index 47b0bb5eb3..4e93f69d3a 100644 --- a/synapse/util/retryutils.py +++ b/synapse/util/retryutils.py @@ -203,8 +203,8 @@ class RetryDestinationLimiter(object): ) except Exception: logger.exception( - "Failed to store set_destination_retry_timings", + "Failed to store destination_retry_timings", ) # we deliberately do this in the background. - synapse.util.logcontext.preserve_fn(store_retry_timings)() + synapse.util.logcontext.run_in_background(store_retry_timings) -- cgit 1.4.1 From 453adf00b67fc9156050678640caa38a71181ebf Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 27 Apr 2018 14:32:08 +0100 Subject: pep8; remove spurious import --- synapse/handlers/message.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 0f26b91862..23502eda70 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -31,7 +31,7 @@ from synapse.types import ( UserID, RoomAlias, RoomStreamToken, ) from synapse.util.async import run_on_reactor, ReadWriteLock, Limiter -from synapse.util.logcontext import preserve_fn, run_in_background +from synapse.util.logcontext import run_in_background from synapse.util.metrics import measure_func from synapse.util.frozenutils import frozendict_json_encoder from synapse.util.stringutils import random_string -- cgit 1.4.1 From 4f5694e2cecc8530d09fd00e8bb6f165764a6042 Mon Sep 17 00:00:00 2001 From: Adrian Tschira Date: Fri, 27 Apr 2018 16:22:26 +0200 Subject: Add py3 tests to tox with folders that work It's just a few tests, but it will at least prevent a few files from regressing. Also, it makes it easiert to check your code against py36 while writing it. 
Signed-off-by: Adrian Tschira --- tox.ini | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index f408defc8f..c6cee92568 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = packaging, py27, pep8 +envlist = packaging, py27, py36 pep8 [testenv] deps = @@ -46,6 +46,14 @@ commands = # ) usedevelop=true +[testenv:py36] +usedevelop=true +commands = + /usr/bin/find "{toxinidir}" -name '*.pyc' -delete + coverage run {env:COVERAGE_OPTS:} --source="{toxinidir}/synapse" \ + "{envbindir}/trial" {env:TRIAL_FLAGS:} {posargs:tests/metrics} {env:TOXSUFFIX:} + {env:DUMP_COVERAGE_COMMAND:coverage report -m} + [testenv:packaging] deps = check-manifest -- cgit 1.4.1 From a376d8f7615e9715d6c5c3fa2d95f9d48ff87a61 Mon Sep 17 00:00:00 2001 From: Adrian Tschira Date: Sat, 28 Apr 2018 12:29:02 +0200 Subject: open log_config in text mode too Signed-off-by: Adrian Tschira --- synapse/config/logger.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/config/logger.py b/synapse/config/logger.py index 3f70039acd..6a7228dc2f 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -117,7 +117,7 @@ class LoggingConfig(Config): log_config = config.get("log_config") if log_config and not os.path.exists(log_config): log_file = self.abspath("homeserver.log") - with open(log_config, "wb") as log_config_file: + with open(log_config, "w") as log_config_file: log_config_file.write( DEFAULT_LOG_CONFIG.substitute(log_file=log_file) ) -- cgit 1.4.1 From cdb4647a80dfccf4f8865a04c28bfaffa378ef10 Mon Sep 17 00:00:00 2001 From: Adrian Tschira Date: Sat, 28 Apr 2018 13:04:40 +0200 Subject: Don't yield in list comprehensions I've tried to grep for more of this with no success. Signed-off-by: Adrian Tschira --- synapse/handlers/appservice.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 0245197c02..b7776693de 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -259,11 +259,15 @@ class ApplicationServicesHandler(object): event based on the service regex. """ services = self.store.get_app_services() - interested_list = [ - s for s in services if ( - yield s.is_interested(event, self.store) - ) - ] + + # we can't use a list comprehension here. Since python 3, list + # comprehensions use a generator internally. This means you can't yield + # inside of a list comprehension anymore. + interested_list = [] + for s in services: + if (yield s.is_interested(event, self.store)): + interested_list.append(s) + defer.returnValue(interested_list) def _get_services_for_user(self, user_id): -- cgit 1.4.1 From 57b58e2174f120fb13fbe2f6d57e8647b69921ec Mon Sep 17 00:00:00 2001 From: Adrian Tschira Date: Sat, 28 Apr 2018 12:17:56 +0200 Subject: make imports local Signed-off-by: Adrian Tschira --- synapse/push/httppusher.py | 4 ++-- synapse/push/pusher.py | 2 +- synapse/replication/tcp/protocol.py | 4 ++-- synapse/replication/tcp/resource.py | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 1420d378ef..b077e1a446 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -18,8 +18,8 @@ import logging from twisted.internet import defer, reactor from twisted.internet.error import AlreadyCalled, AlreadyCancelled -import push_rule_evaluator -import push_tools +from . import push_rule_evaluator +from . 
import push_tools import synapse from synapse.push import PusherConfigException from synapse.util.logcontext import LoggingContext diff --git a/synapse/push/pusher.py b/synapse/push/pusher.py index 71576330a9..5aa6667e91 100644 --- a/synapse/push/pusher.py +++ b/synapse/push/pusher.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from httppusher import HttpPusher +from .httppusher import HttpPusher import logging logger = logging.getLogger(__name__) diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py index 0a9a290af4..d7d38464b2 100644 --- a/synapse/replication/tcp/protocol.py +++ b/synapse/replication/tcp/protocol.py @@ -53,12 +53,12 @@ from twisted.internet import defer from twisted.protocols.basic import LineOnlyReceiver from twisted.python.failure import Failure -from commands import ( +from .commands import ( COMMAND_MAP, VALID_CLIENT_COMMANDS, VALID_SERVER_COMMANDS, ErrorCommand, ServerCommand, RdataCommand, PositionCommand, PingCommand, NameCommand, ReplicateCommand, UserSyncCommand, SyncCommand, ) -from streams import STREAMS_MAP +from .streams import STREAMS_MAP from synapse.util.stringutils import random_string from synapse.metrics.metric import CounterMetric diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index 786c3fe864..a41af4fd6c 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -18,8 +18,8 @@ from twisted.internet import defer, reactor from twisted.internet.protocol import Factory -from streams import STREAMS_MAP, FederationStream -from protocol import ServerReplicationStreamProtocol +from .streams import STREAMS_MAP, FederationStream +from .protocol import ServerReplicationStreamProtocol from synapse.util.metrics import Measure, measure_func -- cgit 1.4.1 From 94f4d7f49ed322dac38e8946bba590ac41c0f40f Mon Sep 17 00:00:00 2001 From: Adrian Tschira Date: Sat, 28 Apr 2018 11:46:48 +0200 Subject: move httplib import to six --- synapse/handlers/federation.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 260df025f9..24ff6782c5 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -16,7 +16,6 @@ """Contains handlers for federation events.""" -import httplib import itertools import logging import sys @@ -24,6 +23,7 @@ import sys from signedjson.key import decode_verify_key_bytes from signedjson.sign import verify_signed_json import six +from six.moves import http_client from twisted.internet import defer from unpaddedbase64 import decode_base64 @@ -889,7 +889,7 @@ class FederationHandler(BaseHandler): logger.warn("Rejecting event %s which has %i prev_events", ev.event_id, len(ev.prev_events)) raise SynapseError( - httplib.BAD_REQUEST, + http_client.BAD_REQUEST, "Too many prev_events", ) @@ -897,7 +897,7 @@ class FederationHandler(BaseHandler): logger.warn("Rejecting event %s which has %i auth_events", ev.event_id, len(ev.auth_events)) raise SynapseError( - httplib.BAD_REQUEST, + http_client.BAD_REQUEST, "Too many auth_events", ) -- cgit 1.4.1 From 4f2f5171b752169e265b5d93c9b4e788171b5326 Mon Sep 17 00:00:00 2001 From: Adrian Tschira Date: Sat, 28 Apr 2018 11:39:37 +0200 Subject: replace stringIO imports --- synapse/http/client.py | 4 ++-- synapse/util/logformatter.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/synapse/http/client.py 
b/synapse/http/client.py index 62309c3365..70a19d9b74 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -40,7 +40,7 @@ from twisted.web.http import PotentialDataLoss from twisted.web.http_headers import Headers from twisted.web._newclient import ResponseDone -from StringIO import StringIO +from six import StringIO import simplejson as json import logging @@ -507,7 +507,7 @@ class SpiderHttpClient(SimpleHttpClient): reactor, SpiderEndpointFactory(hs) ) - ), [('gzip', GzipDecoder)] + ), [(b'gzip', GzipDecoder)] ) # We could look like Chrome: # self.user_agent = ("Mozilla/5.0 (%s) (KHTML, like Gecko) diff --git a/synapse/util/logformatter.py b/synapse/util/logformatter.py index cdbc4bffd7..59ab3c6968 100644 --- a/synapse/util/logformatter.py +++ b/synapse/util/logformatter.py @@ -14,7 +14,7 @@ # limitations under the License. -import StringIO +from six import StringIO import logging import traceback -- cgit 1.4.1 From d82b6ea9e68150aece1fc46cb0821b31cf728910 Mon Sep 17 00:00:00 2001 From: Adrian Tschira Date: Sat, 28 Apr 2018 13:57:00 +0200 Subject: Move more xrange to six plus a bonus next() Signed-off-by: Adrian Tschira --- synapse/federation/federation_client.py | 4 +++- synapse/handlers/room_list.py | 4 +++- synapse/storage/registration.py | 4 +++- synapse/storage/schema/delta/30/as_users.py | 4 +++- synapse/storage/stream.py | 4 +++- synapse/storage/tags.py | 4 +++- synapse/util/async.py | 6 ++++-- synapse/util/stringutils.py | 5 +++-- synapse/util/wheel_timer.py | 4 +++- 9 files changed, 28 insertions(+), 11 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 8e2c0c4cd2..a2067fc390 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -19,6 +19,8 @@ import itertools import logging import random +from six.moves import range + from twisted.internet import defer from synapse.api.constants import Membership @@ -413,7 +415,7 @@ class FederationClient(FederationBase): batch_size = 20 missing_events = list(missing_events) - for i in xrange(0, len(missing_events), batch_size): + for i in range(0, len(missing_events), batch_size): batch = set(missing_events[i:i + batch_size]) deferreds = [ diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py index add3f9b009..5757bb7f8a 100644 --- a/synapse/handlers/room_list.py +++ b/synapse/handlers/room_list.py @@ -15,6 +15,8 @@ from twisted.internet import defer +from six.moves import range + from ._base import BaseHandler from synapse.api.constants import ( @@ -200,7 +202,7 @@ class RoomListHandler(BaseHandler): step = len(rooms_to_scan) if len(rooms_to_scan) != 0 else 1 chunk = [] - for i in xrange(0, len(rooms_to_scan), step): + for i in range(0, len(rooms_to_scan), step): batch = rooms_to_scan[i:i + step] logger.info("Processing %i rooms for result", len(batch)) yield concurrently_execute( diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 6b557ca0cf..a50717db2d 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -22,6 +22,8 @@ from synapse.storage import background_updates from synapse.storage._base import SQLBaseStore from synapse.util.caches.descriptors import cached, cachedInlineCallbacks +from six.moves import range + class RegistrationWorkerStore(SQLBaseStore): @cached() @@ -469,7 +471,7 @@ class RegistrationStore(RegistrationWorkerStore, match = regex.search(user_id) if match: found.add(int(match.group(1))) - for i in 
xrange(len(found) + 1): + for i in range(len(found) + 1): if i not in found: return i diff --git a/synapse/storage/schema/delta/30/as_users.py b/synapse/storage/schema/delta/30/as_users.py index c53e53c94f..85bd1a2006 100644 --- a/synapse/storage/schema/delta/30/as_users.py +++ b/synapse/storage/schema/delta/30/as_users.py @@ -14,6 +14,8 @@ import logging from synapse.config.appservice import load_appservices +from six.moves import range + logger = logging.getLogger(__name__) @@ -58,7 +60,7 @@ def run_upgrade(cur, database_engine, config, *args, **kwargs): for as_id, user_ids in owned.items(): n = 100 - user_chunks = (user_ids[i:i + 100] for i in xrange(0, len(user_ids), n)) + user_chunks = (user_ids[i:i + 100] for i in range(0, len(user_ids), n)) for chunk in user_chunks: cur.execute( database_engine.convert_param_style( diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 3b8b539993..52c90d8e04 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -47,6 +47,8 @@ from synapse.storage.engines import PostgresEngine, Sqlite3Engine import abc import logging +from six.moves import range + logger = logging.getLogger(__name__) @@ -196,7 +198,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): results = {} room_ids = list(room_ids) - for rm_ids in (room_ids[i:i + 20] for i in xrange(0, len(room_ids), 20)): + for rm_ids in (room_ids[i:i + 20] for i in range(0, len(room_ids), 20)): res = yield make_deferred_yieldable(defer.gatherResults([ preserve_fn(self.get_room_events_stream_for_room)( room_id, from_key, to_key, limit, order=order, diff --git a/synapse/storage/tags.py b/synapse/storage/tags.py index 13bff9f055..6671d3cfca 100644 --- a/synapse/storage/tags.py +++ b/synapse/storage/tags.py @@ -22,6 +22,8 @@ from twisted.internet import defer import simplejson as json import logging +from six.moves import range + logger = logging.getLogger(__name__) @@ -98,7 +100,7 @@ class TagsWorkerStore(AccountDataWorkerStore): batch_size = 50 results = [] - for i in xrange(0, len(tag_ids), batch_size): + for i in range(0, len(tag_ids), batch_size): tags = yield self.runInteraction( "get_all_updated_tag_content", get_tag_content, diff --git a/synapse/util/async.py b/synapse/util/async.py index 1df5c5600c..cb53c31123 100644 --- a/synapse/util/async.py +++ b/synapse/util/async.py @@ -27,6 +27,8 @@ from contextlib import contextmanager import logging +from six.moves import range + logger = logging.getLogger(__name__) @@ -158,13 +160,13 @@ def concurrently_execute(func, args, limit): def _concurrently_execute_inner(): try: while True: - yield func(it.next()) + yield func(next(it)) except StopIteration: pass return logcontext.make_deferred_yieldable(defer.gatherResults([ preserve_fn(_concurrently_execute_inner)() - for _ in xrange(limit) + for _ in range(limit) ], consumeErrors=True)).addErrback(unwrapFirstError) diff --git a/synapse/util/stringutils.py b/synapse/util/stringutils.py index 95a6168e16..b98b9dc6e4 100644 --- a/synapse/util/stringutils.py +++ b/synapse/util/stringutils.py @@ -15,6 +15,7 @@ import random import string +from six.moves import range _string_with_symbols = ( string.digits + string.ascii_letters + ".,;:^&*-_+=#~@" @@ -22,12 +23,12 @@ _string_with_symbols = ( def random_string(length): - return ''.join(random.choice(string.ascii_letters) for _ in xrange(length)) + return ''.join(random.choice(string.ascii_letters) for _ in range(length)) def random_string_with_symbols(length): return ''.join( - random.choice(_string_with_symbols) for _ in 
xrange(length) + random.choice(_string_with_symbols) for _ in range(length) ) diff --git a/synapse/util/wheel_timer.py b/synapse/util/wheel_timer.py index b70f9a6b0a..7a9e45aca9 100644 --- a/synapse/util/wheel_timer.py +++ b/synapse/util/wheel_timer.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from six.moves import range + class _Entry(object): __slots__ = ["end_key", "queue"] @@ -68,7 +70,7 @@ class WheelTimer(object): # Add empty entries between the end of the current list and when we want # to insert. This ensures there are no gaps. self.entries.extend( - _Entry(key) for key in xrange(last_key, then_key + 1) + _Entry(key) for key in range(last_key, then_key + 1) ) self.entries[-1].queue.append(obj) -- cgit 1.4.1 From 42c89c8215533e3ef48cf8c9bf33595d4826b905 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sat, 28 Apr 2018 22:27:30 +0100 Subject: make it work with sqlite --- .../schema/delta/48/group_unique_indexes.py | 54 ++++++++++++++++++++++ .../schema/delta/48/group_unique_indexes.sql | 34 -------------- 2 files changed, 54 insertions(+), 34 deletions(-) create mode 100644 synapse/storage/schema/delta/48/group_unique_indexes.py delete mode 100644 synapse/storage/schema/delta/48/group_unique_indexes.sql diff --git a/synapse/storage/schema/delta/48/group_unique_indexes.py b/synapse/storage/schema/delta/48/group_unique_indexes.py new file mode 100644 index 0000000000..c1f5881b3e --- /dev/null +++ b/synapse/storage/schema/delta/48/group_unique_indexes.py @@ -0,0 +1,54 @@ +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from synapse.storage.engines import PostgresEngine + +FIX_INDEXES = """ +-- rebuild indexes as uniques +DROP INDEX groups_invites_g_idx; +CREATE UNIQUE INDEX group_invites_g_idx ON group_invites(group_id, user_id); +DROP INDEX groups_users_g_idx; +CREATE UNIQUE INDEX group_users_g_idx ON group_users(group_id, user_id); + +-- rename other indexes to actually match their table names.. 
+DROP INDEX groups_users_u_idx; +CREATE INDEX group_users_u_idx ON group_users(user_id); +DROP INDEX groups_invites_u_idx; +CREATE INDEX group_invites_u_idx ON group_invites(user_id); +DROP INDEX groups_rooms_g_idx; +CREATE UNIQUE INDEX group_rooms_g_idx ON group_rooms(group_id, room_id); +DROP INDEX groups_rooms_r_idx; +CREATE INDEX group_rooms_r_idx ON group_rooms(room_id); +""" + +def run_create(cur, database_engine, *args, **kwargs): + rowid = "ctid" if isinstance(database_engine, PostgresEngine) then "rowid" + + # remove duplicates from group_users & group_invites tables + cur.execute(""" + DELETE FROM group_users WHERE %s NOT IN ( + SELECT min(%s) FROM group_users GROUP BY group_id, user_id + ); + """ % (rowid, rowid)); + cur.execute(""" + DELETE FROM group_invites WHERE %s NOT IN ( + SELECT min(%s) FROM group_invites GROUP BY group_id, user_id + ); + """ % (rowid, rowid)); + + for statement in get_statements(FIX_INDEXES.splitlines()): + cur.execute(statement) + +def run_upgrade(*args, **kwargs): + pass diff --git a/synapse/storage/schema/delta/48/group_unique_indexes.sql b/synapse/storage/schema/delta/48/group_unique_indexes.sql deleted file mode 100644 index 9ea7a8f2e6..0000000000 --- a/synapse/storage/schema/delta/48/group_unique_indexes.sql +++ /dev/null @@ -1,34 +0,0 @@ -/* Copyright 2018 New Vector Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - --- remove duplicates from group_users table -DELETE FROM group_users WHERE ctid NOT IN ( - SELECT min(ctid) FROM group_users GROUP BY group_id, user_id -); -DROP INDEX groups_users_g_idx; -CREATE UNIQUE INDEX group_users_g_idx ON group_users(group_id, user_id); - --- remove duplicates from group_invites table -DELETE FROM group_invites WHERE ctid NOT IN ( - SELECT min(ctid) FROM group_invites GROUP BY group_id, user_id -); -DROP INDEX groups_invites_g_idx; -CREATE UNIQUE INDEX group_invites_g_idx ON group_invites(group_id, user_id); - --- rename other indexes to actually match their table names... 
-ALTER INDEX groups_users_u_idx RENAME TO group_users_u_idx; -ALTER INDEX groups_invites_u_idx RENAME TO group_invites_u_idx; -ALTER INDEX groups_rooms_g_idx RENAME TO group_rooms_g_idx; -ALTER INDEX groups_rooms_r_idx RENAME TO group_rooms_r_idx; -- cgit 1.4.1 From 006e18b6bbca0721e4de957e641ef64a2f0903cf Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sat, 28 Apr 2018 22:32:24 +0100 Subject: pep8 --- synapse/storage/schema/delta/48/group_unique_indexes.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/synapse/storage/schema/delta/48/group_unique_indexes.py b/synapse/storage/schema/delta/48/group_unique_indexes.py index c1f5881b3e..b653a987b1 100644 --- a/synapse/storage/schema/delta/48/group_unique_indexes.py +++ b/synapse/storage/schema/delta/48/group_unique_indexes.py @@ -32,23 +32,25 @@ DROP INDEX groups_rooms_r_idx; CREATE INDEX group_rooms_r_idx ON group_rooms(room_id); """ + def run_create(cur, database_engine, *args, **kwargs): - rowid = "ctid" if isinstance(database_engine, PostgresEngine) then "rowid" + rowid = "ctid" if isinstance(database_engine, PostgresEngine) else "rowid" # remove duplicates from group_users & group_invites tables cur.execute(""" DELETE FROM group_users WHERE %s NOT IN ( SELECT min(%s) FROM group_users GROUP BY group_id, user_id ); - """ % (rowid, rowid)); + """ % (rowid, rowid)) cur.execute(""" DELETE FROM group_invites WHERE %s NOT IN ( SELECT min(%s) FROM group_invites GROUP BY group_id, user_id ); - """ % (rowid, rowid)); + """ % (rowid, rowid)) for statement in get_statements(FIX_INDEXES.splitlines()): cur.execute(statement) + def run_upgrade(*args, **kwargs): pass -- cgit 1.4.1 From adaf3ec87fe59c59ad533cab99b5f2b1d455ab08 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sat, 28 Apr 2018 22:39:15 +0100 Subject: fix missing import --- synapse/storage/schema/delta/48/group_unique_indexes.py | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/storage/schema/delta/48/group_unique_indexes.py b/synapse/storage/schema/delta/48/group_unique_indexes.py index b653a987b1..2233af87d7 100644 --- a/synapse/storage/schema/delta/48/group_unique_indexes.py +++ b/synapse/storage/schema/delta/48/group_unique_indexes.py @@ -13,6 +13,7 @@ # limitations under the License. 
from synapse.storage.engines import PostgresEngine +from synapse.storage.prepare_database import get_statements FIX_INDEXES = """ -- rebuild indexes as uniques -- cgit 1.4.1 From e9143b659352e87fd9e26c8e5a771c78011bc945 Mon Sep 17 00:00:00 2001 From: Adrian Tschira Date: Sat, 28 Apr 2018 23:56:59 +0200 Subject: more bytes strings Signed-off-by: Adrian Tschira --- synapse/http/endpoint.py | 2 +- synapse/http/server.py | 2 +- synapse/rest/media/v1/upload_resource.py | 6 +++--- synapse/util/httpresourcetree.py | 7 +++++-- 4 files changed, 10 insertions(+), 7 deletions(-) diff --git a/synapse/http/endpoint.py b/synapse/http/endpoint.py index 00572c2897..db455e5909 100644 --- a/synapse/http/endpoint.py +++ b/synapse/http/endpoint.py @@ -286,7 +286,7 @@ def resolve_service(service_name, dns_client=client, cache=SERVER_CACHE, clock=t if (len(answers) == 1 and answers[0].type == dns.SRV and answers[0].payload - and answers[0].payload.target == dns.Name('.')): + and answers[0].payload.target == dns.Name(b'.')): raise ConnectError("Service %s unavailable" % service_name) for answer in answers: diff --git a/synapse/http/server.py b/synapse/http/server.py index 8d632290de..55b9ad5251 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -546,6 +546,6 @@ def _request_user_agent_is_curl(request): b"User-Agent", default=[] ) for user_agent in user_agents: - if "curl" in user_agent: + if b"curl" in user_agent: return True return False diff --git a/synapse/rest/media/v1/upload_resource.py b/synapse/rest/media/v1/upload_resource.py index f6f498cdc5..a31e75cb46 100644 --- a/synapse/rest/media/v1/upload_resource.py +++ b/synapse/rest/media/v1/upload_resource.py @@ -81,15 +81,15 @@ class UploadResource(Resource): headers = request.requestHeaders if headers.hasHeader("Content-Type"): - media_type = headers.getRawHeaders("Content-Type")[0] + media_type = headers.getRawHeaders(b"Content-Type")[0] else: raise SynapseError( msg="Upload request missing 'Content-Type'", code=400, ) - # if headers.hasHeader("Content-Disposition"): - # disposition = headers.getRawHeaders("Content-Disposition")[0] + # if headers.hasHeader(b"Content-Disposition"): + # disposition = headers.getRawHeaders(b"Content-Disposition")[0] # TODO(markjh): parse content-dispostion content_uri = yield self.media_repo.create_content( diff --git a/synapse/util/httpresourcetree.py b/synapse/util/httpresourcetree.py index d747849553..e9f0f292ee 100644 --- a/synapse/util/httpresourcetree.py +++ b/synapse/util/httpresourcetree.py @@ -40,9 +40,12 @@ def create_resource_tree(desired_tree, root_resource): # extra resources to existing nodes. See self._resource_id for the key. 
resource_mappings = {} for full_path, res in desired_tree.items(): + # twisted requires all resources to be bytes + full_path = full_path.encode("utf-8") + logger.info("Attaching %s to path %s", res, full_path) last_resource = root_resource - for path_seg in full_path.split('/')[1:-1]: + for path_seg in full_path.split(b'/')[1:-1]: if path_seg not in last_resource.listNames(): # resource doesn't exist, so make a "dummy resource" child_resource = NoResource() @@ -57,7 +60,7 @@ def create_resource_tree(desired_tree, root_resource): # =========================== # now attach the actual desired resource - last_path_seg = full_path.split('/')[-1] + last_path_seg = full_path.split(b'/')[-1] # if there is already a resource here, thieve its children and # replace it -- cgit 1.4.1 From 122593265b4a10732b2e7fcfe2f5b2eba83d61d7 Mon Sep 17 00:00:00 2001 From: Adrian Tschira Date: Sat, 28 Apr 2018 23:53:07 +0200 Subject: Construct HMAC as bytes on py3 Signed-off-by: Adrian Tschira --- synapse/rest/client/v1/register.py | 16 +++++++++------- synapse/rest/client/v2_alpha/register.py | 10 ++++++---- 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/synapse/rest/client/v1/register.py b/synapse/rest/client/v1/register.py index 8a82097178..9b3022e0b0 100644 --- a/synapse/rest/client/v1/register.py +++ b/synapse/rest/client/v1/register.py @@ -30,6 +30,8 @@ from hashlib import sha1 import hmac import logging +from six import string_types + logger = logging.getLogger(__name__) @@ -333,11 +335,11 @@ class RegisterRestServlet(ClientV1RestServlet): def _do_shared_secret(self, request, register_json, session): yield run_on_reactor() - if not isinstance(register_json.get("mac", None), basestring): + if not isinstance(register_json.get("mac", None), string_types): raise SynapseError(400, "Expected mac.") - if not isinstance(register_json.get("user", None), basestring): + if not isinstance(register_json.get("user", None), string_types): raise SynapseError(400, "Expected 'user' key.") - if not isinstance(register_json.get("password", None), basestring): + if not isinstance(register_json.get("password", None), string_types): raise SynapseError(400, "Expected 'password' key.") if not self.hs.config.registration_shared_secret: @@ -358,14 +360,14 @@ class RegisterRestServlet(ClientV1RestServlet): got_mac = str(register_json["mac"]) want_mac = hmac.new( - key=self.hs.config.registration_shared_secret, + key=self.hs.config.registration_shared_secret.encode(), digestmod=sha1, ) want_mac.update(user) - want_mac.update("\x00") + want_mac.update(b"\x00") want_mac.update(password) - want_mac.update("\x00") - want_mac.update("admin" if admin else "notadmin") + want_mac.update(b"\x00") + want_mac.update(b"admin" if admin else b"notadmin") want_mac = want_mac.hexdigest() if compare_digest(want_mac, got_mac): diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index f317c919dc..5cab00aea9 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -35,6 +35,8 @@ from hashlib import sha1 from synapse.util.async import run_on_reactor from synapse.util.ratelimitutils import FederationRateLimiter +from six import string_types + # We ought to be using hmac.compare_digest() but on older pythons it doesn't # exist. It's a _really minor_ security flaw to use plain string comparison @@ -210,14 +212,14 @@ class RegisterRestServlet(RestServlet): # in sessions. Pull out the username/password provided to us. 
desired_password = None if 'password' in body: - if (not isinstance(body['password'], basestring) or + if (not isinstance(body['password'], string_types) or len(body['password']) > 512): raise SynapseError(400, "Invalid password") desired_password = body["password"] desired_username = None if 'username' in body: - if (not isinstance(body['username'], basestring) or + if (not isinstance(body['username'], string_types) or len(body['username']) > 512): raise SynapseError(400, "Invalid username") desired_username = body['username'] @@ -243,7 +245,7 @@ class RegisterRestServlet(RestServlet): access_token = get_access_token_from_request(request) - if isinstance(desired_username, basestring): + if isinstance(desired_username, string_types): result = yield self._do_appservice_registration( desired_username, access_token, body ) @@ -464,7 +466,7 @@ class RegisterRestServlet(RestServlet): # includes the password and admin flag in the hashed text. Why are # these different? want_mac = hmac.new( - key=self.hs.config.registration_shared_secret, + key=self.hs.config.registration_shared_secret.encode(), msg=user, digestmod=sha1, ).hexdigest() -- cgit 1.4.1 From 7767a9fc0e2adbe1828e0bd86e83446a9fb66bc7 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 30 Apr 2018 00:37:32 +0100 Subject: Update tox.ini add missing comma --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index c6cee92568..ca8373662b 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = packaging, py27, py36 pep8 +envlist = packaging, py27, py36, pep8 [testenv] deps = -- cgit 1.4.1 From af3cc5051124982182e7151e35fb72f857d81392 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 27 Apr 2018 12:09:47 +0100 Subject: Remove redundant call to preserve_fn submit_event_for_as doesn't return a deferred anyway, so this is pointless. 
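For illustration, a rough sketch of when the wrapper actually matters
(the function names below are invented, not the scheduler code touched
by this commit):

    from twisted.internet import defer

    def synchronous_submit(service, event):
        # Returns None immediately: there is no pending Deferred for
        # the current logcontext to leak into, so wrapping this in
        # preserve_fn()/run_in_background() is a no-op.
        print("queued %r for %r" % (event, service))

    @defer.inlineCallbacks
    def asynchronous_submit(service, event):
        # Returns a Deferred: fire-and-forget callers *should* wrap
        # this in run_in_background() so the logcontext is detached
        # correctly while the Deferred is pending.
        yield defer.succeed(None)

    # Effectively equivalent to
    # preserve_fn(synchronous_submit)(service, event):
    synchronous_submit("svc", "evt")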
--- synapse/handlers/appservice.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 6cc2388306..f6c7b89270 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -19,7 +19,7 @@ import synapse from synapse.api.constants import EventTypes from synapse.util.metrics import Measure from synapse.util.logcontext import ( - make_deferred_yieldable, preserve_fn, run_in_background, + make_deferred_yieldable, run_in_background, ) import logging @@ -111,9 +111,7 @@ class ApplicationServicesHandler(object): # Fork off pushes to these services for service in services: - preserve_fn(self.scheduler.submit_event_for_as)( - service, event - ) + self.scheduler.submit_event_for_as(service, event) @defer.inlineCallbacks def handle_room_events(events): -- cgit 1.4.1 From d1d54d60889f94228adff55f64116aa66d07ac3a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 30 Apr 2018 00:58:31 +0100 Subject: add py36 to build matrix --- .travis.yml | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/.travis.yml b/.travis.yml index 3ce93cb434..e6ba6f4752 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,14 +1,22 @@ sudo: false language: python -python: 2.7 # tell travis to cache ~/.cache/pip cache: pip -env: - - TOX_ENV=packaging - - TOX_ENV=pep8 - - TOX_ENV=py27 +matrix: + include: + - python: 2.7 + env: TOX_ENV=packaging + + - python: 2.7 + env: TOX_ENV=pep8 + + - python: 2.7 + env: TOX_ENV=py27 + + - python: 3.6 + env: TOX_ENV=py36 install: - pip install tox -- cgit 1.4.1 From 0c9db26260210bd2066048333b2644a2511b1801 Mon Sep 17 00:00:00 2001 From: Adrian Tschira Date: Mon, 30 Apr 2018 09:49:10 +0200 Subject: add comment explaining attributeerror --- synapse/events/__init__.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index d4d1b92f7a..c3ff85c49a 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -47,6 +47,9 @@ class _EventInternalMetadata(object): def _event_dict_property(key): + # We want to be able to use hasattr with the event dict properties. + # However, (on python3) hasattr expects AttributeError to be raised. 
Hence, + # we need to transform the KeyError into an AttributeError def getter(self): try: return self._event_dict[key] -- cgit 1.4.1 From 6e005d13821b86e4727f4f28f4c7b1e51c05d096 Mon Sep 17 00:00:00 2001 From: Adrian Tschira Date: Mon, 30 Apr 2018 10:37:41 +0200 Subject: run config tests on py3 Signed-off-by: Adrian Tschira --- tox.ini | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index ca8373662b..89ca6d7a19 100644 --- a/tox.ini +++ b/tox.ini @@ -51,7 +51,8 @@ usedevelop=true commands = /usr/bin/find "{toxinidir}" -name '*.pyc' -delete coverage run {env:COVERAGE_OPTS:} --source="{toxinidir}/synapse" \ - "{envbindir}/trial" {env:TRIAL_FLAGS:} {posargs:tests/metrics} {env:TOXSUFFIX:} + "{envbindir}/trial" {env:TRIAL_FLAGS:} {posargs:tests/metrics tests/config} \ + {env:TOXSUFFIX:} {env:DUMP_COVERAGE_COMMAND:coverage report -m} [testenv:packaging] -- cgit 1.4.1 From 576b71dd3d7139ae246b900fea4533d813f38a2a Mon Sep 17 00:00:00 2001 From: Krombel Date: Mon, 30 Apr 2018 14:29:48 +0200 Subject: add guard for None on purge_history api --- synapse/rest/client/v1/admin.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/synapse/rest/client/v1/admin.py b/synapse/rest/client/v1/admin.py index 303419d281..efd5c9873d 100644 --- a/synapse/rest/client/v1/admin.py +++ b/synapse/rest/client/v1/admin.py @@ -168,11 +168,24 @@ class PurgeHistoryRestServlet(ClientV1RestServlet): yield self.store.find_first_stream_ordering_after_ts(ts) ) - (_, depth, _) = ( + room_event_after_stream_ordering = ( yield self.store.get_room_event_after_stream_ordering( room_id, stream_ordering, ) ) + if room_event_after_stream_ordering: + (_, depth, _) = room_event_after_stream_ordering + else: + logger.warn( + "[purge] purging events not possible: No event found " + "(received_ts %i => stream_ordering %i)", + ts, stream_ordering, + ) + raise SynapseError( + 404, + "there is no event to be purged", + errcode=Codes.NOT_FOUND, + ) logger.info( "[purge] purging up to depth %i (received_ts %i => " "stream_ordering %i)", -- cgit 1.4.1 From 2ad3fc36e66fcc86cc3fde99760e4851402a8d56 Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Mon, 30 Apr 2018 16:21:11 +0100 Subject: Fixes #3135 - Replace _OpenSSLECCurve with crypto.get_elliptic_curve (#3157) fixes #3135 Signed-off-by: Will Hunt will@half-shot.uk --- synapse/crypto/context_factory.py | 9 +++++---- synapse/python_dependencies.py | 7 +++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/synapse/crypto/context_factory.py b/synapse/crypto/context_factory.py index cff3ca809a..0397f73ab4 100644 --- a/synapse/crypto/context_factory.py +++ b/synapse/crypto/context_factory.py @@ -13,8 +13,8 @@ # limitations under the License. 
from twisted.internet import ssl -from OpenSSL import SSL -from twisted.internet._sslverify import _OpenSSLECCurve, _defaultCurveName +from OpenSSL import SSL, crypto +from twisted.internet._sslverify import _defaultCurveName import logging @@ -32,8 +32,9 @@ class ServerContextFactory(ssl.ContextFactory): @staticmethod def configure_context(context, config): try: - _ecCurve = _OpenSSLECCurve(_defaultCurveName) - _ecCurve.addECKeyToContext(context) + _ecCurve = crypto.get_elliptic_curve(_defaultCurveName) + context.set_tmp_ecdh(_ecCurve) + except Exception: logger.exception("Failed to enable elliptic curve for TLS") context.set_options(SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3) diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 711cbb6c50..216db4d164 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -39,12 +39,11 @@ REQUIREMENTS = { "signedjson>=1.0.0": ["signedjson>=1.0.0"], "pynacl>=1.2.1": ["nacl>=1.2.1", "nacl.bindings"], "service_identity>=1.0.0": ["service_identity>=1.0.0"], + "Twisted>=16.0.0": ["twisted>=16.0.0"], - # we break under Twisted 18.4 - # (https://github.com/matrix-org/synapse/issues/3135) - "Twisted>=16.0.0,<18.4": ["twisted>=16.0.0"], + # We use crypto.get_elliptic_curve which is only supported in >=0.15 + "pyopenssl>=0.15": ["OpenSSL>=0.15"], - "pyopenssl>=0.14": ["OpenSSL>=0.14"], "pyyaml": ["yaml"], "pyasn1": ["pyasn1"], "daemonize": ["daemonize"], -- cgit 1.4.1 From 6495dbb326dd2b5d58e5de25107f7fe6d13b6ca4 Mon Sep 17 00:00:00 2001 From: Adrian Tschira Date: Mon, 30 Apr 2018 21:58:30 +0200 Subject: Burminate v1auth This closes #2602 v1auth was created to account for the differences in status code between the v1 and v2_alpha revisions of the protocol (401 vs 403 for invalid tokens). However since those protocols were merged, this makes the r0 version/endpoint internally inconsistent, and violates the specification for the r0 endpoint. This might break clients that rely on this inconsistency with the specification. This is said to affect the legacy angular reference client. However, I feel that restoring parity with the spec is more important. Either way, it is critical to inform developers about this change, in case they rely on the illegal behaviour. Signed-off-by: Adrian Tschira --- synapse/rest/client/v1/base.py | 6 +++++- synapse/rest/client/v1/pusher.py | 2 +- synapse/server.py | 10 ---------- tests/rest/client/v1/test_events.py | 9 +++++++-- tests/rest/client/v1/test_profile.py | 2 +- tests/rest/client/v1/test_rooms.py | 18 +++++++++--------- tests/rest/client/v1/test_typing.py | 2 +- 7 files changed, 24 insertions(+), 25 deletions(-) diff --git a/synapse/rest/client/v1/base.py b/synapse/rest/client/v1/base.py index c7aa0bbf59..197335d7aa 100644 --- a/synapse/rest/client/v1/base.py +++ b/synapse/rest/client/v1/base.py @@ -52,6 +52,10 @@ class ClientV1RestServlet(RestServlet): """A base Synapse REST Servlet for the client version 1 API. """ + # This subclass was presumably created to allow the auth for the v1 + # protocol version to be different, however this behaviour was removed. 
+ # it may no longer be necessary + def __init__(self, hs): """ Args: @@ -59,5 +63,5 @@ class ClientV1RestServlet(RestServlet): """ self.hs = hs self.builder_factory = hs.get_event_builder_factory() - self.auth = hs.get_v1auth() + self.auth = hs.get_auth() self.txns = HttpTransactionCache(hs.get_clock()) diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py index 1819a560cb..0206e664c1 100644 --- a/synapse/rest/client/v1/pusher.py +++ b/synapse/rest/client/v1/pusher.py @@ -150,7 +150,7 @@ class PushersRemoveRestServlet(RestServlet): super(RestServlet, self).__init__() self.hs = hs self.notifier = hs.get_notifier() - self.auth = hs.get_v1auth() + self.auth = hs.get_auth() self.pusher_pool = self.hs.get_pusherpool() @defer.inlineCallbacks diff --git a/synapse/server.py b/synapse/server.py index cd0c1a51be..ebdea6b0c4 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -105,7 +105,6 @@ class HomeServer(object): 'federation_client', 'federation_server', 'handlers', - 'v1auth', 'auth', 'state_handler', 'state_resolution_handler', @@ -225,15 +224,6 @@ class HomeServer(object): def build_simple_http_client(self): return SimpleHttpClient(self) - def build_v1auth(self): - orf = Auth(self) - # Matrix spec makes no reference to what HTTP status code is returned, - # but the V1 API uses 403 where it means 401, and the webclient - # relies on this behaviour, so V1 gets its own copy of the auth - # with backwards compat behaviour. - orf.TOKEN_NOT_FOUND_HTTP_STATUS = 403 - return orf - def build_state_handler(self): return StateHandler(self) diff --git a/tests/rest/client/v1/test_events.py b/tests/rest/client/v1/test_events.py index a8d09600bd..f5a7258e68 100644 --- a/tests/rest/client/v1/test_events.py +++ b/tests/rest/client/v1/test_events.py @@ -148,11 +148,16 @@ class EventStreamPermissionsTestCase(RestTestCase): @defer.inlineCallbacks def test_stream_basic_permissions(self): - # invalid token, expect 403 + # invalid token, expect 401 + # note: this is in violation of the original v1 spec, which expected + # 403. However, since the v1 spec no longer exists and the v1 + # implementation is now part of the r0 implementation, the newer + # behaviour is used instead to be consistent with the r0 spec. 
+ # see issue #2602 (code, response) = yield self.mock_resource.trigger_get( "/events?access_token=%s" % ("invalid" + self.token, ) ) - self.assertEquals(403, code, msg=str(response)) + self.assertEquals(401, code, msg=str(response)) # valid token, expect content (code, response) = yield self.mock_resource.trigger_get( diff --git a/tests/rest/client/v1/test_profile.py b/tests/rest/client/v1/test_profile.py index deac7f100c..dc94b8bd19 100644 --- a/tests/rest/client/v1/test_profile.py +++ b/tests/rest/client/v1/test_profile.py @@ -52,7 +52,7 @@ class ProfileTestCase(unittest.TestCase): def _get_user_by_req(request=None, allow_guest=False): return synapse.types.create_requester(myid) - hs.get_v1auth().get_user_by_req = _get_user_by_req + hs.get_auth().get_user_by_req = _get_user_by_req profile.register_servlets(hs, self.mock_resource) diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index d763400eaf..61d737725b 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -60,7 +60,7 @@ class RoomPermissionsTestCase(RestTestCase): "token_id": 1, "is_guest": False, } - hs.get_v1auth().get_user_by_access_token = get_user_by_access_token + hs.get_auth().get_user_by_access_token = get_user_by_access_token def _insert_client_ip(*args, **kwargs): return defer.succeed(None) @@ -70,7 +70,7 @@ class RoomPermissionsTestCase(RestTestCase): synapse.rest.client.v1.room.register_servlets(hs, self.mock_resource) - self.auth = hs.get_v1auth() + self.auth = hs.get_auth() # create some rooms under the name rmcreator_id self.uncreated_rmid = "!aa:test" @@ -425,7 +425,7 @@ class RoomsMemberListTestCase(RestTestCase): "token_id": 1, "is_guest": False, } - hs.get_v1auth().get_user_by_access_token = get_user_by_access_token + hs.get_auth().get_user_by_access_token = get_user_by_access_token def _insert_client_ip(*args, **kwargs): return defer.succeed(None) @@ -507,7 +507,7 @@ class RoomsCreateTestCase(RestTestCase): "token_id": 1, "is_guest": False, } - hs.get_v1auth().get_user_by_access_token = get_user_by_access_token + hs.get_auth().get_user_by_access_token = get_user_by_access_token def _insert_client_ip(*args, **kwargs): return defer.succeed(None) @@ -597,7 +597,7 @@ class RoomTopicTestCase(RestTestCase): "is_guest": False, } - hs.get_v1auth().get_user_by_access_token = get_user_by_access_token + hs.get_auth().get_user_by_access_token = get_user_by_access_token def _insert_client_ip(*args, **kwargs): return defer.succeed(None) @@ -711,7 +711,7 @@ class RoomMemberStateTestCase(RestTestCase): "token_id": 1, "is_guest": False, } - hs.get_v1auth().get_user_by_access_token = get_user_by_access_token + hs.get_auth().get_user_by_access_token = get_user_by_access_token def _insert_client_ip(*args, **kwargs): return defer.succeed(None) @@ -843,7 +843,7 @@ class RoomMessagesTestCase(RestTestCase): "token_id": 1, "is_guest": False, } - hs.get_v1auth().get_user_by_access_token = get_user_by_access_token + hs.get_auth().get_user_by_access_token = get_user_by_access_token def _insert_client_ip(*args, **kwargs): return defer.succeed(None) @@ -945,7 +945,7 @@ class RoomInitialSyncTestCase(RestTestCase): "token_id": 1, "is_guest": False, } - hs.get_v1auth().get_user_by_access_token = get_user_by_access_token + hs.get_auth().get_user_by_access_token = get_user_by_access_token def _insert_client_ip(*args, **kwargs): return defer.succeed(None) @@ -1017,7 +1017,7 @@ class RoomMessageListTestCase(RestTestCase): "token_id": 1, "is_guest": False, } - 
hs.get_v1auth().get_user_by_access_token = get_user_by_access_token + hs.get_auth().get_user_by_access_token = get_user_by_access_token def _insert_client_ip(*args, **kwargs): return defer.succeed(None) diff --git a/tests/rest/client/v1/test_typing.py b/tests/rest/client/v1/test_typing.py index 2ec4ecab5b..fe161ee5cb 100644 --- a/tests/rest/client/v1/test_typing.py +++ b/tests/rest/client/v1/test_typing.py @@ -68,7 +68,7 @@ class RoomTypingTestCase(RestTestCase): "is_guest": False, } - hs.get_v1auth().get_user_by_access_token = get_user_by_access_token + hs.get_auth().get_user_by_access_token = get_user_by_access_token def _insert_client_ip(*args, **kwargs): return defer.succeed(None) -- cgit 1.4.1 From 33f469ba19586bbafa0cf2c7d7c35463bdab87eb Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 1 May 2018 16:19:39 +0100 Subject: Apply some limits to depth to counter abuse * When creating a new event, cap its depth to 2^63 - 1 * When receiving events, reject any without a sensible depth As per https://docs.google.com/document/d/1I3fi2S-XnpO45qrpCsowZv8P8dHcNZ4fsBsbOW7KABI --- synapse/api/constants.py | 3 +++ synapse/federation/federation_base.py | 21 ++++++++++++++++++--- synapse/handlers/message.py | 6 +++++- 3 files changed, 26 insertions(+), 4 deletions(-) diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 489efb7f86..5baba43966 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -16,6 +16,9 @@ """Contains constants from the specification.""" +# the "depth" field on events is limited to 2**63 - 1 +MAX_DEPTH = 2**63 - 1 + class Membership(object): diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index 79eaa31031..4cc98a3fe8 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -14,7 +14,10 @@ # limitations under the License. 
import logging -from synapse.api.errors import SynapseError +import six + +from synapse.api.constants import MAX_DEPTH +from synapse.api.errors import SynapseError, Codes from synapse.crypto.event_signing import check_event_content_hash from synapse.events import FrozenEvent from synapse.events.utils import prune_event @@ -190,11 +193,23 @@ def event_from_pdu_json(pdu_json, outlier=False): FrozenEvent Raises: - SynapseError: if the pdu is missing required fields + SynapseError: if the pdu is missing required fields or is otherwise + not a valid matrix event """ # we could probably enforce a bunch of other fields here (room_id, sender, # origin, etc etc) - assert_params_in_request(pdu_json, ('event_id', 'type')) + assert_params_in_request(pdu_json, ('event_id', 'type', 'depth')) + + depth = pdu_json['depth'] + if not isinstance(depth, six.integer_types): + raise SynapseError(400, "Depth %r not an intger" % (depth, ), + Codes.BAD_JSON) + + if depth < 0: + raise SynapseError(400, "Depth too small", Codes.BAD_JSON) + elif depth > MAX_DEPTH: + raise SynapseError(400, "Depth too large", Codes.BAD_JSON) + event = FrozenEvent( pdu_json ) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 21628a8540..53beb2b9ab 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -16,7 +16,7 @@ from twisted.internet import defer, reactor from twisted.python.failure import Failure -from synapse.api.constants import EventTypes, Membership +from synapse.api.constants import EventTypes, Membership, MAX_DEPTH from synapse.api.errors import AuthError, Codes, SynapseError from synapse.crypto.event_signing import add_hashes_and_signatures from synapse.events.utils import serialize_event @@ -624,6 +624,10 @@ class EventCreationHandler(object): if prev_events_and_hashes: depth = max([d for _, _, d in prev_events_and_hashes]) + 1 + # we cap depth of generated events, to ensure that they are not + # rejected by other servers (and so that they can be persisted in + # the db) + depth = min(depth, MAX_DEPTH) else: depth = 1 -- cgit 1.4.1 From d858f3bd4e0513bea2fae43b23250b979b971407 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 26 Apr 2018 12:34:40 +0100 Subject: Miscellaneous fixes to python_dependencies * add some doc about wtf this thing does * pin Twisted to < 18.4 * add explicit dep on six (fixes #3089) --- synapse/python_dependencies.py | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 5cabf7dabe..711cbb6c50 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -1,5 +1,6 @@ # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2017 Vector Creations Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -18,6 +19,18 @@ from distutils.version import LooseVersion logger = logging.getLogger(__name__) +# this dict maps from python package name to a list of modules we expect it to +# provide. +# +# the key is a "requirement specifier", as used as a parameter to `pip +# install`[1], or an `install_requires` argument to `setuptools.setup` [2]. +# +# the value is a sequence of strings; each entry should be the name of the +# python module, optionally followed by a version assertion which can be either +# ">=" or "==". +# +# [1] https://pip.pypa.io/en/stable/reference/pip_install/#requirement-specifiers. 
+# [2] https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-dependencies REQUIREMENTS = { "jsonschema>=2.5.1": ["jsonschema>=2.5.1"], "frozendict>=0.4": ["frozendict"], @@ -26,7 +39,11 @@ REQUIREMENTS = { "signedjson>=1.0.0": ["signedjson>=1.0.0"], "pynacl>=1.2.1": ["nacl>=1.2.1", "nacl.bindings"], "service_identity>=1.0.0": ["service_identity>=1.0.0"], - "Twisted>=16.0.0": ["twisted>=16.0.0"], + + # we break under Twisted 18.4 + # (https://github.com/matrix-org/synapse/issues/3135) + "Twisted>=16.0.0,<18.4": ["twisted>=16.0.0"], + "pyopenssl>=0.14": ["OpenSSL>=0.14"], "pyyaml": ["yaml"], "pyasn1": ["pyasn1"], @@ -39,6 +56,7 @@ REQUIREMENTS = { "pymacaroons-pynacl": ["pymacaroons"], "msgpack-python>=0.3.0": ["msgpack"], "phonenumbers>=8.2.0": ["phonenumbers"], + "six": ["six"], } CONDITIONAL_REQUIREMENTS = { "web_client": { -- cgit 1.4.1 From 8570bb84ccba5c7e53161e445d13e3aaffbcab1b Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Tue, 1 May 2018 18:22:53 +0100 Subject: Update __init__.py bump version --- synapse/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/__init__.py b/synapse/__init__.py index 4924f44d4e..f31cb9a3cb 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -16,4 +16,4 @@ """ This is a reference implementation of a Matrix home server. """ -__version__ = "0.28.0" +__version__ = "0.28.1" -- cgit 1.4.1 From 8e6bd0e32456e66ec8df3af62ecacf291cf4632f Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Tue, 1 May 2018 18:28:14 +0100 Subject: changelog for 0.28.1 --- CHANGES.rst | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 74f454cb5b..8da5e0dbf6 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,5 +1,26 @@ -Changes in synapse v0.28.0-rc1 (2018-04-26) -=========================================== +Changes in synapse v0.28.1 (2018-05-01) +======================================= + +SECURITY UPDATE + +* Clamp the allowed values of event depth received over federation to be + [0, 2**63 - 1]. This mitigates an attack where malicious events + injected with depth = 2**63 - 1 render rooms unusable. Depth is used to + determine the cosmetic ordering of events within a room, and so the ordering + of events in such a room will default to using stream_ordering rather than depth + (topological_ordering). + + This is a temporary solution to mitigate abuse in the wild, whilst a long solution + is being implemented to improve how the depth parameter is used. + + Full details at + https://docs.google.com/document/d/1I3fi2S-XnpO45qrpCsowZv8P8dHcNZ4fsBsbOW7KABI/edit# + +* Pin Twisted to <18.4 until we stop using the private _OpenSSLECCurve API. + + +Changes in synapse v0.28.0 (2018-04-26) +======================================= Bug Fixes: -- cgit 1.4.1 From 5c2214f4c73e67ea907aea740e8c168fd4735299 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Tue, 1 May 2018 19:03:35 +0100 Subject: fix markdown --- CHANGES.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 8da5e0dbf6..cc40855387 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -4,8 +4,8 @@ Changes in synapse v0.28.1 (2018-05-01) SECURITY UPDATE * Clamp the allowed values of event depth received over federation to be - [0, 2**63 - 1]. This mitigates an attack where malicious events - injected with depth = 2**63 - 1 render rooms unusable. Depth is used to + [0, 2^63 - 1]. 
This mitigates an attack where malicious events + injected with depth = 2^63 - 1 render rooms unusable. Depth is used to determine the cosmetic ordering of events within a room, and so the ordering of events in such a room will default to using stream_ordering rather than depth (topological_ordering). @@ -14,7 +14,7 @@ SECURITY UPDATE is being implemented to improve how the depth parameter is used. Full details at - https://docs.google.com/document/d/1I3fi2S-XnpO45qrpCsowZv8P8dHcNZ4fsBsbOW7KABI/edit# + https://docs.google.com/document/d/1I3fi2S-XnpO45qrpCsowZv8P8dHcNZ4fsBsbOW7KABI * Pin Twisted to <18.4 until we stop using the private _OpenSSLECCurve API. -- cgit 1.4.1 From da602419b277308d06ba5e2a147e6d128eaa2802 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Tue, 1 May 2018 19:19:23 +0100 Subject: missing word :| --- CHANGES.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 0dd2efd9d2..317846d2a2 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -10,7 +10,7 @@ SECURITY UPDATE of events in such a room will default to using stream_ordering rather than depth (topological_ordering). - This is a temporary solution to mitigate abuse in the wild, whilst a long solution + This is a temporary solution to mitigate abuse in the wild, whilst a long term solution is being implemented to improve how the depth parameter is used. Full details at -- cgit 1.4.1 From 9f21de6a015a210d9ce5ae71948af98a56046ea2 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Tue, 1 May 2018 19:19:46 +0100 Subject: missing word :| --- CHANGES.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 0dd2efd9d2..317846d2a2 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -10,7 +10,7 @@ SECURITY UPDATE of events in such a room will default to using stream_ordering rather than depth (topological_ordering). - This is a temporary solution to mitigate abuse in the wild, whilst a long solution + This is a temporary solution to mitigate abuse in the wild, whilst a long term solution is being implemented to improve how the depth parameter is used. Full details at -- cgit 1.4.1 From d4c14e143825c679a7909b009aec2e51f829857f Mon Sep 17 00:00:00 2001 From: kaiyou Date: Tue, 1 May 2018 20:47:58 +0200 Subject: Fix the documentation about 'POSTGRES_DB' --- contrib/docker/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/docker/README.md b/contrib/docker/README.md index 25c358c847..aed56646c2 100644 --- a/contrib/docker/README.md +++ b/contrib/docker/README.md @@ -135,7 +135,7 @@ Shared secrets, that will be initialized to random values if not set: Database specific values (will use SQLite if not set): -* `POSTGRES_DATABASE` - The database name for the synapse postgres database. [default: `matrix`] +* `POSTGRES_DB` - The database name for the synapse postgres database. [default: `synapse`] * `POSTGRES_HOST` - The host of the postgres database if you wish to use postgresql instead of sqlite3. [default: `db` which is useful when using a container on the same docker network in a compose file where the postgres service is called `db`] * `POSTGRES_PASSWORD` - The password for the synapse postgres database. **If this is set then postgres will be used instead of sqlite3.** [default: none] **NOTE**: You are highly encouraged to use postgresql! Please use the compose file to make it easier to deploy. * `POSTGRES_USER` - The user for the synapse postgres database. 
[default: `matrix`] -- cgit 1.4.1 From 4f2e898c29de5b51e8c5893afa64e5b43d25048f Mon Sep 17 00:00:00 2001 From: kaiyou Date: Tue, 1 May 2018 20:49:52 +0200 Subject: Make the logging level configurable --- contrib/docker/conf/log.config | 6 +++--- contrib/docker/docker-compose.yml | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/contrib/docker/conf/log.config b/contrib/docker/conf/log.config index b5c907c4f9..1851995802 100644 --- a/contrib/docker/conf/log.config +++ b/contrib/docker/conf/log.config @@ -17,13 +17,13 @@ handlers: loggers: synapse: - level: INFO + level: {{ SYNAPSE_LOG_LEVEL or "WARNING" }} synapse.storage.SQL: # beware: increasing this to DEBUG will make synapse log sensitive # information such as access tokens. - level: INFO + level: {{ SYNAPSE_LOG_LEVEL or "WARNING" }} root: - level: INFO + level: {{ SYNAPSE_LOG_LEVEL or "WARNING" }} handlers: [console] diff --git a/contrib/docker/docker-compose.yml b/contrib/docker/docker-compose.yml index 46e72601d3..0b531949e0 100644 --- a/contrib/docker/docker-compose.yml +++ b/contrib/docker/docker-compose.yml @@ -15,6 +15,7 @@ services: - SYNAPSE_SERVER_NAME=my.matrix.host - SYNAPSE_REPORT_STATS=no - SYNAPSE_ENABLE_REGISTRATION=yes + - SYNAPSE_LOG_LEVEL=INFO - POSTGRES_PASSWORD=changeme volumes: # You may either store all the files in a local folder -- cgit 1.4.1 From e482f8cd8504b36dc1ce2c1e51e0dee479d33249 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 2 May 2018 09:12:26 +0100 Subject: Fix incorrect reference to StringIO This was introduced in 4f2f5171 --- synapse/util/logformatter.py | 2 +- tests/util/test_logformatter.py | 38 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 39 insertions(+), 1 deletion(-) create mode 100644 tests/util/test_logformatter.py diff --git a/synapse/util/logformatter.py b/synapse/util/logformatter.py index 59ab3c6968..3e42868ea9 100644 --- a/synapse/util/logformatter.py +++ b/synapse/util/logformatter.py @@ -32,7 +32,7 @@ class LogFormatter(logging.Formatter): super(LogFormatter, self).__init__(*args, **kwargs) def formatException(self, ei): - sio = StringIO.StringIO() + sio = StringIO() (typ, val, tb) = ei # log the stack above the exception capture point if possible, but diff --git a/tests/util/test_logformatter.py b/tests/util/test_logformatter.py new file mode 100644 index 0000000000..1a1a8412f2 --- /dev/null +++ b/tests/util/test_logformatter.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import sys + +from synapse.util.logformatter import LogFormatter +from tests import unittest + + +class TestException(Exception): + pass + + +class LogFormatterTestCase(unittest.TestCase): + def test_formatter(self): + formatter = LogFormatter() + + try: + raise TestException("testytest") + except TestException: + ei = sys.exc_info() + + output = formatter.formatException(ei) + + # check the output looks vaguely sane + self.assertIn("testytest", output) + self.assertIn("Capture point", output) -- cgit 1.4.1 From f22e7cda2c72d461acba664cb083e8c4e3c7572a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 2 May 2018 11:46:23 +0100 Subject: Fix a class of logcontext leaks So, it turns out that if you have a first `Deferred` `D1`, you can add a callback which returns another `Deferred` `D2`, and `D2` must then complete before any further callbacks on `D1` will execute (and later callbacks on `D1` get the *result* of `D2` rather than `D2` itself). So, `D1` might have `called=True` (as in, it has started running its callbacks), but any new callbacks added to `D1` won't get run until `D2` completes - so if you `yield D1` in an `inlineCallbacks` function, your `yield` will 'block'. In conclusion: some of our assumptions in `logcontext` were invalid. We need to make sure that we don't optimise out the logcontext juggling when this situation happens. Fortunately, it is easy to detect by checking `D1.paused`. --- synapse/util/logcontext.py | 60 ++++++++++++++++++++++++-------------- tests/util/test_logcontext.py | 67 ++++++++++++++++++++++++++++++++++++------- 2 files changed, 94 insertions(+), 33 deletions(-) diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index 01ac71e53e..e086e12213 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -302,7 +302,7 @@ def preserve_fn(f): def run_in_background(f, *args, **kwargs): """Calls a function, ensuring that the current context is restored after return from the function, and that the sentinel context is set once the - deferred returned by the funtion completes. + deferred returned by the function completes. Useful for wrapping functions that return a deferred which you don't yield on (for instance because you want to pass it to deferred.gatherResults()). @@ -320,24 +320,31 @@ def run_in_background(f, *args, **kwargs): # by synchronous exceptions, so let's turn them into Failures. return defer.fail() - if isinstance(res, defer.Deferred) and not res.called: - # The function will have reset the context before returning, so - # we need to restore it now. - LoggingContext.set_current_context(current) - - # The original context will be restored when the deferred - # completes, but there is nothing waiting for it, so it will - # get leaked into the reactor or some other function which - # wasn't expecting it. We therefore need to reset the context - # here. - # - # (If this feels asymmetric, consider it this way: we are - # effectively forking a new thread of execution. We are - # probably currently within a ``with LoggingContext()`` block, - # which is supposed to have a single entry and exit point. But - # by spawning off another deferred, we are effectively - # adding a new exit point.) 
- res.addBoth(_set_context_cb, LoggingContext.sentinel) + if not isinstance(res, defer.Deferred): + return res + + if res.called and not res.paused: + # The function should have maintained the logcontext, so we can + # optimise out the messing about + return res + + # The function may have reset the context before returning, so + # we need to restore it now. + ctx = LoggingContext.set_current_context(current) + + # The original context will be restored when the deferred + # completes, but there is nothing waiting for it, so it will + # get leaked into the reactor or some other function which + # wasn't expecting it. We therefore need to reset the context + # here. + # + # (If this feels asymmetric, consider it this way: we are + # effectively forking a new thread of execution. We are + # probably currently within a ``with LoggingContext()`` block, + # which is supposed to have a single entry and exit point. But + # by spawning off another deferred, we are effectively + # adding a new exit point.) + res.addBoth(_set_context_cb, ctx) return res @@ -354,9 +361,18 @@ def make_deferred_yieldable(deferred): (This is more-or-less the opposite operation to run_in_background.) """ - if isinstance(deferred, defer.Deferred) and not deferred.called: - prev_context = LoggingContext.set_current_context(LoggingContext.sentinel) - deferred.addBoth(_set_context_cb, prev_context) + if not isinstance(deferred, defer.Deferred): + return deferred + + if deferred.called and not deferred.paused: + # it looks like this deferred is ready to run any callbacks we give it + # immediately. We may as well optimise out the logcontext faffery. + return deferred + + # ok, we can't be sure that a yield won't block, so let's reset the + # logcontext, and add a callback to the deferred to restore it. + prev_context = LoggingContext.set_current_context(LoggingContext.sentinel) + deferred.addBoth(_set_context_cb, prev_context) return deferred diff --git a/tests/util/test_logcontext.py b/tests/util/test_logcontext.py index 4850722bc5..7707fbb1f0 100644 --- a/tests/util/test_logcontext.py +++ b/tests/util/test_logcontext.py @@ -36,24 +36,28 @@ class LoggingContextTestCase(unittest.TestCase): yield sleep(0) self._check_test_key("one") - def _test_preserve_fn(self, function): + def _test_run_in_background(self, function): sentinel_context = LoggingContext.current_context() callback_completed = [False] - @defer.inlineCallbacks - def cb(): + def test(): context_one.request = "one" - yield function() - self._check_test_key("one") + d = function() - callback_completed[0] = True + def cb(res): + self._check_test_key("one") + callback_completed[0] = True + return res + d.addCallback(cb) + + return d with LoggingContext() as context_one: context_one.request = "one" # fire off function, but don't wait on it. 
- logcontext.preserve_fn(cb)() + logcontext.run_in_background(test) self._check_test_key("one") @@ -80,20 +84,31 @@ class LoggingContextTestCase(unittest.TestCase): # test is done once d2 finishes return d2 - def test_preserve_fn_with_blocking_fn(self): + def test_run_in_background_with_blocking_fn(self): @defer.inlineCallbacks def blocking_function(): yield sleep(0) - return self._test_preserve_fn(blocking_function) + return self._test_run_in_background(blocking_function) - def test_preserve_fn_with_non_blocking_fn(self): + def test_run_in_background_with_non_blocking_fn(self): @defer.inlineCallbacks def nonblocking_function(): with logcontext.PreserveLoggingContext(): yield defer.succeed(None) - return self._test_preserve_fn(nonblocking_function) + return self._test_run_in_background(nonblocking_function) + + @unittest.DEBUG + def test_run_in_background_with_chained_deferred(self): + # a function which returns a deferred which looks like it has been + # called, but is actually paused + def testfunc(): + return logcontext.make_deferred_yieldable( + _chained_deferred_function() + ) + + return self._test_run_in_background(testfunc) @defer.inlineCallbacks def test_make_deferred_yieldable(self): @@ -118,6 +133,22 @@ class LoggingContextTestCase(unittest.TestCase): # now it should be restored self._check_test_key("one") + @defer.inlineCallbacks + def test_make_deferred_yieldable_with_chained_deferreds(self): + sentinel_context = LoggingContext.current_context() + + with LoggingContext() as context_one: + context_one.request = "one" + + d1 = logcontext.make_deferred_yieldable(_chained_deferred_function()) + # make sure that the context was reset by make_deferred_yieldable + self.assertIs(LoggingContext.current_context(), sentinel_context) + + yield d1 + + # now it should be restored + self._check_test_key("one") + @defer.inlineCallbacks def test_make_deferred_yieldable_on_non_deferred(self): """Check that make_deferred_yieldable does the right thing when its @@ -132,3 +163,17 @@ class LoggingContextTestCase(unittest.TestCase): r = yield d1 self.assertEqual(r, "bum") self._check_test_key("one") + + +# a function which returns a deferred which has been "called", but +# which had a function which returned another incomplete deferred on +# its callback list, so won't yet call any other new callbacks. +def _chained_deferred_function(): + d = defer.succeed(None) + + def cb(res): + d2 = defer.Deferred() + reactor.callLater(0, d2.callback, res) + return d2 + d.addCallback(cb) + return d -- cgit 1.4.1 From 46beeb9a307febb679fc25565aca439f8af044ed Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 2 May 2018 15:46:22 +0100 Subject: Fix a couple of logcontext leaks in unit tests ... which were making other, innocent, tests, fail. Plus remove a spurious unittest.DEBUG which was making the output noisy. 
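The pattern being applied, as a rough sketch (invented names; see the
diff for the real test changes): a mock that hands back a still-pending
Deferred should wrap it in make_deferred_yieldable(), otherwise the
test's logcontext follows the Deferred out into the reactor and trips
the leak checks in unrelated tests.

    from mock import Mock
    from twisted.internet import defer
    from synapse.util.logcontext import make_deferred_yieldable

    pending = defer.Deferred()

    # Leaky: a caller that yields on this keeps its logcontext
    # attached to `pending`, which only completes later, outside the
    # test that set the context up.
    leaky_send = Mock(return_value=pending)

    # Fixed: the wrapper parks the caller on the sentinel context
    # while `pending` is outstanding, and restores the caller's
    # context when the result actually arrives.
    fixed_send = Mock(side_effect=lambda *args: make_deferred_yieldable(pending))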
--- tests/appservice/test_scheduler.py | 11 +++++++++-- tests/storage/test_event_push_actions.py | 1 - 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/tests/appservice/test_scheduler.py b/tests/appservice/test_scheduler.py index e5a902f734..9181692771 100644 --- a/tests/appservice/test_scheduler.py +++ b/tests/appservice/test_scheduler.py @@ -17,6 +17,8 @@ from synapse.appservice.scheduler import ( _ServiceQueuer, _TransactionController, _Recoverer ) from twisted.internet import defer + +from synapse.util.logcontext import make_deferred_yieldable from ..utils import MockClock from mock import Mock from tests import unittest @@ -204,7 +206,9 @@ class ApplicationServiceSchedulerQueuerTestCase(unittest.TestCase): def test_send_single_event_with_queue(self): d = defer.Deferred() - self.txn_ctrl.send = Mock(return_value=d) + self.txn_ctrl.send = Mock( + side_effect=lambda x, y: make_deferred_yieldable(d), + ) service = Mock(id=4) event = Mock(event_id="first") event2 = Mock(event_id="second") @@ -235,7 +239,10 @@ class ApplicationServiceSchedulerQueuerTestCase(unittest.TestCase): srv_2_event2 = Mock(event_id="srv2b") send_return_list = [srv_1_defer, srv_2_defer] - self.txn_ctrl.send = Mock(side_effect=lambda x, y: send_return_list.pop(0)) + + def do_send(x, y): + return make_deferred_yieldable(send_return_list.pop(0)) + self.txn_ctrl.send = Mock(side_effect=do_send) # send events for different ASes and make sure they are sent self.queuer.enqueue(srv1, srv_1_event) diff --git a/tests/storage/test_event_push_actions.py b/tests/storage/test_event_push_actions.py index 575374c6a6..9962ce8a5d 100644 --- a/tests/storage/test_event_push_actions.py +++ b/tests/storage/test_event_push_actions.py @@ -128,7 +128,6 @@ class EventPushActionsStoreTestCase(tests.unittest.TestCase): yield _rotate(10) yield _assert_counts(1, 1) - @tests.unittest.DEBUG @defer.inlineCallbacks def test_find_first_stream_ordering_after_ts(self): def add_event(so, ts): -- cgit 1.4.1 From 11607006d9570f76a46b44c9c8de13f13550dbcf Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 2 May 2018 15:48:47 +0100 Subject: Remove spurious unittest.DEBUG --- tests/util/test_logcontext.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/util/test_logcontext.py b/tests/util/test_logcontext.py index 7707fbb1f0..ad78d884e0 100644 --- a/tests/util/test_logcontext.py +++ b/tests/util/test_logcontext.py @@ -99,7 +99,6 @@ class LoggingContextTestCase(unittest.TestCase): return self._test_run_in_background(nonblocking_function) - @unittest.DEBUG def test_run_in_background_with_chained_deferred(self): # a function which returns a deferred which looks like it has been # called, but is actually paused -- cgit 1.4.1 From be31adb036cab08c4c70cb1e0d0f6550f0a75ef1 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 2 May 2018 13:29:16 +0100 Subject: Fix logcontext leak in media repo Make FileResponder.write_to_consumer uphold the logcontext contract --- synapse/rest/media/v1/_base.py | 1 + synapse/rest/media/v1/media_storage.py | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py index d9c4af9389..c0d2f06855 100644 --- a/synapse/rest/media/v1/_base.py +++ b/synapse/rest/media/v1/_base.py @@ -143,6 +143,7 @@ def respond_with_responder(request, responder, media_type, file_size, upload_nam respond_404(request) return + logger.debug("Responding to media request with responder %s") add_file_headers(request, media_type, file_size, upload_name) 
with responder: yield responder.write_to_consumer(request) diff --git a/synapse/rest/media/v1/media_storage.py b/synapse/rest/media/v1/media_storage.py index 7f263db239..d23fe10b07 100644 --- a/synapse/rest/media/v1/media_storage.py +++ b/synapse/rest/media/v1/media_storage.py @@ -255,7 +255,9 @@ class FileResponder(Responder): self.open_file = open_file def write_to_consumer(self, consumer): - return FileSender().beginFileTransfer(self.open_file, consumer) + return make_deferred_yieldable( + FileSender().beginFileTransfer(self.open_file, consumer) + ) def __exit__(self, exc_type, exc_val, exc_tb): self.open_file.close() -- cgit 1.4.1 From 32015e1109bc955697353d8f8088e3f6b538d12c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 2 May 2018 16:52:42 +0100 Subject: Escape label values in prometheus metrics --- synapse/metrics/metric.py | 22 ++++++++++++++++++++-- tests/metrics/test_metric.py | 21 ++++++++++++++++++++- 2 files changed, 40 insertions(+), 3 deletions(-) diff --git a/synapse/metrics/metric.py b/synapse/metrics/metric.py index 89bd47c3f7..1a09e417c9 100644 --- a/synapse/metrics/metric.py +++ b/synapse/metrics/metric.py @@ -16,6 +16,7 @@ from itertools import chain import logging +import re logger = logging.getLogger(__name__) @@ -56,8 +57,7 @@ class BaseMetric(object): return not len(self.labels) def _render_labelvalue(self, value): - # TODO: escape backslashes, quotes and newlines - return '"%s"' % (value) + return '"%s"' % (_escape_label_value(value),) def _render_key(self, values): if self.is_scalar(): @@ -299,3 +299,21 @@ class MemoryUsageMetric(object): "process_psutil_rss:total %d" % sum_rss, "process_psutil_rss:count %d" % len_rss, ] + + +def _escape_character(c): + """Replaces a single character with its escape sequence. + """ + if c == "\\": + return "\\\\" + elif c == "\"": + return "\\\"" + elif c == "\n": + return "\\n" + return c + + +def _escape_label_value(value): + """Takes a label value and escapes quotes, newlines and backslashes + """ + return re.sub(r"([\n\"\\])", lambda m: _escape_character(m.group(1)), value) diff --git a/tests/metrics/test_metric.py b/tests/metrics/test_metric.py index 39bde6e3f8..069c0be762 100644 --- a/tests/metrics/test_metric.py +++ b/tests/metrics/test_metric.py @@ -16,7 +16,8 @@ from tests import unittest from synapse.metrics.metric import ( - CounterMetric, CallbackMetric, DistributionMetric, CacheMetric + CounterMetric, CallbackMetric, DistributionMetric, CacheMetric, + _escape_label_value, ) @@ -171,3 +172,21 @@ class CacheMetricTestCase(unittest.TestCase): 'cache:size{name="cache_name"} 1', 'cache:evicted_size{name="cache_name"} 2', ]) + + +class LabelValueEscapeTestCase(unittest.TestCase): + def test_simple(self): + string = "safjhsdlifhyskljfksdfh" + self.assertEqual(string, _escape_label_value(string)) + + def test_escape(self): + self.assertEqual( + "abc\\\"def\\nghi\\\\", + _escape_label_value("abc\"def\nghi\\"), + ) + + def test_sequence_of_escapes(self): + self.assertEqual( + "abc\\\"def\\nghi\\\\\\n", + _escape_label_value("abc\"def\nghi\\\n"), + ) -- cgit 1.4.1 From a41117c63b4e7ef2021fb59cb521c77d72f2bf70 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 2 May 2018 17:27:27 +0100 Subject: Make _escape_character take MatchObject --- synapse/metrics/metric.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/synapse/metrics/metric.py b/synapse/metrics/metric.py index 1a09e417c9..fbba94e633 100644 --- a/synapse/metrics/metric.py +++ b/synapse/metrics/metric.py @@ -301,9 +301,17 @@ 
class MemoryUsageMetric(object): ] -def _escape_character(c): +def _escape_character(m): """Replaces a single character with its escape sequence. + + Args: + m (re.MatchObject): A match object whose first group is the single + character to replace + + Returns: + str """ + c = m.group(1) if c == "\\": return "\\\\" elif c == "\"": @@ -316,4 +324,4 @@ def _escape_character(c): def _escape_label_value(value): """Takes a label value and escapes quotes, newlines and backslashes """ - return re.sub(r"([\n\"\\])", lambda m: _escape_character(m.group(1)), value) + return re.sub(r"([\n\"\\])", _escape_character, value) -- cgit 1.4.1 From 1dfd650348251508ac044b3db114092b1e857a74 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 2 May 2018 22:42:36 +0100 Subject: add missing param to cancelled_to_request_timed_out_error This gets two arguments, not one. --- synapse/http/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/http/__init__.py b/synapse/http/__init__.py index 0d47ccdb59..054372e179 100644 --- a/synapse/http/__init__.py +++ b/synapse/http/__init__.py @@ -25,7 +25,7 @@ class RequestTimedOutError(SynapseError): super(RequestTimedOutError, self).__init__(504, "Timed out") -def cancelled_to_request_timed_out_error(value): +def cancelled_to_request_timed_out_error(value, timeout): """Turns CancelledErrors into RequestTimedOutErrors. For use with async.add_timeout_to_deferred -- cgit 1.4.1 From 31c7c29d4304f7577a1f5dc1740fe3a248351500 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 3 May 2018 10:38:08 +0100 Subject: Fix up grammar --- synapse/storage/events.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index a9269707df..05cde96afc 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -556,9 +556,9 @@ class EventsStore(EventsWorkerStore): state_groups_map[ctx.state_group] = ctx.current_state_ids - # We need to map the event_ids to their state groups. First, lets - # check if the event is one we're persisting and then we can pull the - # state group from its context. + # We need to map the event_ids to their state groups. First, let's + # check if the event is one we're persisting, in which case we can + # pull the state group from its context. # Otherwise we need to pull the state group from the database. # Set of events we need to fetch groups for. (We know none of the old -- cgit 1.4.1 From a0501ac57ea597efaa32e66c57c2f87036ec8b32 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 3 May 2018 10:51:39 +0100 Subject: Warn of potential client incompatibility from #3161 --- CHANGES.rst | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index 317846d2a2..94b83027e2 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,18 @@ +Changes in synapse +=============================== + +Potentially breaking change: + +* Make Client-Server API return 403 for invalid token (PR #3161). + + This changes the Client-server spec to return a 403 error code instead of 401 + when the access token is unrecognised. This is the behaviour required by the + specification, but some clients may be relying on the old, incorrect + behaviour. + + Thanks to @NotAFile for fixing this. 
+ + Changes in synapse v0.28.1 (2018-05-01) ======================================= -- cgit 1.4.1 From d72faf2fad9cd4a2f7a82bc35d7e74c36fc49e4f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 3 May 2018 10:56:42 +0100 Subject: Fix changes warning --- CHANGES.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 94b83027e2..9d40b2ac1e 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -3,9 +3,9 @@ Changes in synapse Potentially breaking change: -* Make Client-Server API return 403 for invalid token (PR #3161). +* Make Client-Server API return 401 for invalid token (PR #3161). - This changes the Client-server spec to return a 403 error code instead of 401 + This changes the Client-server spec to return a 401 error code instead of 403 when the access token is unrecognised. This is the behaviour required by the specification, but some clients may be relying on the old, incorrect behaviour. -- cgit 1.4.1 From 2e7a94c36ba75a94114e4a9fb1aa6d4e3444e8b4 Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Thu, 3 May 2018 12:31:47 +0100 Subject: Don't abortConnection() if the transport connection has already closed. --- synapse/http/endpoint.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/synapse/http/endpoint.py b/synapse/http/endpoint.py index d8923c9abb..564ae4c10d 100644 --- a/synapse/http/endpoint.py +++ b/synapse/http/endpoint.py @@ -113,10 +113,15 @@ class _WrappedConnection(object): if time.time() - self.last_request >= 2.5 * 60: self.abort() # Abort the underlying TLS connection. The abort() method calls - # loseConnection() on the underlying TLS connection which tries to + # loseConnection() on the TLS connection which tries to # shutdown the connection cleanly. We call abortConnection() - # since that will promptly close the underlying TCP connection. - self.transport.abortConnection() + # since that will promptly close the TLS connection. + # + # In Twisted >18.4; the TLS connection will be None if it has closed + # which will make abortConnection() throw. Check that the TLS connection + # is not None before trying to close it. + if self.transport.getHandle() is not None: + self.transport.abortConnection() def request(self, request): self.last_request = time.time() -- cgit 1.4.1 From a7fe62f0cb7bdb40e5c9b7f09f694e9c0913610c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 3 May 2018 11:16:36 +0100 Subject: Fix logcontext leaks in rate limiter --- synapse/util/ratelimitutils.py | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py index 18424f6c36..0ab63c3d7d 100644 --- a/synapse/util/ratelimitutils.py +++ b/synapse/util/ratelimitutils.py @@ -18,7 +18,10 @@ from twisted.internet import defer from synapse.api.errors import LimitExceededError from synapse.util.async import sleep -from synapse.util.logcontext import run_in_background +from synapse.util.logcontext import ( + run_in_background, make_deferred_yieldable, + PreserveLoggingContext, +) import collections import contextlib @@ -176,6 +179,9 @@ class _PerHostRatelimiter(object): return r def on_err(r): + # XXX: why is this necessary? this is called before we start + # processing the request so why would the request be in + # current_processing? 
self.current_processing.discard(request_id) return r @@ -187,7 +193,7 @@ class _PerHostRatelimiter(object): ret_defer.addCallbacks(on_start, on_err) ret_defer.addBoth(on_both) - return ret_defer + return make_deferred_yieldable(ret_defer) def _on_exit(self, request_id): logger.debug( @@ -197,7 +203,12 @@ class _PerHostRatelimiter(object): self.current_processing.discard(request_id) try: request_id, deferred = self.ready_request_queue.popitem() + + # XXX: why do we do the following? the on_start callback above will + # do it for us. self.current_processing.add(request_id) - deferred.callback(None) + + with PreserveLoggingContext(): + deferred.callback(None) except KeyError: pass -- cgit 1.4.1 From 95b6912045b5d23ae823d6733beb7f315b010528 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 3 May 2018 15:51:04 +0100 Subject: Fix metrics that have integer value labels --- synapse/metrics/metric.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/metrics/metric.py b/synapse/metrics/metric.py index fbba94e633..98d5ca7b6c 100644 --- a/synapse/metrics/metric.py +++ b/synapse/metrics/metric.py @@ -324,4 +324,4 @@ def _escape_character(m): def _escape_label_value(value): """Takes a label value and escapes quotes, newlines and backslashes """ - return re.sub(r"([\n\"\\])", _escape_character, value) + return re.sub(r"([\n\"\\])", _escape_character, str(value)) -- cgit 1.4.1 From 6d8ec3462d33daa5d390fca206ec2e281c27c38f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 3 May 2018 16:25:05 +0100 Subject: Note that label values can be anything --- synapse/metrics/metric.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/metrics/metric.py b/synapse/metrics/metric.py index 98d5ca7b6c..f421e7a93f 100644 --- a/synapse/metrics/metric.py +++ b/synapse/metrics/metric.py @@ -71,7 +71,8 @@ class BaseMetric(object): """Render this metric for a single set of labels Args: - label_values (list[str]): values for each of the labels + label_values (list[object]): values for each of the labels, + (which get stringified). value: value of the metric at with these labels Returns: -- cgit 1.4.1 From 5addeaa02c432241af282900b19172ba7158118c Mon Sep 17 00:00:00 2001 From: kaiyou Date: Fri, 4 May 2018 21:23:01 +0200 Subject: Add Docker packaging in the author list --- AUTHORS.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/AUTHORS.rst b/AUTHORS.rst index 3dcb1c2a89..e13ac5ad34 100644 --- a/AUTHORS.rst +++ b/AUTHORS.rst @@ -60,3 +60,6 @@ Niklas Riekenbrauck Christoph Witzany * Add LDAP support for authentication + +Pierre Jaury +* Docker packaging \ No newline at end of file -- cgit 1.4.1 From 88868b283994b9a73634d7153fd7104257475349 Mon Sep 17 00:00:00 2001 From: Konstantinos Sideris Date: Sat, 5 May 2018 12:55:02 +0300 Subject: notifications: Convert next_token to string according to the spec Currently the parameter is serialized as an integer. 
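For illustration, a minimal sketch of the client-side loop this change
supports (the `fetch_json` helper is hypothetical and error handling is
omitted; the point is that `next_token` now round-trips as an opaque string,
which is how pagination tokens are meant to be treated):

```python
# Hypothetical client: page through the notifications endpoint until the
# server stops returning a next_token, treating the token as opaque.
def fetch_all_notifications(fetch_json):
    notifications = []
    token = None
    while True:
        params = {"from": token} if token is not None else {}
        body = fetch_json("/_matrix/client/r0/notifications", params)
        notifications.extend(body["notifications"])
        token = body.get("next_token")  # a str after this change
        if token is None:
            break
    return notifications
```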
Signed-off-by: Konstantinos Sideris --- synapse/rest/client/v2_alpha/notifications.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/rest/client/v2_alpha/notifications.py b/synapse/rest/client/v2_alpha/notifications.py index ec170109fe..66583d6778 100644 --- a/synapse/rest/client/v2_alpha/notifications.py +++ b/synapse/rest/client/v2_alpha/notifications.py @@ -88,7 +88,7 @@ class NotificationsServlet(RestServlet): pa["topological_ordering"], pa["stream_ordering"] ) returned_push_actions.append(returned_pa) - next_token = pa["stream_ordering"] + next_token = str(pa["stream_ordering"]) defer.returnValue((200, { "notifications": returned_push_actions, -- cgit 1.4.1 From 06c0d0ed081b56716135c4881e600861b2e8cad5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 8 May 2018 15:45:38 +0100 Subject: Split paginate_room_events storage function --- synapse/storage/stream.py | 100 +++++++++++++++++++++++++++++++++------------- 1 file changed, 72 insertions(+), 28 deletions(-) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index f0784ba137..b57a8a7ef6 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -738,17 +738,28 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): def has_room_changed_since(self, room_id, stream_id): return self._events_stream_cache.has_entity_changed(room_id, stream_id) + def paginate_room_events_txn(self, txn, room_id, from_key, to_key=None, + direction='b', limit=-1, event_filter=None): + """Returns list of events before or after a given token. -class StreamStore(StreamWorkerStore): - def get_room_max_stream_ordering(self): - return self._stream_id_gen.get_current_token() - - def get_room_min_stream_ordering(self): - return self._backfill_id_gen.get_current_token() + Args: + txn + room_id (str) + from_key (str): The token used to stream from + to_key (str|None): A token which if given limits the results to + only those before + direction(char): Either 'b' or 'f' to indicate whether we are + paginating forwards or backwards from `from_key`. + limit (int): The maximum number of events to return. Zero or less + means no limit. + event_filter (Filter|None): If provided filters the events to + those that match the filter. - @defer.inlineCallbacks - def paginate_room_events(self, room_id, from_key, to_key=None, - direction='b', limit=-1, event_filter=None): + Returns: + tuple[list[dict], str]: Returns the results as a list of dicts and + a token that points to the end of the result set. The dicts have + the keys "event_id", "toplogical_ordering" and "stream_orderign". + """ # Tokens really represent positions between elements, but we use # the convention of pointing to the event before the gap. Hence # we have a bit of asymmetry when it comes to equalities. @@ -795,29 +806,54 @@ class StreamStore(StreamWorkerStore): "limit": limit_str } - def f(txn): - txn.execute(sql, args) + txn.execute(sql, args) - rows = self.cursor_to_dict(txn) + rows = self.cursor_to_dict(txn) - if rows: - topo = rows[-1]["topological_ordering"] - toke = rows[-1]["stream_ordering"] - if direction == 'b': - # Tokens are positions between events. - # This token points *after* the last event in the chunk. - # We need it to point to the event before it in the chunk - # when we are going backwards so we subtract one from the - # stream part. - toke -= 1 - next_token = str(RoomStreamToken(topo, toke)) - else: - # TODO (erikj): We should work out what to do here instead. 
- next_token = to_key if to_key else from_key + if rows: + topo = rows[-1]["topological_ordering"] + toke = rows[-1]["stream_ordering"] + if direction == 'b': + # Tokens are positions between events. + # This token points *after* the last event in the chunk. + # We need it to point to the event before it in the chunk + # when we are going backwards so we subtract one from the + # stream part. + toke -= 1 + next_token = str(RoomStreamToken(topo, toke)) + else: + # TODO (erikj): We should work out what to do here instead. + next_token = to_key if to_key else from_key + + return rows, next_token, - return rows, next_token, + @defer.inlineCallbacks + def paginate_room_events(self, room_id, from_key, to_key=None, + direction='b', limit=-1, event_filter=None): + """Returns list of events before or after a given token. - rows, token = yield self.runInteraction("paginate_room_events", f) + Args: + room_id (str) + from_key (str): The token used to stream from + to_key (str|None): A token which if given limits the results to + only those before + direction(char): Either 'b' or 'f' to indicate whether we are + paginating forwards or backwards from `from_key`. + limit (int): The maximum number of events to return. Zero or less + means no limit. + event_filter (Filter|None): If provided filters the events to + those that match the filter. + + Returns: + tuple[list[dict], str]: Returns the results as a list of dicts and + a token that points to the end of the result set. The dicts have + the keys "event_id", "toplogical_ordering" and "stream_orderign". + """ + + rows, token = yield self.runInteraction( + "paginate_room_events", self.paginate_room_events_txn, + room_id, from_key, to_key, direction, limit, event_filter, + ) events = yield self._get_events( [r["event_id"] for r in rows], @@ -827,3 +863,11 @@ class StreamStore(StreamWorkerStore): self._set_before_and_after(events, rows) defer.returnValue((events, token)) + + +class StreamStore(StreamWorkerStore): + def get_room_max_stream_ordering(self): + return self._stream_id_gen.get_current_token() + + def get_room_min_stream_ordering(self): + return self._backfill_id_gen.get_current_token() -- cgit 1.4.1 From 274b8c6025e15eede0137bcb6a73ac00bf370ee8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 8 May 2018 16:15:07 +0100 Subject: Only fetch required fields from database --- synapse/storage/stream.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index b57a8a7ef6..d3adb0bf37 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -796,7 +796,8 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): limit_str = "" sql = ( - "SELECT * FROM events" + "SELECT event_id, topological_ordering, stream_ordering" + " FROM events" " WHERE outlier = ? AND room_id = ? 
AND %(bounds)s" " ORDER BY topological_ordering %(order)s," " stream_ordering %(order)s %(limit)s" -- cgit 1.4.1 From 3e6d306e94327983b1f6b66cec9faace642d3f16 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 8 May 2018 16:18:58 +0100 Subject: Parse tokens before calling DB function --- synapse/storage/stream.py | 32 ++++++++++++++++++-------------- 1 file changed, 18 insertions(+), 14 deletions(-) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index d3adb0bf37..ce98587608 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -738,16 +738,16 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): def has_room_changed_since(self, room_id, stream_id): return self._events_stream_cache.has_entity_changed(room_id, stream_id) - def paginate_room_events_txn(self, txn, room_id, from_key, to_key=None, + def paginate_room_events_txn(self, txn, room_id, from_token, to_token=None, direction='b', limit=-1, event_filter=None): """Returns list of events before or after a given token. Args: txn room_id (str) - from_key (str): The token used to stream from - to_key (str|None): A token which if given limits the results to - only those before + from_token (RoomStreamToken): The token used to stream from + to_token (RoomStreamToken|None): A token which if given limits the + results to only those before direction(char): Either 'b' or 'f' to indicate whether we are paginating forwards or backwards from `from_key`. limit (int): The maximum number of events to return. Zero or less @@ -757,7 +757,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): Returns: tuple[list[dict], str]: Returns the results as a list of dicts and - a token that points to the end of the result set. The dicts have + a token that points to the end of the result set. The dicts haveq the keys "event_id", "toplogical_ordering" and "stream_orderign". """ # Tokens really represent positions between elements, but we use @@ -767,20 +767,20 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): if direction == 'b': order = "DESC" bounds = upper_bound( - RoomStreamToken.parse(from_key), self.database_engine + from_token, self.database_engine ) - if to_key: + if to_token: bounds = "%s AND %s" % (bounds, lower_bound( - RoomStreamToken.parse(to_key), self.database_engine + to_token, self.database_engine )) else: order = "ASC" bounds = lower_bound( - RoomStreamToken.parse(from_key), self.database_engine + from_token, self.database_engine ) - if to_key: + if to_token: bounds = "%s AND %s" % (bounds, upper_bound( - RoomStreamToken.parse(to_key), self.database_engine + to_token, self.database_engine )) filter_clause, filter_args = filter_to_clause(event_filter) @@ -821,12 +821,12 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): # when we are going backwards so we subtract one from the # stream part. toke -= 1 - next_token = str(RoomStreamToken(topo, toke)) + next_token = RoomStreamToken(topo, toke) else: # TODO (erikj): We should work out what to do here instead. - next_token = to_key if to_key else from_key + next_token = to_token if to_token else from_token - return rows, next_token, + return rows, str(next_token), @defer.inlineCallbacks def paginate_room_events(self, room_id, from_key, to_key=None, @@ -851,6 +851,10 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): the keys "event_id", "toplogical_ordering" and "stream_orderign". 
""" + from_key = RoomStreamToken.parse(from_key) + if to_key: + to_key = RoomStreamToken.parse(to_key) + rows, token = yield self.runInteraction( "paginate_room_events", self.paginate_room_events_txn, room_id, from_key, to_key, direction, limit, event_filter, -- cgit 1.4.1 From 696f5324539b014a5a9613c3a534abb2b3725dac Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 8 May 2018 16:19:33 +0100 Subject: Reuse existing pagination code for context API --- synapse/storage/stream.py | 90 ++++++++--------------------------------------- 1 file changed, 15 insertions(+), 75 deletions(-) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index ce98587608..938a8809dc 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -42,7 +42,7 @@ from synapse.util.caches.descriptors import cached from synapse.types import RoomStreamToken from synapse.util.caches.stream_change_cache import StreamChangeCache from synapse.util.logcontext import make_deferred_yieldable, run_in_background -from synapse.storage.engines import PostgresEngine, Sqlite3Engine +from synapse.storage.engines import PostgresEngine import abc import logging @@ -595,88 +595,28 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): retcols=["stream_ordering", "topological_ordering"], ) - token = RoomStreamToken( - results["topological_ordering"], + # Paginating backwards includes the event at the token, but paginating + # forward doesn't. + before_token = RoomStreamToken( + results["topological_ordering"] - 1, results["stream_ordering"], ) - if isinstance(self.database_engine, Sqlite3Engine): - # SQLite3 doesn't optimise ``(x < a) OR (x = a AND y < b)`` - # So we give pass it to SQLite3 as the UNION ALL of the two queries. - - query_before = ( - "SELECT topological_ordering, stream_ordering, event_id FROM events" - " WHERE room_id = ? AND topological_ordering < ?" - " UNION ALL" - " SELECT topological_ordering, stream_ordering, event_id FROM events" - " WHERE room_id = ? AND topological_ordering = ? AND stream_ordering < ?" - " ORDER BY topological_ordering DESC, stream_ordering DESC LIMIT ?" - ) - before_args = ( - room_id, token.topological, - room_id, token.topological, token.stream, - before_limit, - ) - - query_after = ( - "SELECT topological_ordering, stream_ordering, event_id FROM events" - " WHERE room_id = ? AND topological_ordering > ?" - " UNION ALL" - " SELECT topological_ordering, stream_ordering, event_id FROM events" - " WHERE room_id = ? AND topological_ordering = ? AND stream_ordering > ?" - " ORDER BY topological_ordering ASC, stream_ordering ASC LIMIT ?" - ) - after_args = ( - room_id, token.topological, - room_id, token.topological, token.stream, - after_limit, - ) - else: - query_before = ( - "SELECT topological_ordering, stream_ordering, event_id FROM events" - " WHERE room_id = ? AND %s" - " ORDER BY topological_ordering DESC, stream_ordering DESC LIMIT ?" - ) % (upper_bound(token, self.database_engine, inclusive=False),) - - before_args = (room_id, before_limit) - - query_after = ( - "SELECT topological_ordering, stream_ordering, event_id FROM events" - " WHERE room_id = ? AND %s" - " ORDER BY topological_ordering ASC, stream_ordering ASC LIMIT ?" 
- ) % (lower_bound(token, self.database_engine, inclusive=False),) - - after_args = (room_id, after_limit) - - txn.execute(query_before, before_args) + after_token = RoomStreamToken( + results["topological_ordering"], + results["stream_ordering"], + ) - rows = self.cursor_to_dict(txn) + rows, start_token = self.paginate_room_events_txn( + txn, room_id, before_token, direction='b', limit=before_limit, + ) events_before = [r["event_id"] for r in rows] - if rows: - start_token = str(RoomStreamToken( - rows[0]["topological_ordering"], - rows[0]["stream_ordering"] - 1, - )) - else: - start_token = str(RoomStreamToken( - token.topological, - token.stream - 1, - )) - - txn.execute(query_after, after_args) - - rows = self.cursor_to_dict(txn) + rows, end_token = self.paginate_room_events_txn( + txn, room_id, after_token, direction='f', limit=after_limit, + ) events_after = [r["event_id"] for r in rows] - if rows: - end_token = str(RoomStreamToken( - rows[-1]["topological_ordering"], - rows[-1]["stream_ordering"], - )) - else: - end_token = str(token) - return { "before": { "event_ids": events_before, -- cgit 1.4.1 From 6c957e26f06f5806ae2e69838865ee2990a75258 Mon Sep 17 00:00:00 2001 From: rubo77 Date: Wed, 9 May 2018 00:14:01 +0200 Subject: nuke-room-from-db.sh: added postgresql option and help --- scripts-dev/nuke-room-from-db.sh | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/scripts-dev/nuke-room-from-db.sh b/scripts-dev/nuke-room-from-db.sh index 1201d176c2..cd74d78d6a 100755 --- a/scripts-dev/nuke-room-from-db.sh +++ b/scripts-dev/nuke-room-from-db.sh @@ -6,9 +6,19 @@ ## Do not run it lightly. +set -e + +if [ "$1" == "-h" ] || [ "$1" == "" ]; then + echo "Call with ROOM_ID as first option and then pipe it into the database. 
So for instance you might run" + echo " nuke-room-from-db.sh | sqlite3 homeserver.db" + echo "or" + echo " nuke-room-from-db.sh | psql --dbname=synapse" + exit +fi + ROOMID="$1" -sqlite3 homeserver.db < Date: Wed, 9 May 2018 00:21:00 +0200 Subject: Dont nuke non-existing table event_search_content --- scripts-dev/nuke-room-from-db.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/scripts-dev/nuke-room-from-db.sh b/scripts-dev/nuke-room-from-db.sh index 1201d176c2..53617f21f1 100755 --- a/scripts-dev/nuke-room-from-db.sh +++ b/scripts-dev/nuke-room-from-db.sh @@ -29,7 +29,6 @@ DELETE FROM state_groups WHERE room_id = '$ROOMID'; DELETE FROM state_groups_state WHERE room_id = '$ROOMID'; DELETE FROM receipts_graph WHERE room_id = '$ROOMID'; DELETE FROM receipts_linearized WHERE room_id = '$ROOMID'; -DELETE FROM event_search_content WHERE c1room_id = '$ROOMID'; DELETE FROM guest_access WHERE room_id = '$ROOMID'; DELETE FROM history_visibility WHERE room_id = '$ROOMID'; DELETE FROM room_tags WHERE room_id = '$ROOMID'; -- cgit 1.4.1 From d11b8b6b659957b28c2bbf8bc6ca060284097377 Mon Sep 17 00:00:00 2001 From: rubo77 Date: Wed, 9 May 2018 00:46:47 +0200 Subject: nuke-room-from-db.sh: nuke from table event_search too --- scripts-dev/nuke-room-from-db.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts-dev/nuke-room-from-db.sh b/scripts-dev/nuke-room-from-db.sh index 53617f21f1..2cfd71b0b8 100755 --- a/scripts-dev/nuke-room-from-db.sh +++ b/scripts-dev/nuke-room-from-db.sh @@ -29,6 +29,7 @@ DELETE FROM state_groups WHERE room_id = '$ROOMID'; DELETE FROM state_groups_state WHERE room_id = '$ROOMID'; DELETE FROM receipts_graph WHERE room_id = '$ROOMID'; DELETE FROM receipts_linearized WHERE room_id = '$ROOMID'; +DELETE FROM event_search WHERE room_id = '$ROOMID'; DELETE FROM guest_access WHERE room_id = '$ROOMID'; DELETE FROM history_visibility WHERE room_id = '$ROOMID'; DELETE FROM room_tags WHERE room_id = '$ROOMID'; -- cgit 1.4.1 From 23ec51c94ce90fd70931fbe98cadb7dd51a9db4e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 9 May 2018 09:55:19 +0100 Subject: Fix up comments and make function private --- synapse/storage/stream.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 938a8809dc..54be025401 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -607,12 +607,12 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): results["stream_ordering"], ) - rows, start_token = self.paginate_room_events_txn( + rows, start_token = self._paginate_room_events_txn( txn, room_id, before_token, direction='b', limit=before_limit, ) events_before = [r["event_id"] for r in rows] - rows, end_token = self.paginate_room_events_txn( + rows, end_token = self._paginate_room_events_txn( txn, room_id, after_token, direction='f', limit=after_limit, ) events_after = [r["event_id"] for r in rows] @@ -678,8 +678,8 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): def has_room_changed_since(self, room_id, stream_id): return self._events_stream_cache.has_entity_changed(room_id, stream_id) - def paginate_room_events_txn(self, txn, room_id, from_token, to_token=None, - direction='b', limit=-1, event_filter=None): + def _paginate_room_events_txn(self, txn, room_id, from_token, to_token=None, + direction='b', limit=-1, event_filter=None): """Returns list of events before or after a given token. 
Args: @@ -697,8 +697,8 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): Returns: tuple[list[dict], str]: Returns the results as a list of dicts and - a token that points to the end of the result set. The dicts haveq - the keys "event_id", "toplogical_ordering" and "stream_orderign". + a token that points to the end of the result set. The dicts have + the keys "event_id", "toplogical_ordering" and "stream_ordering". """ # Tokens really represent positions between elements, but we use # the convention of pointing to the event before the gap. Hence @@ -796,7 +796,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): to_key = RoomStreamToken.parse(to_key) rows, token = yield self.runInteraction( - "paginate_room_events", self.paginate_room_events_txn, + "paginate_room_events", self._paginate_room_events_txn, room_id, from_key, to_key, direction, limit, event_filter, ) -- cgit 1.4.1 From 27cf170558865d0afeea5d4c2524e8664c28323b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 9 May 2018 10:53:29 +0100 Subject: Refactor recent events func to use pagination func This also removes a cache that is unlikely to ever get hit. --- synapse/storage/stream.py | 75 +++++++++++++++++------------------------------ 1 file changed, 27 insertions(+), 48 deletions(-) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 54be025401..da43bb1321 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -38,7 +38,6 @@ from twisted.internet import defer from synapse.storage._base import SQLBaseStore from synapse.storage.events import EventsWorkerStore -from synapse.util.caches.descriptors import cached from synapse.types import RoomStreamToken from synapse.util.caches.stream_change_cache import StreamChangeCache from synapse.util.logcontext import make_deferred_yieldable, run_in_background @@ -363,60 +362,40 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): defer.returnValue((events, token)) - @cached(num_args=4) + @defer.inlineCallbacks def get_recent_event_ids_for_room(self, room_id, limit, end_token, from_token=None): - end_token = RoomStreamToken.parse_stream_token(end_token) - - if from_token is None: - sql = ( - "SELECT stream_ordering, topological_ordering, event_id" - " FROM events" - " WHERE room_id = ? AND stream_ordering <= ? AND outlier = ?" - " ORDER BY topological_ordering DESC, stream_ordering DESC" - " LIMIT ?" - ) - else: - from_token = RoomStreamToken.parse_stream_token(from_token) - sql = ( - "SELECT stream_ordering, topological_ordering, event_id" - " FROM events" - " WHERE room_id = ? AND stream_ordering > ?" - " AND stream_ordering <= ? AND outlier = ?" - " ORDER BY topological_ordering DESC, stream_ordering DESC" - " LIMIT ?" - ) - - def get_recent_events_for_room_txn(txn): - if from_token is None: - txn.execute(sql, (room_id, end_token.stream, False, limit,)) - else: - txn.execute(sql, ( - room_id, from_token.stream, end_token.stream, False, limit - )) + """Get the most recent events in the room in topological ordering. - rows = self.cursor_to_dict(txn) + Args: + room_id (str) + limit (int) + end_token (str): The stream token representing now. + from_token(str|None): Token to not return events before, if given. - rows.reverse() # As we selected with reverse ordering + Returns: + Deferred[tuple[list[dict], tuple[str, str]]]: Returns a list of + dicts (which include event_ids, etc), and a tuple for + `(start_token, end_token)` representing the range of rows + returned. + The returned events are in ascending order. 
+ """ + # Allow a zero limit here, and no-op. + if limit == 0: + defer.returnValue(([], (end_token, end_token))) - if rows: - # Tokens are positions between events. - # This token points *after* the last event in the chunk. - # We need it to point to the event before it in the chunk - # since we are going backwards so we subtract one from the - # stream part. - topo = rows[0]["topological_ordering"] - toke = rows[0]["stream_ordering"] - 1 - start_token = str(RoomStreamToken(topo, toke)) + end_token = RoomStreamToken.parse_stream_token(end_token) + if from_token is not None: + from_token = RoomStreamToken.parse(from_token) - token = (start_token, str(end_token)) - else: - token = (str(end_token), str(end_token)) + rows, token = yield self.runInteraction( + "get_recent_event_ids_for_room", self._paginate_room_events_txn, + room_id, from_token=end_token, to_token=from_token, limit=limit, + ) - return rows, token + # We want to return the results in ascending order. + rows.reverse() - return self.runInteraction( - "get_recent_events_for_room", get_recent_events_for_room_txn - ) + defer.returnValue((rows, (token, str(end_token)))) def get_room_event_after_stream_ordering(self, room_id, stream_ordering): """Gets details of the first event in a room at or after a stream ordering -- cgit 1.4.1 From 7dd13415dbf82edeaae0b94f835e20025964e1b4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 9 May 2018 10:58:16 +0100 Subject: Remove unused from_token param --- synapse/storage/stream.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index da43bb1321..ecd39074b8 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -346,9 +346,9 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): defer.returnValue(ret) @defer.inlineCallbacks - def get_recent_events_for_room(self, room_id, limit, end_token, from_token=None): + def get_recent_events_for_room(self, room_id, limit, end_token): rows, token = yield self.get_recent_event_ids_for_room( - room_id, limit, end_token, from_token + room_id, limit, end_token, ) logger.debug("stream before") @@ -363,14 +363,13 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): defer.returnValue((events, token)) @defer.inlineCallbacks - def get_recent_event_ids_for_room(self, room_id, limit, end_token, from_token=None): + def get_recent_event_ids_for_room(self, room_id, limit, end_token): """Get the most recent events in the room in topological ordering. Args: room_id (str) limit (int) end_token (str): The stream token representing now. - from_token(str|None): Token to not return events before, if given. Returns: Deferred[tuple[list[dict], tuple[str, str]]]: Returns a list of @@ -384,12 +383,10 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): defer.returnValue(([], (end_token, end_token))) end_token = RoomStreamToken.parse_stream_token(end_token) - if from_token is not None: - from_token = RoomStreamToken.parse(from_token) rows, token = yield self.runInteraction( "get_recent_event_ids_for_room", self._paginate_room_events_txn, - room_id, from_token=end_token, to_token=from_token, limit=limit, + room_id, from_token=end_token, limit=limit, ) # We want to return the results in ascending order. 
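Taken together, the two refactors above turn "get the most recent events in
a room" into a single backwards pagination from the current stream position,
with the page flipped into ascending order afterwards. A simplified,
standalone sketch of the resulting shape (names shortened and the
transaction plumbing elided; not the exact Synapse code):

```python
def recent_event_ids(paginate, room_id, now_token, limit):
    # Paginate backwards from "now": returns the newest `limit` events
    # (newest first) plus a token pointing to the start of the chunk.
    rows, start_token = paginate(
        room_id, from_token=now_token, direction='b', limit=limit,
    )
    # Callers expect ascending (oldest-first) order, so reverse the page.
    rows.reverse()
    return rows, start_token
```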
-- cgit 1.4.1 From 05e0a2462c76be6987c7ec3d9517d500583bac65 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 9 May 2018 11:18:23 +0100 Subject: Refactor pagination DB API to return concrete type This makes it easier to document what is being returned by the storage functions and what some functions expect as arguments. --- synapse/storage/stream.py | 76 ++++++++++++++++++++++++++++++----------------- 1 file changed, 48 insertions(+), 28 deletions(-) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index ecd39074b8..772d2c6198 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -47,6 +47,7 @@ import abc import logging from six.moves import range +from collections import namedtuple logger = logging.getLogger(__name__) @@ -59,6 +60,12 @@ _STREAM_TOKEN = "stream" _TOPOLOGICAL_TOKEN = "topological" +# Used as return values for pagination APIs +_EventDictReturn = namedtuple("_EventDictReturn", ( + "event_id", "topological_ordering", "stream_ordering", +)) + + def lower_bound(token, engine, inclusive=False): inclusive = "=" if inclusive else "" if token.topological is None: @@ -256,9 +263,13 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): " ORDER BY stream_ordering %s LIMIT ?" ) % (order,) txn.execute(sql, (room_id, from_id, to_id, limit)) + + rows = [_EventDictReturn(row[0], None, row[1]) for row in txn] else: sql = ( - "SELECT event_id, stream_ordering FROM events WHERE" + "SELECT event_id, topological_ordering, stream_ordering" + " FROM events" + " WHERE" " room_id = ?" " AND not outlier" " AND stream_ordering <= ?" @@ -266,14 +277,14 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): ) % (order, order,) txn.execute(sql, (room_id, to_id, limit)) - rows = self.cursor_to_dict(txn) + rows = [_EventDictReturn(row[0], row[1], row[2]) for row in txn] return rows rows = yield self.runInteraction("get_room_events_stream_for_room", f) ret = yield self._get_events( - [r["event_id"] for r in rows], + [r.event_id for r in rows], get_prev_content=True ) @@ -283,7 +294,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): ret.reverse() if rows: - key = "s%d" % min(r["stream_ordering"] for r in rows) + key = "s%d" % min(r.stream_ordering for r in rows) else: # Assume we didn't get anything because there was nothing to # get. @@ -330,14 +341,15 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): " ORDER BY stream_ordering ASC" ) txn.execute(sql, (user_id, to_id,)) - rows = self.cursor_to_dict(txn) + + rows = [_EventDictReturn(row[0], None, row[1]) for row in txn] return rows rows = yield self.runInteraction("get_membership_changes_for_user", f) ret = yield self._get_events( - [r["event_id"] for r in rows], + [r.event_id for r in rows], get_prev_content=True ) @@ -353,14 +365,14 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): logger.debug("stream before") events = yield self._get_events( - [r["event_id"] for r in rows], + [r.event_id for r in rows], get_prev_content=True ) logger.debug("stream after") self._set_before_and_after(events, rows) - defer.returnValue((events, token)) + defer.returnValue((events, (token, end_token))) @defer.inlineCallbacks def get_recent_event_ids_for_room(self, room_id, limit, end_token): @@ -372,15 +384,14 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): end_token (str): The stream token representing now. 
Returns: - Deferred[tuple[list[dict], tuple[str, str]]]: Returns a list of - dicts (which include event_ids, etc), and a tuple for - `(start_token, end_token)` representing the range of rows - returned. - The returned events are in ascending order. + Deferred[tuple[list[_EventDictReturn], str]]: Returns a list of + _EventDictReturn and a token pointint to the start of the returned + events. + The events returned are in ascending order. """ # Allow a zero limit here, and no-op. if limit == 0: - defer.returnValue(([], (end_token, end_token))) + defer.returnValue(([], end_token)) end_token = RoomStreamToken.parse_stream_token(end_token) @@ -392,7 +403,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): # We want to return the results in ascending order. rows.reverse() - defer.returnValue((rows, (token, str(end_token)))) + defer.returnValue((rows, token)) def get_room_event_after_stream_ordering(self, room_id, stream_ordering): """Gets details of the first event in a room at or after a stream ordering @@ -496,10 +507,19 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): @staticmethod def _set_before_and_after(events, rows, topo_order=True): + """Inserts ordering information to events' internal metadata from + the DB rows. + + Args: + events (list[FrozenEvent]) + rows (list[_EventDictReturn]) + topo_order (bool): Whether the events were ordered topologically + or by stream ordering + """ for event, row in zip(events, rows): - stream = row["stream_ordering"] - if topo_order: - topo = event.depth + stream = row.stream_ordering + if topo_order and row.topological_ordering: + topo = row.topological_ordering else: topo = None internal = event.internal_metadata @@ -586,12 +606,12 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): rows, start_token = self._paginate_room_events_txn( txn, room_id, before_token, direction='b', limit=before_limit, ) - events_before = [r["event_id"] for r in rows] + events_before = [r.event_id for r in rows] rows, end_token = self._paginate_room_events_txn( txn, room_id, after_token, direction='f', limit=after_limit, ) - events_after = [r["event_id"] for r in rows] + events_after = [r.event_id for r in rows] return { "before": { @@ -672,9 +692,9 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): those that match the filter. Returns: - tuple[list[dict], str]: Returns the results as a list of dicts and - a token that points to the end of the result set. The dicts have - the keys "event_id", "toplogical_ordering" and "stream_ordering". + tuple[list[_EventDictReturn], str]: Returns the results as a list + of _EventDictReturn and a token that points to the end of the + result set. """ # Tokens really represent positions between elements, but we use # the convention of pointing to the event before the gap. Hence @@ -725,11 +745,11 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): txn.execute(sql, args) - rows = self.cursor_to_dict(txn) + rows = [_EventDictReturn(row[0], row[1], row[2]) for row in txn] if rows: - topo = rows[-1]["topological_ordering"] - toke = rows[-1]["stream_ordering"] + topo = rows[-1].topological_ordering + toke = rows[-1].stream_ordering if direction == 'b': # Tokens are positions between events. # This token points *after* the last event in the chunk. @@ -764,7 +784,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): Returns: tuple[list[dict], str]: Returns the results as a list of dicts and a token that points to the end of the result set. 
The dicts have - the keys "event_id", "toplogical_ordering" and "stream_orderign". + the keys "event_id", "topological_ordering" and "stream_orderign". """ from_key = RoomStreamToken.parse(from_key) @@ -777,7 +797,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): ) events = yield self._get_events( - [r["event_id"] for r in rows], + [r.event_id for r in rows], get_prev_content=True ) -- cgit 1.4.1 From c4af4c24ca988832018feaf0ac5a2f6dbb8bfe68 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 9 May 2018 11:55:34 +0100 Subject: Refactor get_recent_events_for_room return type There is no reason to return a tuple of tokens when the last token is always the token passed as an argument. Changing it makes it consistent with other storage APIs --- synapse/handlers/initial_sync.py | 10 +++++----- synapse/handlers/sync.py | 2 +- synapse/storage/stream.py | 16 +++++++++++++++- 3 files changed, 21 insertions(+), 7 deletions(-) diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index cd33a86599..5a9aa0c16d 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -181,8 +181,8 @@ class InitialSyncHandler(BaseHandler): self.store, user_id, messages ) - start_token = now_token.copy_and_replace("room_key", token[0]) - end_token = now_token.copy_and_replace("room_key", token[1]) + start_token = now_token.copy_and_replace("room_key", token) + end_token = now_token.copy_and_replace("room_key", room_end_token) time_now = self.clock.time_msec() d["messages"] = { @@ -325,8 +325,8 @@ class InitialSyncHandler(BaseHandler): self.store, user_id, messages, is_peeking=is_peeking ) - start_token = StreamToken.START.copy_and_replace("room_key", token[0]) - end_token = StreamToken.START.copy_and_replace("room_key", token[1]) + start_token = StreamToken.START.copy_and_replace("room_key", token) + end_token = StreamToken.START.copy_and_replace("room_key", stream_token) time_now = self.clock.time_msec() @@ -409,7 +409,7 @@ class InitialSyncHandler(BaseHandler): ) start_token = now_token.copy_and_replace("room_key", token[0]) - end_token = now_token.copy_and_replace("room_key", token[1]) + end_token = now_token time_now = self.clock.time_msec() diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index b52e4c2aff..c25a76d215 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -429,7 +429,7 @@ class SyncHandler(object): Returns: A Deferred map from ((type, state_key)->Event) """ - last_events, token = yield self.store.get_recent_events_for_room( + last_events, _ = yield self.store.get_recent_events_for_room( room_id, end_token=stream_position.room_key, limit=1, ) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 772d2c6198..b5baacd32c 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -359,6 +359,20 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): @defer.inlineCallbacks def get_recent_events_for_room(self, room_id, limit, end_token): + """Get the most recent events in the room in topological ordering. + + Args: + room_id (str) + limit (int) + end_token (str): The stream token representing now. + + Returns: + Deferred[tuple[list[FrozenEvent], str]]: Returns a list of + events and a token pointint to the start of the returned + events. + The events returned are in ascending order. 
+ """ + rows, token = yield self.get_recent_event_ids_for_room( room_id, limit, end_token, ) @@ -372,7 +386,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): self._set_before_and_after(events, rows) - defer.returnValue((events, (token, end_token))) + defer.returnValue((events, token)) @defer.inlineCallbacks def get_recent_event_ids_for_room(self, room_id, limit, end_token): -- cgit 1.4.1 From e5ab9cd24b2f63c6ca00ae6354dbcbfcd9127dfb Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 9 May 2018 11:58:35 +0100 Subject: Don't unnecessarily require token to be stream token This allows calling the `get_recent_event_ids_for_room` function in more situations. --- synapse/storage/stream.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index b5baacd32c..5e4327bb96 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -407,7 +407,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): if limit == 0: defer.returnValue(([], end_token)) - end_token = RoomStreamToken.parse_stream_token(end_token) + end_token = RoomStreamToken.parse(end_token) rows, token = yield self.runInteraction( "get_recent_event_ids_for_room", self._paginate_room_events_txn, -- cgit 1.4.1 From e2accd7f1d21e34181dd4543eca30ad1ea971b4c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 9 May 2018 11:59:45 +0100 Subject: Refactor sync APIs to reuse pagination API The sync API often returns events in a topological rather than stream ordering, e.g. when the user joined the room or on initial sync. When this happens we can reuse existing pagination storage functions. --- synapse/handlers/sync.py | 19 ++++++++---- synapse/storage/stream.py | 73 +++++++++++++++++++++++------------------------ 2 files changed, 48 insertions(+), 44 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index c25a76d215..b75daa340d 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -354,12 +354,19 @@ class SyncHandler(object): since_key = since_token.room_key while limited and len(recents) < timeline_limit and max_repeat: - events, end_key = yield self.store.get_room_events_stream_for_room( - room_id, - limit=load_limit + 1, - from_key=since_key, - to_key=end_key, - ) + if since_key: + events, end_key = yield self.store.get_room_events_stream_for_room( + room_id, + limit=load_limit + 1, + from_key=since_key, + to_key=end_key, + ) + else: + events, end_key = yield self.store.get_recent_events_for_room( + room_id, + limit=load_limit + 1, + end_token=end_key, + ) loaded_recents = sync_config.filter_collection.filter_room_timeline( events ) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 5e4327bb96..8bb4e85709 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -233,52 +233,49 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): @defer.inlineCallbacks def get_room_events_stream_for_room(self, room_id, from_key, to_key, limit=0, order='DESC'): - # Note: If from_key is None then we return in topological order. This - # is because in that case we're using this as a "get the last few messages - # in a room" function, rather than "get new messages since last sync" - if from_key is not None: - from_id = RoomStreamToken.parse_stream_token(from_key).stream - else: - from_id = None - to_id = RoomStreamToken.parse_stream_token(to_key).stream + """Get new room events in stream ordering since `from_key`. 
+ + Args: + room_id (str) + from_key (str): Token from which no events are returned before + to_key (str): Token from which no events are returned after. (This + is typically the current stream token) + limit (int): Maximum number of events to return + order (str): Either "DESC" or "ASC". Determines which events are + returned when the result is limited. If "DESC" then the most + recent `limit` events are returned, otherwise returns the + oldest `limit` events. + + Returns: + Deferred[tuple[list[FrozenEvent], str]]: Returns the list of + events (in ascending order) and the token from the start of + the chunk of events returned. + """ if from_key == to_key: defer.returnValue(([], from_key)) - if from_id: - has_changed = yield self._events_stream_cache.has_entity_changed( - room_id, from_id - ) + from_id = RoomStreamToken.parse_stream_token(from_key).stream + to_id = RoomStreamToken.parse_stream_token(to_key).stream - if not has_changed: - defer.returnValue(([], from_key)) + has_changed = yield self._events_stream_cache.has_entity_changed( + room_id, from_id + ) - def f(txn): - if from_id is not None: - sql = ( - "SELECT event_id, stream_ordering FROM events WHERE" - " room_id = ?" - " AND not outlier" - " AND stream_ordering > ? AND stream_ordering <= ?" - " ORDER BY stream_ordering %s LIMIT ?" - ) % (order,) - txn.execute(sql, (room_id, from_id, to_id, limit)) - - rows = [_EventDictReturn(row[0], None, row[1]) for row in txn] - else: - sql = ( - "SELECT event_id, topological_ordering, stream_ordering" - " FROM events" - " WHERE" - " room_id = ?" - " AND not outlier" - " AND stream_ordering <= ?" - " ORDER BY topological_ordering %s, stream_ordering %s LIMIT ?" - ) % (order, order,) - txn.execute(sql, (room_id, to_id, limit)) + if not has_changed: + defer.returnValue(([], from_key)) - rows = [_EventDictReturn(row[0], row[1], row[2]) for row in txn] + def f(txn): + sql = ( + "SELECT event_id, stream_ordering FROM events WHERE" + " room_id = ?" + " AND not outlier" + " AND stream_ordering > ? AND stream_ordering <= ?" + " ORDER BY stream_ordering %s LIMIT ?" + ) % (order,) + txn.execute(sql, (room_id, from_id, to_id, limit)) + rows = [_EventDictReturn(row[0], None, row[1]) for row in txn] return rows rows = yield self.runInteraction("get_room_events_stream_for_room", f) -- cgit 1.4.1 From c0e08dc45b8996e88ff3fd6c7584c26f5ec2e839 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 9 May 2018 14:31:32 +0100 Subject: Remove unused code path from member change DB func The function is never called without a from_key, so we can remove all the handling for that scenario. 
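For context, a sketch of the one call pattern that remains (illustrative,
modelled on how the sync code drives this function, not an exact caller):
both stream bounds are always known up front, so the unbounded variant was
dead code.

```python
from twisted.internet import defer

@defer.inlineCallbacks
def membership_changes_for_sync(store, user_id, since_token, now_token):
    # Hypothetical wrapper: incremental sync always has a "since"
    # position, so changes are fetched for a bounded window.
    changes = yield store.get_membership_changes_for_user(
        user_id,
        from_key=since_token.room_key,  # never None in practice
        to_key=now_token.room_key,
    )
    defer.returnValue(changes)
```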
--- synapse/storage/stream.py | 34 ++++++++++------------------------ 1 file changed, 10 insertions(+), 24 deletions(-) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 772d2c6198..25aef32551 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -304,10 +304,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): @defer.inlineCallbacks def get_membership_changes_for_user(self, user_id, from_key, to_key): - if from_key is not None: - from_id = RoomStreamToken.parse_stream_token(from_key).stream - else: - from_id = None + from_id = RoomStreamToken.parse_stream_token(from_key).stream to_id = RoomStreamToken.parse_stream_token(to_key).stream if from_key == to_key: @@ -321,26 +318,15 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): defer.returnValue([]) def f(txn): - if from_id is not None: - sql = ( - "SELECT m.event_id, stream_ordering FROM events AS e," - " room_memberships AS m" - " WHERE e.event_id = m.event_id" - " AND m.user_id = ?" - " AND e.stream_ordering > ? AND e.stream_ordering <= ?" - " ORDER BY e.stream_ordering ASC" - ) - txn.execute(sql, (user_id, from_id, to_id,)) - else: - sql = ( - "SELECT m.event_id, stream_ordering FROM events AS e," - " room_memberships AS m" - " WHERE e.event_id = m.event_id" - " AND m.user_id = ?" - " AND stream_ordering <= ?" - " ORDER BY stream_ordering ASC" - ) - txn.execute(sql, (user_id, to_id,)) + sql = ( + "SELECT m.event_id, stream_ordering FROM events AS e," + " room_memberships AS m" + " WHERE e.event_id = m.event_id" + " AND m.user_id = ?" + " AND e.stream_ordering > ? AND e.stream_ordering <= ?" + " ORDER BY e.stream_ordering ASC" + ) + txn.execute(sql, (user_id, from_id, to_id,)) rows = [_EventDictReturn(row[0], None, row[1]) for row in txn] -- cgit 1.4.1 From 75552d2148f9f48add495cb7b18a378b0abf70b9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 9 May 2018 15:15:38 +0100 Subject: Update comments --- synapse/storage/stream.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 772d2c6198..60d2dca154 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -385,7 +385,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): Returns: Deferred[tuple[list[_EventDictReturn], str]]: Returns a list of - _EventDictReturn and a token pointint to the start of the returned + _EventDictReturn and a token pointing to the start of the returned events. The events returned are in ascending order. """ @@ -514,7 +514,8 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): events (list[FrozenEvent]) rows (list[_EventDictReturn]) topo_order (bool): Whether the events were ordered topologically - or by stream ordering + or by stream ordering. If true then all rows should have a non + null topological_ordering. """ for event, row in zip(events, rows): stream = row.stream_ordering @@ -692,9 +693,9 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): those that match the filter. Returns: - tuple[list[_EventDictReturn], str]: Returns the results as a list - of _EventDictReturn and a token that points to the end of the - result set. + Deferred[tuple[list[_EventDictReturn], str]]: Returns the results + as a list of _EventDictReturn and a token that points to the end + of the result set. """ # Tokens really represent positions between elements, but we use # the convention of pointing to the event before the gap. 
Hence -- cgit 1.4.1 From 7ce98804ff2ffa8ecacf63ddad469dc0221b655e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 9 May 2018 15:42:39 +0100 Subject: Fix up comment --- synapse/storage/stream.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index f529642101..b6d171eb9b 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -368,7 +368,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): Returns: Deferred[tuple[list[FrozenEvent], str]]: Returns a list of - events and a token pointint to the start of the returned + events and a token pointing to the start of the returned events. The events returned are in ascending order. """ -- cgit 1.4.1 From fcf55f225541bcffa4bcb32213e0fde74f7b5827 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 9 May 2018 15:43:00 +0100 Subject: Fix returned token is no longer a tuple --- synapse/handlers/initial_sync.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 5a9aa0c16d..71af86fe21 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -408,7 +408,7 @@ class InitialSyncHandler(BaseHandler): self.store, user_id, messages, is_peeking=is_peeking, ) - start_token = now_token.copy_and_replace("room_key", token[0]) + start_token = now_token.copy_and_replace("room_key", token) end_token = now_token time_now = self.clock.time_msec() -- cgit 1.4.1 From d26bec8a437e44a05febd02473a8dcd8ff2c9f58 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 9 May 2018 15:56:07 +0100 Subject: Add comment to sync as to why code path is split --- synapse/handlers/sync.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index b75daa340d..263e42dded 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -354,6 +354,11 @@ class SyncHandler(object): since_key = since_token.room_key while limited and len(recents) < timeline_limit and max_repeat: + # If we have a since_key then we are trying to get any events + # that have happened since `since_key` up to `end_key`, so we + # can just use `get_room_events_stream_for_room`. + # Otherwise, we want to return the last N events in the room + # in toplogical ordering. if since_key: events, end_key = yield self.store.get_room_events_stream_for_room( room_id, -- cgit 1.4.1 From 18e144fe088bda9e28697062cda62dca1e03724b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 9 May 2018 19:55:03 +0100 Subject: Move RequestsMetrics to its own file This is useful in its own right, because server.py is full of stuff; but more importantly, I want to do some refactoring that will cause a circular reference as it is. --- synapse/http/request_metrics.py | 147 ++++++++++++++++++++++++++++++++++++++++ synapse/http/server.py | 128 ++-------------------------------- 2 files changed, 151 insertions(+), 124 deletions(-) create mode 100644 synapse/http/request_metrics.py diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py new file mode 100644 index 0000000000..4a843a36a7 --- /dev/null +++ b/synapse/http/request_metrics.py @@ -0,0 +1,147 @@ +# -*- coding: utf-8 -*- +# Copyright 2014-2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +import synapse.metrics +from synapse.util.logcontext import LoggingContext + +logger = logging.getLogger(__name__) + +metrics = synapse.metrics.get_metrics_for("synapse.http.server") + +# total number of responses served, split by method/servlet/tag +response_count = metrics.register_counter( + "response_count", + labels=["method", "servlet", "tag"], + alternative_names=( + # the following are all deprecated aliases for the same metric + metrics.name_prefix + x for x in ( + "_requests", + "_response_time:count", + "_response_ru_utime:count", + "_response_ru_stime:count", + "_response_db_txn_count:count", + "_response_db_txn_duration:count", + ) + ) +) + +requests_counter = metrics.register_counter( + "requests_received", + labels=["method", "servlet", ], +) + +outgoing_responses_counter = metrics.register_counter( + "responses", + labels=["method", "code"], +) + +response_timer = metrics.register_counter( + "response_time_seconds", + labels=["method", "servlet", "tag"], + alternative_names=( + metrics.name_prefix + "_response_time:total", + ), +) + +response_ru_utime = metrics.register_counter( + "response_ru_utime_seconds", labels=["method", "servlet", "tag"], + alternative_names=( + metrics.name_prefix + "_response_ru_utime:total", + ), +) + +response_ru_stime = metrics.register_counter( + "response_ru_stime_seconds", labels=["method", "servlet", "tag"], + alternative_names=( + metrics.name_prefix + "_response_ru_stime:total", + ), +) + +response_db_txn_count = metrics.register_counter( + "response_db_txn_count", labels=["method", "servlet", "tag"], + alternative_names=( + metrics.name_prefix + "_response_db_txn_count:total", + ), +) + +# seconds spent waiting for db txns, excluding scheduling time, when processing +# this request +response_db_txn_duration = metrics.register_counter( + "response_db_txn_duration_seconds", labels=["method", "servlet", "tag"], + alternative_names=( + metrics.name_prefix + "_response_db_txn_duration:total", + ), +) + +# seconds spent waiting for a db connection, when processing this request +response_db_sched_duration = metrics.register_counter( + "response_db_sched_duration_seconds", labels=["method", "servlet", "tag"] +) + +# size in bytes of the response written +response_size = metrics.register_counter( + "response_size", labels=["method", "servlet", "tag"] +) + + +class RequestMetrics(object): + def start(self, clock, name): + self.start = clock.time_msec() + self.start_context = LoggingContext.current_context() + self.name = name + + def stop(self, clock, request): + context = LoggingContext.current_context() + + tag = "" + if context: + tag = context.tag + + if context != self.start_context: + logger.warn( + "Context have unexpectedly changed %r, %r", + context, self.start_context + ) + return + + response_count.inc(request.method, self.name, tag) + + response_timer.inc_by( + clock.time_msec() - self.start, request.method, + self.name, tag + ) + + ru_utime, ru_stime = context.get_resource_usage() + + response_ru_utime.inc_by( + ru_utime, request.method, self.name, tag + ) + response_ru_stime.inc_by( + ru_stime, 
request.method, self.name, tag + ) + response_db_txn_count.inc_by( + context.db_txn_count, request.method, self.name, tag + ) + response_db_txn_duration.inc_by( + context.db_txn_duration_ms / 1000., request.method, self.name, tag + ) + response_db_sched_duration.inc_by( + context.db_sched_duration_ms / 1000., request.method, self.name, tag + ) + + response_size.inc_by(request.sentLength, request.method, self.name, tag) diff --git a/synapse/http/server.py b/synapse/http/server.py index 55b9ad5251..37b26b908e 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -18,6 +18,10 @@ from synapse.api.errors import ( cs_exception, SynapseError, CodeMessageException, UnrecognizedRequestError, Codes ) +from synapse.http.request_metrics import ( + RequestMetrics, requests_counter, + outgoing_responses_counter, +) from synapse.util.logcontext import LoggingContext, PreserveLoggingContext from synapse.util.caches import intern_dict from synapse.util.metrics import Measure @@ -41,82 +45,6 @@ import simplejson logger = logging.getLogger(__name__) -metrics = synapse.metrics.get_metrics_for(__name__) - -# total number of responses served, split by method/servlet/tag -response_count = metrics.register_counter( - "response_count", - labels=["method", "servlet", "tag"], - alternative_names=( - # the following are all deprecated aliases for the same metric - metrics.name_prefix + x for x in ( - "_requests", - "_response_time:count", - "_response_ru_utime:count", - "_response_ru_stime:count", - "_response_db_txn_count:count", - "_response_db_txn_duration:count", - ) - ) -) - -requests_counter = metrics.register_counter( - "requests_received", - labels=["method", "servlet", ], -) - -outgoing_responses_counter = metrics.register_counter( - "responses", - labels=["method", "code"], -) - -response_timer = metrics.register_counter( - "response_time_seconds", - labels=["method", "servlet", "tag"], - alternative_names=( - metrics.name_prefix + "_response_time:total", - ), -) - -response_ru_utime = metrics.register_counter( - "response_ru_utime_seconds", labels=["method", "servlet", "tag"], - alternative_names=( - metrics.name_prefix + "_response_ru_utime:total", - ), -) - -response_ru_stime = metrics.register_counter( - "response_ru_stime_seconds", labels=["method", "servlet", "tag"], - alternative_names=( - metrics.name_prefix + "_response_ru_stime:total", - ), -) - -response_db_txn_count = metrics.register_counter( - "response_db_txn_count", labels=["method", "servlet", "tag"], - alternative_names=( - metrics.name_prefix + "_response_db_txn_count:total", - ), -) - -# seconds spent waiting for db txns, excluding scheduling time, when processing -# this request -response_db_txn_duration = metrics.register_counter( - "response_db_txn_duration_seconds", labels=["method", "servlet", "tag"], - alternative_names=( - metrics.name_prefix + "_response_db_txn_duration:total", - ), -) - -# seconds spent waiting for a db connection, when processing this request -response_db_sched_duration = metrics.register_counter( - "response_db_sched_duration_seconds", labels=["method", "servlet", "tag"] -) - -# size in bytes of the response written -response_size = metrics.register_counter( - "response_size", labels=["method", "servlet", "tag"] -) _next_request_id = 0 @@ -386,54 +314,6 @@ def _unrecognised_request_handler(request): raise UnrecognizedRequestError() -class RequestMetrics(object): - def start(self, clock, name): - self.start = clock.time_msec() - self.start_context = LoggingContext.current_context() - self.name = 
name - - def stop(self, clock, request): - context = LoggingContext.current_context() - - tag = "" - if context: - tag = context.tag - - if context != self.start_context: - logger.warn( - "Context have unexpectedly changed %r, %r", - context, self.start_context - ) - return - - response_count.inc(request.method, self.name, tag) - - response_timer.inc_by( - clock.time_msec() - self.start, request.method, - self.name, tag - ) - - ru_utime, ru_stime = context.get_resource_usage() - - response_ru_utime.inc_by( - ru_utime, request.method, self.name, tag - ) - response_ru_stime.inc_by( - ru_stime, request.method, self.name, tag - ) - response_db_txn_count.inc_by( - context.db_txn_count, request.method, self.name, tag - ) - response_db_txn_duration.inc_by( - context.db_txn_duration_ms / 1000., request.method, self.name, tag - ) - response_db_sched_duration.inc_by( - context.db_sched_duration_ms / 1000., request.method, self.name, tag - ) - - response_size.inc_by(request.sentLength, request.method, self.name, tag) - - class RootRedirect(resource.Resource): """Redirects the root '/' path to another path.""" -- cgit 1.4.1 From 8460e48d0639a7f575fd3e57dcd5cf263a6d6a19 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 9 May 2018 23:00:11 +0100 Subject: Move request_id management into SynapseRequest --- synapse/http/server.py | 31 ++++++++++++++++--------------- synapse/http/site.py | 9 +++++++++ 2 files changed, 25 insertions(+), 15 deletions(-) diff --git a/synapse/http/server.py b/synapse/http/server.py index 37b26b908e..369f7bbdd1 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -46,38 +46,40 @@ import simplejson logger = logging.getLogger(__name__) -_next_request_id = 0 - - def request_handler(include_metrics=False): """Decorator for ``wrap_request_handler``""" return lambda request_handler: wrap_request_handler(request_handler, include_metrics) def wrap_request_handler(request_handler, include_metrics=False): - """Wraps a method that acts as a request handler with the necessary logging - and exception handling. + """Wraps a request handler method with the necessary logging and exception + handling. - The method must have a signature of "handle_foo(self, request)". The - argument "self" must have "version_string" and "clock" attributes. The - argument "request" must be a twisted HTTP request. + The handler method must have a signature of "handle_foo(self, request)", + where "self" must have "version_string" and "clock" attributes (and + "request" must be a SynapseRequest). - The method must return a deferred. If the deferred succeeds we assume that + The handler must return a deferred. If the deferred succeeds we assume that a response has been sent. If the deferred fails with a SynapseError we use it to send a JSON response with the appropriate HTTP reponse code. If the deferred fails with any other type of error we send a 500 reponse. - We insert a unique request-id into the logging context for this request and - log the response and duration for this request. + As well as calling `request.processing` (which will log the response and + duration for this request), the wrapped request handler will insert the + request id into the logging context. 
""" @defer.inlineCallbacks def wrapped_request_handler(self, request): - global _next_request_id - request_id = "%s-%s" % (request.method, _next_request_id) - _next_request_id += 1 + """ + Args: + self: + request (synapse.http.site.SynapseRequest): + """ + request_id = request.get_request_id() with LoggingContext(request_id) as request_context: + request_context.request = request_id with Measure(self.clock, "wrapped_request_handler"): request_metrics = RequestMetrics() # we start the request metrics timer here with an initial stab @@ -87,7 +89,6 @@ def wrap_request_handler(request_handler, include_metrics=False): servlet_name = self.__class__.__name__ request_metrics.start(self.clock, name=servlet_name) - request_context.request = request_id with request.processing(): try: with PreserveLoggingContext(request_context): diff --git a/synapse/http/site.py b/synapse/http/site.py index c8b46e1af2..6af276e69a 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -22,6 +22,8 @@ import time ACCESS_TOKEN_RE = re.compile(br'(\?.*access(_|%5[Ff])token=)[^&]*(.*)$') +_next_request_seq = 0 + class SynapseRequest(Request): def __init__(self, site, *args, **kw): @@ -30,6 +32,10 @@ class SynapseRequest(Request): self.authenticated_entity = None self.start_time = 0 + global _next_request_seq + self.request_seq = _next_request_seq + _next_request_seq += 1 + def __repr__(self): # We overwrite this so that we don't log ``access_token`` return '<%s at 0x%x method=%s uri=%s clientproto=%s site=%s>' % ( @@ -41,6 +47,9 @@ class SynapseRequest(Request): self.site.site_tag, ) + def get_request_id(self): + return "%s-%i" % (self.method, self.request_seq) + def get_redacted_uri(self): return ACCESS_TOKEN_RE.sub( br'\1\3', -- cgit 1.4.1 From 09b29f9c4a941ae5721b1f5e296156dfe92e3395 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 9 May 2018 23:03:11 +0100 Subject: Make RequestMetrics take a raw time rather than a clock ... which is going to make it easier to move around. --- synapse/http/request_metrics.py | 8 ++++---- synapse/http/server.py | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index 4a843a36a7..4e8a5f5306 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -100,12 +100,12 @@ response_size = metrics.register_counter( class RequestMetrics(object): - def start(self, clock, name): - self.start = clock.time_msec() + def start(self, time_msec, name): + self.start = time_msec self.start_context = LoggingContext.current_context() self.name = name - def stop(self, clock, request): + def stop(self, time_msec, request): context = LoggingContext.current_context() tag = "" @@ -122,7 +122,7 @@ class RequestMetrics(object): response_count.inc(request.method, self.name, tag) response_timer.inc_by( - clock.time_msec() - self.start, request.method, + time_msec - self.start, request.method, self.name, tag ) diff --git a/synapse/http/server.py b/synapse/http/server.py index 369f7bbdd1..b16c9c17f6 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -87,7 +87,7 @@ def wrap_request_handler(request_handler, include_metrics=False): # JsonResource (or a subclass), and JsonResource._async_render # will update it once it picks a servlet. 
servlet_name = self.__class__.__name__ - request_metrics.start(self.clock, name=servlet_name) + request_metrics.start(self.clock.time_msec(), name=servlet_name) with request.processing(): try: @@ -138,7 +138,7 @@ def wrap_request_handler(request_handler, include_metrics=False): finally: try: request_metrics.stop( - self.clock, request + self.clock.time_msec(), request ) except Exception as e: logger.warn("Failed to stop metrics: %r", e) -- cgit 1.4.1 From c6f730282c3a25468df0e624293aefd60cef7840 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 9 May 2018 23:05:14 +0100 Subject: Move RequestMetrics handling into SynapseRequest.processing() It fits quite nicely here, and opens the path to getting rid of the "include_metrics" mess. --- synapse/http/server.py | 19 +++++--------- synapse/http/site.py | 69 ++++++++++++++++++++++++++++++++++++++++++-------- 2 files changed, 64 insertions(+), 24 deletions(-) diff --git a/synapse/http/server.py b/synapse/http/server.py index b16c9c17f6..8e5d1d58f5 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -19,7 +19,7 @@ from synapse.api.errors import ( cs_exception, SynapseError, CodeMessageException, UnrecognizedRequestError, Codes ) from synapse.http.request_metrics import ( - RequestMetrics, requests_counter, + requests_counter, outgoing_responses_counter, ) from synapse.util.logcontext import LoggingContext, PreserveLoggingContext @@ -81,19 +81,18 @@ def wrap_request_handler(request_handler, include_metrics=False): with LoggingContext(request_id) as request_context: request_context.request = request_id with Measure(self.clock, "wrapped_request_handler"): - request_metrics = RequestMetrics() # we start the request metrics timer here with an initial stab # at the servlet name. For most requests that name will be # JsonResource (or a subclass), and JsonResource._async_render # will update it once it picks a servlet. servlet_name = self.__class__.__name__ - request_metrics.start(self.clock.time_msec(), name=servlet_name) - - with request.processing(): + with request.processing(servlet_name): try: with PreserveLoggingContext(request_context): if include_metrics: - yield request_handler(self, request, request_metrics) + yield request_handler( + self, request, request.request_metrics, + ) else: requests_counter.inc(request.method, servlet_name) yield request_handler(self, request) @@ -135,13 +134,7 @@ def wrap_request_handler(request_handler, include_metrics=False): pretty_print=_request_user_agent_is_curl(request), version_string=self.version_string, ) - finally: - try: - request_metrics.stop( - self.clock.time_msec(), request - ) - except Exception as e: - logger.warn("Failed to stop metrics: %r", e) + return wrapped_request_handler diff --git a/synapse/http/site.py b/synapse/http/site.py index 6af276e69a..bfd9832aa0 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -12,20 +12,38 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.util.logcontext import LoggingContext -from twisted.web.server import Site, Request - import contextlib import logging import re import time +from twisted.web.server import Site, Request + +from synapse.http.request_metrics import RequestMetrics +from synapse.util.logcontext import LoggingContext + +logger = logging.getLogger(__name__) + ACCESS_TOKEN_RE = re.compile(br'(\?.*access(_|%5[Ff])token=)[^&]*(.*)$') _next_request_seq = 0 class SynapseRequest(Request): + """Class which encapsulates an HTTP request to synapse. 
+ + All of the requests processed in synapse are of this type. + + It extends twisted's twisted.web.server.Request, and adds: + * Unique request ID + * Redaction of access_token query-params in __repr__ + * Logging at start and end + * Metrics to record CPU, wallclock and DB time by endpoint. + + It provides a method `processing` which should be called by the Resource + which is handling the request, and returns a context manager. + + """ def __init__(self, site, *args, **kw): Request.__init__(self, *args, **kw) self.site = site @@ -59,7 +77,11 @@ class SynapseRequest(Request): def get_user_agent(self): return self.requestHeaders.getRawHeaders(b"User-Agent", [None])[-1] - def started_processing(self): + def _started_processing(self, servlet_name): + self.start_time = int(time.time() * 1000) + self.request_metrics = RequestMetrics() + self.request_metrics.start(self.start_time, name=servlet_name) + self.site.access_logger.info( "%s - %s - Received request: %s %s", self.getClientIP(), @@ -67,10 +89,8 @@ class SynapseRequest(Request): self.method, self.get_redacted_uri() ) - self.start_time = int(time.time() * 1000) - - def finished_processing(self): + def _finished_processing(self): try: context = LoggingContext.current_context() ru_utime, ru_stime = context.get_resource_usage() @@ -81,6 +101,8 @@ class SynapseRequest(Request): ru_utime, ru_stime = (0, 0) db_txn_count, db_txn_duration_ms = (0, 0) + end_time = int(time.time() * 1000) + self.site.access_logger.info( "%s - %s - {%s}" " Processed request: %dms (%dms, %dms) (%dms/%dms/%d)" @@ -88,7 +110,7 @@ class SynapseRequest(Request): self.getClientIP(), self.site.site_tag, self.authenticated_entity, - int(time.time() * 1000) - self.start_time, + end_time - self.start_time, int(ru_utime * 1000), int(ru_stime * 1000), db_sched_duration_ms, @@ -102,11 +124,36 @@ class SynapseRequest(Request): self.get_user_agent(), ) + try: + self.request_metrics.stop(end_time, self) + except Exception as e: + logger.warn("Failed to stop metrics: %r", e) + @contextlib.contextmanager - def processing(self): - self.started_processing() + def processing(self, servlet_name): + """Record the fact that we are processing this request. + + Returns a context manager; the correct way to use this is: + + @defer.inlineCallbacks + def handle_request(request): + with request.processing("FooServlet"): + yield really_handle_the_request() + + This will log the request's arrival. Once the context manager is + closed, the completion of the request will be logged, and the various + metrics will be updated. + + Args: + servlet_name (str): the name of the servlet which will be + processing this request. This is used in the metrics. + + It is possible to update this afterwards by updating + self.request_metrics.servlet_name. 
+ """ + self._started_processing(servlet_name) yield - self.finished_processing() + self._finished_processing() class XForwardedForRequest(SynapseRequest): -- cgit 1.4.1 From b8700dd7d0482813beb9c780b411de5108ae078a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 9 May 2018 18:03:04 +0100 Subject: Bump requests_counter in wrapped_request_handler less magic --- synapse/http/server.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/synapse/http/server.py b/synapse/http/server.py index 8e5d1d58f5..1d05b53873 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -90,12 +90,20 @@ def wrap_request_handler(request_handler, include_metrics=False): try: with PreserveLoggingContext(request_context): if include_metrics: - yield request_handler( + d = request_handler( self, request, request.request_metrics, ) else: - requests_counter.inc(request.method, servlet_name) - yield request_handler(self, request) + d = request_handler(self, request) + + # record the arrival of the request *after* + # dispatching to the handler, so that the handler + # can update the servlet name in the request + # metrics + requests_counter.inc(request.method, + request.request_metrics.name) + yield d + except CodeMessageException as e: code = e.code if isinstance(e, SynapseError): @@ -220,7 +228,6 @@ class JsonResource(HttpServer, resource.Resource): servlet_classname = "%r" % callback request_metrics.name = servlet_classname - requests_counter.inc(request.method, servlet_classname) # Now trigger the callback. If it returns a response, we send it # here. If it throws an exception, that is handled by the wrapper -- cgit 1.4.1 From 49e5a613f1ac53498b31150f78332fc64932e61b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 9 May 2018 23:44:22 +0100 Subject: Move outgoing_responses_counter handling to RequestMetrics it's much neater there. --- synapse/http/request_metrics.py | 2 ++ synapse/http/server.py | 4 ---- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index 4e8a5f5306..8c850bf23f 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -119,6 +119,8 @@ class RequestMetrics(object): ) return + outgoing_responses_counter.inc(request.method, str(request.code)) + response_count.inc(request.method, self.name, tag) response_timer.inc_by( diff --git a/synapse/http/server.py b/synapse/http/server.py index 1d05b53873..200b2c4837 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -20,7 +20,6 @@ from synapse.api.errors import ( ) from synapse.http.request_metrics import ( requests_counter, - outgoing_responses_counter, ) from synapse.util.logcontext import LoggingContext, PreserveLoggingContext from synapse.util.caches import intern_dict @@ -112,7 +111,6 @@ def wrap_request_handler(request_handler, include_metrics=False): ) else: logger.exception(e) - outgoing_responses_counter.inc(request.method, str(code)) respond_with_json( request, code, cs_exception(e), send_cors=True, pretty_print=_request_user_agent_is_curl(request), @@ -274,8 +272,6 @@ class JsonResource(HttpServer, resource.Resource): def _send_response(self, request, code, response_json_object, response_code_message=None): - outgoing_responses_counter.inc(request.method, str(code)) - # TODO: Only enable CORS for the requests that need it. 
respond_with_json( request, code, response_json_object, -- cgit 1.4.1 From 9589a1925e944360c68f7eb8a65f8ba94ec9bb84 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 9 May 2018 23:49:29 +0100 Subject: Remove include_metrics param The metrics are now available via the request, so this is redundant and can go away at last. --- synapse/http/server.py | 24 +++++++----------------- 1 file changed, 7 insertions(+), 17 deletions(-) diff --git a/synapse/http/server.py b/synapse/http/server.py index 200b2c4837..9598969de8 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -45,12 +45,12 @@ import simplejson logger = logging.getLogger(__name__) -def request_handler(include_metrics=False): +def request_handler(): """Decorator for ``wrap_request_handler``""" - return lambda request_handler: wrap_request_handler(request_handler, include_metrics) + return wrap_request_handler -def wrap_request_handler(request_handler, include_metrics=False): +def wrap_request_handler(request_handler): """Wraps a request handler method with the necessary logging and exception handling. @@ -88,12 +88,7 @@ def wrap_request_handler(request_handler, include_metrics=False): with request.processing(servlet_name): try: with PreserveLoggingContext(request_context): - if include_metrics: - d = request_handler( - self, request, request.request_metrics, - ) - else: - d = request_handler(self, request) + d = request_handler(self, request) # record the arrival of the request *after* # dispatching to the handler, so that the handler @@ -206,13 +201,9 @@ class JsonResource(HttpServer, resource.Resource): self._async_render(request) return server.NOT_DONE_YET - # Disable metric reporting because _async_render does its own metrics. - # It does its own metric reporting because _async_render dispatches to - # a callback and it's the class name of that callback we want to report - # against rather than the JsonResource itself. - @request_handler(include_metrics=True) + @request_handler() @defer.inlineCallbacks - def _async_render(self, request, request_metrics): + def _async_render(self, request): """ This gets called from render() every time someone sends us a request. This checks if anyone has registered a callback for that method and path. @@ -224,8 +215,7 @@ class JsonResource(HttpServer, resource.Resource): servlet_classname = servlet_instance.__class__.__name__ else: servlet_classname = "%r" % callback - - request_metrics.name = servlet_classname + request.request_metrics.name = servlet_classname # Now trigger the callback. If it returns a response, we send it # here. If it throws an exception, that is handled by the wrapper -- cgit 1.4.1 From 09f570b9357d40764a0716f7e287fa4dde44610a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 10 May 2018 11:59:51 +0100 Subject: Factor wrap_request_handler_with_logging out of wrap_request_handler ... so that it can be used on non-JSON endpoints --- synapse/http/server.py | 120 +++++++++++++++++++++++++++---------------------- 1 file changed, 66 insertions(+), 54 deletions(-) diff --git a/synapse/http/server.py b/synapse/http/server.py index 9598969de8..fd58e65c4b 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -50,9 +50,10 @@ def request_handler(): return wrap_request_handler -def wrap_request_handler(request_handler): - """Wraps a request handler method with the necessary logging and exception - handling. +def wrap_request_handler(h): + """Wraps a request handler method with exception handling. 
+ + Also adds logging as per wrap_request_handler_with_logging. The handler method must have a signature of "handle_foo(self, request)", where "self" must have "version_string" and "clock" attributes (and @@ -62,12 +63,63 @@ def wrap_request_handler(request_handler): a response has been sent. If the deferred fails with a SynapseError we use it to send a JSON response with the appropriate HTTP reponse code. If the deferred fails with any other type of error we send a 500 reponse. + """ + + @defer.inlineCallbacks + def wrapped_request_handler(self, request): + try: + yield h(self, request) + except CodeMessageException as e: + code = e.code + if isinstance(e, SynapseError): + logger.info( + "%s SynapseError: %s - %s", request, code, e.msg + ) + else: + logger.exception(e) + respond_with_json( + request, code, cs_exception(e), send_cors=True, + pretty_print=_request_user_agent_is_curl(request), + version_string=self.version_string, + ) + + except Exception: + # failure.Failure() fishes the original Failure out + # of our stack, and thus gives us a sensible stack + # trace. + f = failure.Failure() + logger.error( + "Failed handle request via %r: %r: %s", + h, + request, + f.getTraceback().rstrip(), + ) + respond_with_json( + request, + 500, + { + "error": "Internal server error", + "errcode": Codes.UNKNOWN, + }, + send_cors=True, + pretty_print=_request_user_agent_is_curl(request), + version_string=self.version_string, + ) + + return wrap_request_handler_with_logging(wrapped_request_handler) + + +def wrap_request_handler_with_logging(h): + """Wraps a request handler to provide logging and metrics + + The handler method must have a signature of "handle_foo(self, request)", + where "self" must have a "clock" attribute (and "request" must be a + SynapseRequest). As well as calling `request.processing` (which will log the response and duration for this request), the wrapped request handler will insert the request id into the logging context. """ - @defer.inlineCallbacks def wrapped_request_handler(self, request): """ @@ -86,56 +138,16 @@ def wrap_request_handler(request_handler): # will update it once it picks a servlet. servlet_name = self.__class__.__name__ with request.processing(servlet_name): - try: - with PreserveLoggingContext(request_context): - d = request_handler(self, request) - - # record the arrival of the request *after* - # dispatching to the handler, so that the handler - # can update the servlet name in the request - # metrics - requests_counter.inc(request.method, - request.request_metrics.name) - yield d - - except CodeMessageException as e: - code = e.code - if isinstance(e, SynapseError): - logger.info( - "%s SynapseError: %s - %s", request, code, e.msg - ) - else: - logger.exception(e) - respond_with_json( - request, code, cs_exception(e), send_cors=True, - pretty_print=_request_user_agent_is_curl(request), - version_string=self.version_string, - ) - except Exception: - # failure.Failure() fishes the original Failure out - # of our stack, and thus gives us a sensible stack - # trace. 
- f = failure.Failure() - logger.error( - "Failed handle request %s.%s on %r: %r: %s", - request_handler.__module__, - request_handler.__name__, - self, - request, - f.getTraceback().rstrip(), - ) - respond_with_json( - request, - 500, - { - "error": "Internal server error", - "errcode": Codes.UNKNOWN, - }, - send_cors=True, - pretty_print=_request_user_agent_is_curl(request), - version_string=self.version_string, - ) - + with PreserveLoggingContext(request_context): + d = h(self, request) + + # record the arrival of the request *after* + # dispatching to the handler, so that the handler + # can update the servlet name in the request + # metrics + requests_counter.inc(request.method, + request.request_metrics.name) + yield d return wrapped_request_handler -- cgit 1.4.1 From 645cb4bf06deee1c4c10ecc3d7df2c914168f19a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 10 May 2018 12:10:27 +0100 Subject: Remove redundant request_handler decorator This is needless complexity; we might as well use the wrapper directly. Also rename wrap_request_handler->wrap_json_request_handler. --- synapse/http/additional_resource.py | 4 ++-- synapse/http/server.py | 9 ++------- synapse/rest/key/v2/remote_key_resource.py | 8 +++++--- synapse/rest/media/v1/download_resource.py | 16 +++++++++------- synapse/rest/media/v1/preview_url_resource.py | 5 +++-- synapse/rest/media/v1/thumbnail_resource.py | 23 +++++++++++++---------- synapse/rest/media/v1/upload_resource.py | 15 ++++++++------- 7 files changed, 42 insertions(+), 38 deletions(-) diff --git a/synapse/http/additional_resource.py b/synapse/http/additional_resource.py index 343e932cb1..d9e7f5dfb7 100644 --- a/synapse/http/additional_resource.py +++ b/synapse/http/additional_resource.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.http.server import wrap_request_handler +from synapse.http.server import wrap_json_request_handler from twisted.web.resource import Resource from twisted.web.server import NOT_DONE_YET @@ -50,6 +50,6 @@ class AdditionalResource(Resource): self._async_render(request) return NOT_DONE_YET - @wrap_request_handler + @wrap_json_request_handler def _async_render(self, request): return self._handler(request) diff --git a/synapse/http/server.py b/synapse/http/server.py index fd58e65c4b..f29e36f490 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -45,12 +45,7 @@ import simplejson logger = logging.getLogger(__name__) -def request_handler(): - """Decorator for ``wrap_request_handler``""" - return wrap_request_handler - - -def wrap_request_handler(h): +def wrap_json_request_handler(h): """Wraps a request handler method with exception handling. Also adds logging as per wrap_request_handler_with_logging. @@ -213,7 +208,7 @@ class JsonResource(HttpServer, resource.Resource): self._async_render(request) return server.NOT_DONE_YET - @request_handler() + @wrap_json_request_handler @defer.inlineCallbacks def _async_render(self, request): """ This gets called from render() every time someone sends us a request. diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index 17e6079cba..17b3077926 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -12,7 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from synapse.http.server import request_handler, respond_with_json_bytes +from synapse.http.server import ( + respond_with_json_bytes, wrap_json_request_handler, +) from synapse.http.servlet import parse_integer, parse_json_object_from_request from synapse.api.errors import SynapseError, Codes from synapse.crypto.keyring import KeyLookupError @@ -99,7 +101,7 @@ class RemoteKey(Resource): self.async_render_GET(request) return NOT_DONE_YET - @request_handler() + @wrap_json_request_handler @defer.inlineCallbacks def async_render_GET(self, request): if len(request.postpath) == 1: @@ -124,7 +126,7 @@ class RemoteKey(Resource): self.async_render_POST(request) return NOT_DONE_YET - @request_handler() + @wrap_json_request_handler @defer.inlineCallbacks def async_render_POST(self, request): content = parse_json_object_from_request(request) diff --git a/synapse/rest/media/v1/download_resource.py b/synapse/rest/media/v1/download_resource.py index fe7e17596f..3fc3f64d62 100644 --- a/synapse/rest/media/v1/download_resource.py +++ b/synapse/rest/media/v1/download_resource.py @@ -12,16 +12,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import synapse.http.servlet +import logging -from ._base import parse_media_id, respond_404 +from twisted.internet import defer from twisted.web.resource import Resource -from synapse.http.server import request_handler, set_cors_headers - from twisted.web.server import NOT_DONE_YET -from twisted.internet import defer -import logging +from synapse.http.server import ( + set_cors_headers, + wrap_json_request_handler, +) +import synapse.http.servlet +from ._base import parse_media_id, respond_404 logger = logging.getLogger(__name__) @@ -43,7 +45,7 @@ class DownloadResource(Resource): self._async_render_GET(request) return NOT_DONE_YET - @request_handler() + @wrap_json_request_handler @defer.inlineCallbacks def _async_render_GET(self, request): set_cors_headers(request) diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 9290d7946f..6b089689b4 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -40,8 +40,9 @@ from synapse.util.stringutils import random_string from synapse.util.caches.expiringcache import ExpiringCache from synapse.http.client import SpiderHttpClient from synapse.http.server import ( - request_handler, respond_with_json_bytes, + respond_with_json_bytes, respond_with_json, + wrap_json_request_handler, ) from synapse.util.async import ObservableDeferred from synapse.util.stringutils import is_ascii @@ -90,7 +91,7 @@ class PreviewUrlResource(Resource): self._async_render_GET(request) return NOT_DONE_YET - @request_handler() + @wrap_json_request_handler @defer.inlineCallbacks def _async_render_GET(self, request): diff --git a/synapse/rest/media/v1/thumbnail_resource.py b/synapse/rest/media/v1/thumbnail_resource.py index 58ada49711..6c12d79f56 100644 --- a/synapse/rest/media/v1/thumbnail_resource.py +++ b/synapse/rest/media/v1/thumbnail_resource.py @@ -14,18 +14,21 @@ # limitations under the License. 
-from ._base import ( - parse_media_id, respond_404, respond_with_file, FileInfo, - respond_with_responder, -) -from twisted.web.resource import Resource -from synapse.http.servlet import parse_string, parse_integer -from synapse.http.server import request_handler, set_cors_headers +import logging -from twisted.web.server import NOT_DONE_YET from twisted.internet import defer +from twisted.web.resource import Resource +from twisted.web.server import NOT_DONE_YET -import logging +from synapse.http.server import ( + set_cors_headers, + wrap_json_request_handler, +) +from synapse.http.servlet import parse_integer, parse_string +from ._base import ( + FileInfo, parse_media_id, respond_404, respond_with_file, + respond_with_responder, +) logger = logging.getLogger(__name__) @@ -48,7 +51,7 @@ class ThumbnailResource(Resource): self._async_render_GET(request) return NOT_DONE_YET - @request_handler() + @wrap_json_request_handler @defer.inlineCallbacks def _async_render_GET(self, request): set_cors_headers(request) diff --git a/synapse/rest/media/v1/upload_resource.py b/synapse/rest/media/v1/upload_resource.py index a31e75cb46..7d01c57fd1 100644 --- a/synapse/rest/media/v1/upload_resource.py +++ b/synapse/rest/media/v1/upload_resource.py @@ -13,16 +13,17 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.http.server import respond_with_json, request_handler - -from synapse.api.errors import SynapseError +import logging -from twisted.web.server import NOT_DONE_YET from twisted.internet import defer - from twisted.web.resource import Resource +from twisted.web.server import NOT_DONE_YET -import logging +from synapse.api.errors import SynapseError +from synapse.http.server import ( + respond_with_json, + wrap_json_request_handler, +) logger = logging.getLogger(__name__) @@ -51,7 +52,7 @@ class UploadResource(Resource): respond_with_json(request, 200, {}, send_cors=True) return NOT_DONE_YET - @request_handler() + @wrap_json_request_handler @defer.inlineCallbacks def _async_render_POST(self, request): requester = yield self.auth.get_user_by_req(request) -- cgit 1.4.1 From 318711e1399da009910c3a9e5fa297c28a2d0a97 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 10 May 2018 18:46:59 +0100 Subject: Set Server header in SynapseRequest (instead of everywhere that writes a response. Or rather, the subset of places which write responses where we haven't forgotten it). This also means that we don't have to have the mysterious version_string attribute in anything with a request handler. Unfortunately it does mean that we have to pass the version string wherever we instantiate a SynapseSite, which has been c&ped 150 times, but that is code that ought to be cleaned up anyway really. 
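For illustration only: a rough sketch (not part of this patch) of what each call site now looks like. The port, tag and config values below are invented for the example; only SynapseSite and the new server_version_string argument come from this change.

    from twisted.internet import reactor
    from twisted.web.resource import Resource

    from synapse.http.site import SynapseSite

    root_resource = Resource()

    # SynapseSite now receives the version string once, up front, and its
    # render() override stamps it on each response's Server header.
    site = SynapseSite(
        "synapse.access.http.example",  # logger name used for access logging
        "example",                      # site_tag, quoted in access-log lines
        {"x_forwarded": False},         # listener config; only x_forwarded is read
        root_resource,
        "Synapse/0.28.0",               # server_version_string (made-up value)
    )
    reactor.listenTCP(8008, site)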
--- synapse/app/appservice.py | 1 + synapse/app/client_reader.py | 1 + synapse/app/event_creator.py | 1 + synapse/app/federation_reader.py | 1 + synapse/app/federation_sender.py | 1 + synapse/app/frontend_proxy.py | 1 + synapse/app/homeserver.py | 2 ++ synapse/app/media_repository.py | 1 + synapse/app/pusher.py | 1 + synapse/app/synchrotron.py | 1 + synapse/app/user_dir.py | 1 + synapse/http/additional_resource.py | 3 +-- synapse/http/server.py | 14 ++++---------- synapse/http/site.py | 11 ++++++++++- synapse/rest/client/v1/pusher.py | 1 - synapse/rest/client/v2_alpha/auth.py | 2 -- synapse/rest/key/v1/server_key_resource.py | 2 -- synapse/rest/key/v2/local_key_resource.py | 2 -- synapse/rest/key/v2/remote_key_resource.py | 2 -- synapse/rest/media/v1/download_resource.py | 3 +-- synapse/rest/media/v1/preview_url_resource.py | 1 - synapse/rest/media/v1/thumbnail_resource.py | 1 - synapse/rest/media/v1/upload_resource.py | 1 - 23 files changed, 28 insertions(+), 27 deletions(-) diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py index 58f2c9d68c..b1efacc9f8 100644 --- a/synapse/app/appservice.py +++ b/synapse/app/appservice.py @@ -74,6 +74,7 @@ class AppserviceServer(HomeServer): site_tag, listener_config, root_resource, + self.version_string, ) ) diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py index 267d34c881..38b98382c6 100644 --- a/synapse/app/client_reader.py +++ b/synapse/app/client_reader.py @@ -98,6 +98,7 @@ class ClientReaderServer(HomeServer): site_tag, listener_config, root_resource, + self.version_string, ) ) diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py index b915d12d53..bd7f3d5679 100644 --- a/synapse/app/event_creator.py +++ b/synapse/app/event_creator.py @@ -114,6 +114,7 @@ class EventCreatorServer(HomeServer): site_tag, listener_config, root_resource, + self.version_string, ) ) diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py index c1dc66dd17..6e10b27b9e 100644 --- a/synapse/app/federation_reader.py +++ b/synapse/app/federation_reader.py @@ -87,6 +87,7 @@ class FederationReaderServer(HomeServer): site_tag, listener_config, root_resource, + self.version_string, ) ) diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py index a08af83a4c..6f24e32d6d 100644 --- a/synapse/app/federation_sender.py +++ b/synapse/app/federation_sender.py @@ -101,6 +101,7 @@ class FederationSenderServer(HomeServer): site_tag, listener_config, root_resource, + self.version_string, ) ) diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index b349e3e3ce..0f700ee786 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -152,6 +152,7 @@ class FrontendProxyServer(HomeServer): site_tag, listener_config, root_resource, + self.version_string, ) ) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index a0e465d644..75f40fd5a4 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -140,6 +140,7 @@ class SynapseHomeServer(HomeServer): site_tag, listener_config, root_resource, + self.version_string, ), self.tls_server_context_factory, ) @@ -153,6 +154,7 @@ class SynapseHomeServer(HomeServer): site_tag, listener_config, root_resource, + self.version_string, ) ) logger.info("Synapse now listening on port %d", port) diff --git a/synapse/app/media_repository.py b/synapse/app/media_repository.py index fc8282bbc1..9c93195f0a 100644 --- a/synapse/app/media_repository.py +++ b/synapse/app/media_repository.py @@ 
-94,6 +94,7 @@ class MediaRepositoryServer(HomeServer): site_tag, listener_config, root_resource, + self.version_string, ) ) diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index 26930d1b3b..3912eae48c 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -104,6 +104,7 @@ class PusherServer(HomeServer): site_tag, listener_config, root_resource, + self.version_string, ) ) diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index 7152b1deb4..c6294a7a0c 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -281,6 +281,7 @@ class SynchrotronServer(HomeServer): site_tag, listener_config, root_resource, + self.version_string, ) ) diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py index 5ba7e9b416..53eb3474da 100644 --- a/synapse/app/user_dir.py +++ b/synapse/app/user_dir.py @@ -126,6 +126,7 @@ class UserDirectoryServer(HomeServer): site_tag, listener_config, root_resource, + self.version_string, ) ) diff --git a/synapse/http/additional_resource.py b/synapse/http/additional_resource.py index d9e7f5dfb7..a797396ade 100644 --- a/synapse/http/additional_resource.py +++ b/synapse/http/additional_resource.py @@ -42,8 +42,7 @@ class AdditionalResource(Resource): Resource.__init__(self) self._handler = handler - # these are required by the request_handler wrapper - self.version_string = hs.version_string + # required by the request_handler wrapper self.clock = hs.get_clock() def render(self, request): diff --git a/synapse/http/server.py b/synapse/http/server.py index f29e36f490..b6e2ae14a2 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -51,8 +51,8 @@ def wrap_json_request_handler(h): Also adds logging as per wrap_request_handler_with_logging. The handler method must have a signature of "handle_foo(self, request)", - where "self" must have "version_string" and "clock" attributes (and - "request" must be a SynapseRequest). + where "self" must have a "clock" attribute (and "request" must be a + SynapseRequest). The handler must return a deferred. If the deferred succeeds we assume that a response has been sent. If the deferred fails with a SynapseError we use @@ -75,7 +75,6 @@ def wrap_json_request_handler(h): respond_with_json( request, code, cs_exception(e), send_cors=True, pretty_print=_request_user_agent_is_curl(request), - version_string=self.version_string, ) except Exception: @@ -98,7 +97,6 @@ def wrap_json_request_handler(h): }, send_cors=True, pretty_print=_request_user_agent_is_curl(request), - version_string=self.version_string, ) return wrap_request_handler_with_logging(wrapped_request_handler) @@ -192,7 +190,6 @@ class JsonResource(HttpServer, resource.Resource): self.canonical_json = canonical_json self.clock = hs.get_clock() self.path_regexs = {} - self.version_string = hs.version_string self.hs = hs def register_paths(self, method, path_patterns, callback): @@ -275,7 +272,6 @@ class JsonResource(HttpServer, resource.Resource): send_cors=True, response_code_message=response_code_message, pretty_print=_request_user_agent_is_curl(request), - version_string=self.version_string, canonical_json=self.canonical_json, ) @@ -326,7 +322,7 @@ class RootRedirect(resource.Resource): def respond_with_json(request, code, json_object, send_cors=False, response_code_message=None, pretty_print=False, - version_string="", canonical_json=True): + canonical_json=True): # could alternatively use request.notifyFinish() and flip a flag when # the Deferred fires, but since the flag is RIGHT THERE it seems like # a waste. 
@@ -348,12 +344,11 @@ def respond_with_json(request, code, json_object, send_cors=False, request, code, json_bytes, send_cors=send_cors, response_code_message=response_code_message, - version_string=version_string ) def respond_with_json_bytes(request, code, json_bytes, send_cors=False, - version_string="", response_code_message=None): + response_code_message=None): """Sends encoded JSON in response to the given request. Args: @@ -367,7 +362,6 @@ def respond_with_json_bytes(request, code, json_bytes, send_cors=False, request.setResponseCode(code, message=response_code_message) request.setHeader(b"Content-Type", b"application/json") - request.setHeader(b"Server", version_string) request.setHeader(b"Content-Length", b"%d" % (len(json_bytes),)) request.setHeader(b"Cache-Control", b"no-cache, no-store, must-revalidate") diff --git a/synapse/http/site.py b/synapse/http/site.py index bfd9832aa0..202a990508 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -77,6 +77,11 @@ class SynapseRequest(Request): def get_user_agent(self): return self.requestHeaders.getRawHeaders(b"User-Agent", [None])[-1] + def render(self, resrc): + # override the Server header which is set by twisted + self.setHeader("Server", self.site.server_version_string) + return Request.render(self, resrc) + def _started_processing(self, servlet_name): self.start_time = int(time.time() * 1000) self.request_metrics = RequestMetrics() @@ -151,6 +156,8 @@ class SynapseRequest(Request): It is possible to update this afterwards by updating self.request_metrics.servlet_name. """ + # TODO: we should probably just move this into render() and finish(), + # to save having to call a separate method. self._started_processing(servlet_name) yield self._finished_processing() @@ -191,7 +198,8 @@ class SynapseSite(Site): Subclass of a twisted http Site that does access logging with python's standard logging """ - def __init__(self, logger_name, site_tag, config, resource, *args, **kwargs): + def __init__(self, logger_name, site_tag, config, resource, + server_version_string, *args, **kwargs): Site.__init__(self, resource, *args, **kwargs) self.site_tag = site_tag @@ -199,6 +207,7 @@ class SynapseSite(Site): proxied = config.get("x_forwarded", False) self.requestFactory = SynapseRequestFactory(self, proxied) self.access_logger = logging.getLogger(logger_name) + self.server_version_string = server_version_string def log(self, request): pass diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py index 0206e664c1..40e523cc5f 100644 --- a/synapse/rest/client/v1/pusher.py +++ b/synapse/rest/client/v1/pusher.py @@ -176,7 +176,6 @@ class PushersRemoveRestServlet(RestServlet): request.setResponseCode(200) request.setHeader(b"Content-Type", b"text/html; charset=utf-8") - request.setHeader(b"Server", self.hs.version_string) request.setHeader(b"Content-Length", b"%d" % ( len(PushersRemoveRestServlet.SUCCESS_HTML), )) diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py index 8e5577148f..d6f3a19648 100644 --- a/synapse/rest/client/v2_alpha/auth.py +++ b/synapse/rest/client/v2_alpha/auth.py @@ -129,7 +129,6 @@ class AuthRestServlet(RestServlet): html_bytes = html.encode("utf8") request.setResponseCode(200) request.setHeader(b"Content-Type", b"text/html; charset=utf-8") - request.setHeader(b"Server", self.hs.version_string) request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),)) request.write(html_bytes) @@ -175,7 +174,6 @@ class AuthRestServlet(RestServlet): html_bytes = 
html.encode("utf8") request.setResponseCode(200) request.setHeader(b"Content-Type", b"text/html; charset=utf-8") - request.setHeader(b"Server", self.hs.version_string) request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),)) request.write(html_bytes) diff --git a/synapse/rest/key/v1/server_key_resource.py b/synapse/rest/key/v1/server_key_resource.py index bd4fea5774..1498d188c1 100644 --- a/synapse/rest/key/v1/server_key_resource.py +++ b/synapse/rest/key/v1/server_key_resource.py @@ -49,7 +49,6 @@ class LocalKey(Resource): """ def __init__(self, hs): - self.version_string = hs.version_string self.response_body = encode_canonical_json( self.response_json_object(hs.config) ) @@ -84,7 +83,6 @@ class LocalKey(Resource): def render_GET(self, request): return respond_with_json_bytes( request, 200, self.response_body, - version_string=self.version_string ) def getChild(self, name, request): diff --git a/synapse/rest/key/v2/local_key_resource.py b/synapse/rest/key/v2/local_key_resource.py index be68d9a096..04775b3c45 100644 --- a/synapse/rest/key/v2/local_key_resource.py +++ b/synapse/rest/key/v2/local_key_resource.py @@ -63,7 +63,6 @@ class LocalKey(Resource): isLeaf = True def __init__(self, hs): - self.version_string = hs.version_string self.config = hs.config self.clock = hs.clock self.update_response_body(self.clock.time_msec()) @@ -115,5 +114,4 @@ class LocalKey(Resource): self.update_response_body(time_now) return respond_with_json_bytes( request, 200, self.response_body, - version_string=self.version_string ) diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index 17b3077926..21b4c1175e 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -93,7 +93,6 @@ class RemoteKey(Resource): def __init__(self, hs): self.keyring = hs.get_keyring() self.store = hs.get_datastore() - self.version_string = hs.version_string self.clock = hs.get_clock() self.federation_domain_whitelist = hs.config.federation_domain_whitelist @@ -242,5 +241,4 @@ class RemoteKey(Resource): respond_with_json_bytes( request, 200, result_io.getvalue(), - version_string=self.version_string ) diff --git a/synapse/rest/media/v1/download_resource.py b/synapse/rest/media/v1/download_resource.py index 3fc3f64d62..8cf8820c31 100644 --- a/synapse/rest/media/v1/download_resource.py +++ b/synapse/rest/media/v1/download_resource.py @@ -37,9 +37,8 @@ class DownloadResource(Resource): self.media_repo = media_repo self.server_name = hs.hostname - # Both of these are expected by @request_handler() + # this is expected by @wrap_json_request_handler self.clock = hs.get_clock() - self.version_string = hs.version_string def render_GET(self, request): self._async_render_GET(request) diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 6b089689b4..2839207abc 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -58,7 +58,6 @@ class PreviewUrlResource(Resource): self.auth = hs.get_auth() self.clock = hs.get_clock() - self.version_string = hs.version_string self.filepaths = media_repo.filepaths self.max_spider_size = hs.config.max_spider_size self.server_name = hs.hostname diff --git a/synapse/rest/media/v1/thumbnail_resource.py b/synapse/rest/media/v1/thumbnail_resource.py index 6c12d79f56..aae6e464e8 100644 --- a/synapse/rest/media/v1/thumbnail_resource.py +++ b/synapse/rest/media/v1/thumbnail_resource.py @@ -44,7 +44,6 
@@ class ThumbnailResource(Resource): self.media_storage = media_storage self.dynamic_thumbnails = hs.config.dynamic_thumbnails self.server_name = hs.hostname - self.version_string = hs.version_string self.clock = hs.get_clock() def render_GET(self, request): diff --git a/synapse/rest/media/v1/upload_resource.py b/synapse/rest/media/v1/upload_resource.py index 7d01c57fd1..7567476fce 100644 --- a/synapse/rest/media/v1/upload_resource.py +++ b/synapse/rest/media/v1/upload_resource.py @@ -41,7 +41,6 @@ class UploadResource(Resource): self.server_name = hs.hostname self.auth = hs.get_auth() self.max_upload_size = hs.config.max_upload_size - self.version_string = hs.version_string self.clock = hs.get_clock() def render_POST(self, request): -- cgit 1.4.1 From db18d854cd8e197cb3d77441b2a7ebd6f81beb81 Mon Sep 17 00:00:00 2001 From: Damir Manapov Date: Thu, 10 May 2018 22:13:31 +0300 Subject: Remove duplicate transaction_id and destination entries from Transaction --- synapse/federation/units.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/synapse/federation/units.py b/synapse/federation/units.py index 3f645acc43..01c5b8fe17 100644 --- a/synapse/federation/units.py +++ b/synapse/federation/units.py @@ -74,8 +74,6 @@ class Transaction(JsonEncodedObject): "previous_ids", "pdus", "edus", - "transaction_id", - "destination", "pdu_failures", ] -- cgit 1.4.1
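Taken together, the series above leaves a JSON endpoint looking roughly like the minimal sketch below. ExampleResource, its no-op body and the hs handle are hypothetical, modelled on DownloadResource as patched earlier; only the clock attribute and the wrap_json_request_handler decorator are needed now, since request IDs, access logging, per-servlet metrics and the Server header all come from SynapseRequest itself.

    import logging

    from twisted.internet import defer
    from twisted.web.resource import Resource
    from twisted.web.server import NOT_DONE_YET

    from synapse.http.server import respond_with_json, wrap_json_request_handler

    logger = logging.getLogger(__name__)


    class ExampleResource(Resource):
        isLeaf = True

        def __init__(self, hs):
            Resource.__init__(self)
            # the wrapper only needs a clock; version_string is no longer required
            self.clock = hs.get_clock()

        def render_GET(self, request):
            self._async_render_GET(request)
            return NOT_DONE_YET

        @wrap_json_request_handler
        @defer.inlineCallbacks
        def _async_render_GET(self, request):
            # `request` is a SynapseRequest: the wrapper has already entered
            # request.processing(), so request-id logging and per-servlet
            # metrics are handled for us; we only need to send a response.
            yield defer.succeed(None)
            respond_with_json(request, 200, {}, send_cors=True)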