diff --git a/.buildkite/postgres-config.yaml b/.buildkite/postgres-config.yaml
new file mode 100644
index 0000000000..a35fec394d
--- /dev/null
+++ b/.buildkite/postgres-config.yaml
@@ -0,0 +1,21 @@
+# Configuration file used for testing the 'synapse_port_db' script.
+# Tells the script to connect to the PostgreSQL database that will be available in the
+# CI's Docker setup at the point where this file is used.
+server_name: "test"
+
+signing_key_path: "/src/.buildkite/test.signing.key"
+
+report_stats: false
+
+database:
+  name: "psycopg2"
+  args:
+    user: postgres
+    host: postgres
+    password: postgres
+    database: synapse
+
+# Suppress the key server warning.
+trusted_key_servers:
+ - server_name: "matrix.org"
+suppress_key_server_warning: true
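For context, the `args` mapping in the `database` section above is handed to the PostgreSQL driver as keyword arguments. A minimal sketch of the equivalent direct connection (assuming `psycopg2` is installed and the CI's `postgres` host is reachable; the config key `database` corresponds to psycopg2's `dbname`):

    import psycopg2

    # Same credentials as the "args" section above.
    conn = psycopg2.connect(
        user="postgres", host="postgres", password="postgres", dbname="synapse"
    )
    conn.close()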
diff --git a/.buildkite/scripts/create_postgres_db.py b/.buildkite/scripts/create_postgres_db.py
new file mode 100755
index 0000000000..df6082b0ac
--- /dev/null
+++ b/.buildkite/scripts/create_postgres_db.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from synapse.storage.engines import create_engine
+
+logger = logging.getLogger("create_postgres_db")
+
+if __name__ == "__main__":
+    # Create a PostgresEngine.
+    db_engine = create_engine({"name": "psycopg2", "args": {}})
+
+    # Connect to postgres to create the base database.
+    # We use "postgres" as a database because it's bound to exist and the "synapse" one
+    # doesn't exist yet.
+    db_conn = db_engine.module.connect(
+        user="postgres", host="postgres", password="postgres", dbname="postgres"
+    )
+    db_conn.autocommit = True
+    cur = db_conn.cursor()
+    cur.execute("CREATE DATABASE synapse;")
+    cur.close()
+    db_conn.close()
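A note on the creation step above: `CREATE DATABASE` fails if the database already exists, which is acceptable here because the CI container starts empty. PostgreSQL has no `CREATE DATABASE IF NOT EXISTS`, so a rerunnable variant would have to consult `pg_database` first; a hedged sketch, not part of this change:

    # Hypothetical rerunnable variant of the creation step.
    cur.execute("SELECT 1 FROM pg_database WHERE datname = %s", ("synapse",))
    if cur.fetchone() is None:
        cur.execute("CREATE DATABASE synapse;")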
diff --git a/.buildkite/scripts/test_synapse_port_db.sh b/.buildkite/scripts/test_synapse_port_db.sh
new file mode 100755
index 0000000000..9ed2177635
--- /dev/null
+++ b/.buildkite/scripts/test_synapse_port_db.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+#
+# Test script for 'synapse_port_db', which creates a virtualenv, installs Synapse along
+# with additional dependencies needed for the test (such as coverage or the PostgreSQL
+# driver), updates the schema of the test SQLite database and runs background updates on
+# it, creates an empty test database in PostgreSQL, and then runs the 'synapse_port_db'
+# script to test porting the SQLite database to the PostgreSQL database (with coverage).
+
+set -xe
+cd "$(dirname "$0")/../.."
+
+echo "--- Install dependencies"
+
+# Install dependencies for this test.
+pip install psycopg2 coverage coverage-enable-subprocess
+
+# Install Synapse itself. This won't update any libraries.
+pip install -e .
+
+echo "--- Generate the signing key"
+
+# Generate the server's signing key.
+python -m synapse.app.homeserver --generate-keys -c .buildkite/sqlite-config.yaml
+
+echo "--- Prepare the databases"
+
+# Make sure the SQLite3 database is using the latest schema and has no pending background updates.
+scripts-dev/update_database --database-config .buildkite/sqlite-config.yaml
+
+# Create the PostgreSQL database.
+./.buildkite/scripts/create_postgres_db.py
+
+echo "+++ Run synapse_port_db"
+
+# Run the script
+coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml
diff --git a/.buildkite/sqlite-config.yaml b/.buildkite/sqlite-config.yaml
new file mode 100644
index 0000000000..635b921764
--- /dev/null
+++ b/.buildkite/sqlite-config.yaml
@@ -0,0 +1,18 @@
+# Configuration file used for testing the 'synapse_port_db' script.
+# Tells the 'update_database' script to connect to the test SQLite database to upgrade its
+# schema and run background updates on it.
+server_name: "test"
+
+signing_key_path: "/src/.buildkite/test.signing.key"
+
+report_stats: false
+
+database:
+  name: "sqlite3"
+  args:
+    database: ".buildkite/test_db.db"
+
+# Suppress the key server warning.
+trusted_key_servers:
+ - server_name: "matrix.org"
+suppress_key_server_warning: true
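Both of these files are full homeserver configs rather than bare database configs; the scripts in this change extract and validate the `database` section themselves. A minimal sketch of that consumption, mirroring the validation added to `synapse_port_db` further down:

    import sys

    import yaml

    with open(".buildkite/sqlite-config.yaml") as f:
        hs_config = yaml.safe_load(f)

    if "database" not in hs_config:
        sys.stderr.write("The configuration file must have a 'database' section.\n")
        sys.exit(4)

    db_config = hs_config["database"]  # e.g. {"name": "sqlite3", "args": {...}}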
diff --git a/.buildkite/test_db.db b/.buildkite/test_db.db
new file mode 100644
index 0000000000..f20567ba73
--- /dev/null
+++ b/.buildkite/test_db.db
Binary files differ
diff --git a/AUTHORS.rst b/AUTHORS.rst
index d8b4a846d8..b8b31a5b47 100644
--- a/AUTHORS.rst
+++ b/AUTHORS.rst
@@ -1,34 +1,8 @@
-Erik Johnston <erik at matrix.org>
- * HS core
- * Federation API impl
+The following is an incomplete list of people outside the core team who have
+contributed to Synapse. It is no longer maintained: more recent contributions
+are listed in the `changelog <CHANGES.md>`_.
-Mark Haines <mark at matrix.org>
- * HS core
- * Crypto
- * Content repository
- * CS v2 API impl
-
-Kegan Dougal <kegan at matrix.org>
- * HS core
- * CS v1 API impl
- * AS API impl
-
-Paul "LeoNerd" Evans <paul at matrix.org>
- * HS core
- * Presence
- * Typing Notifications
- * Performance metrics and caching layer
-
-Dave Baker <dave at matrix.org>
- * Push notifications
- * Auth CS v2 impl
-
-Matthew Hodgson <matthew at matrix.org>
- * General doc & housekeeping
- * Vertobot/vertobridge matrix<->verto PoC
-
-Emmanuel Rohee <manu at matrix.org>
- * Supporting iOS clients (testability and fallback registration)
+----
Turned to Dust <dwinslow86 at gmail.com>
* ArchLinux installation instructions
@@ -62,16 +36,13 @@ Christoph Witzany <christoph at web.crofting.com>
* Add LDAP support for authentication
Pierre Jaury <pierre at jaury.eu>
-* Docker packaging
+ * Docker packaging
Serban Constantin <serban.constantin at gmail dot com>
* Small bug fix
-Jason Robinson <jasonr at matrix.org>
- * Minor fixes
-
Joseph Weston <joseph at weston.cloud>
- + Add admin API for querying HS version
+ * Add admin API for querying HS version
Benjamin Saunders <ben.e.saunders at gmail dot com>
* Documentation improvements
diff --git a/CHANGES.md b/CHANGES.md
index cd23b8112b..6faa4b8dce 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,114 @@
+Synapse 1.5.0 (2019-10-29)
+==========================
+
+Security updates
+----------------
+
+This release includes a security fix ([\#6262](https://github.com/matrix-org/synapse/issues/6262), below). Administrators are encouraged to upgrade as soon as possible.
+
+Bugfixes
+--------
+
+- Fix bug where room directory search was case sensitive. ([\#6268](https://github.com/matrix-org/synapse/issues/6268))
+
+
+Synapse 1.5.0rc2 (2019-10-28)
+=============================
+
+Bugfixes
+--------
+
+- Update list of boolean columns in `synapse_port_db`. ([\#6247](https://github.com/matrix-org/synapse/issues/6247))
+- Fix /keys/query API on workers. ([\#6256](https://github.com/matrix-org/synapse/issues/6256))
+- Improve signature checking on some federation APIs. ([\#6262](https://github.com/matrix-org/synapse/issues/6262))
+
+
+Internal Changes
+----------------
+
+- Move schema delta files to the correct data store. ([\#6248](https://github.com/matrix-org/synapse/issues/6248))
+- Small performance improvement by removing repeated config lookups in room stats calculation. ([\#6255](https://github.com/matrix-org/synapse/issues/6255))
+
+
+Synapse 1.5.0rc1 (2019-10-24)
+=============================
+
+Features
+--------
+
+- Improve quality of thumbnails for 1-bit/8-bit color palette images. ([\#2142](https://github.com/matrix-org/synapse/issues/2142))
+- Add ability to upload cross-signing signatures. ([\#5726](https://github.com/matrix-org/synapse/issues/5726))
+- Allow uploading of cross-signing keys. ([\#5769](https://github.com/matrix-org/synapse/issues/5769))
+- CAS login now provides a default display name for users if a `displayname_attribute` is set in the configuration file. ([\#6114](https://github.com/matrix-org/synapse/issues/6114))
+- Reject all pending invites for a user during deactivation. ([\#6125](https://github.com/matrix-org/synapse/issues/6125))
+- Add config option to suppress client side resource limit alerting. ([\#6173](https://github.com/matrix-org/synapse/issues/6173))
+
+
+Bugfixes
+--------
+
+- Return an HTTP 404 instead of 400 when requesting a filter by ID that is unknown to the server. Thanks to @krombel for contributing this! ([\#2380](https://github.com/matrix-org/synapse/issues/2380))
+- Fix a bug where users could be invited twice to the same group. ([\#3436](https://github.com/matrix-org/synapse/issues/3436))
+- Fix `/createRoom` failing with badly-formatted MXIDs in the invitee list. Thanks to @wener291! ([\#4088](https://github.com/matrix-org/synapse/issues/4088))
+- Make the `synapse_port_db` script create the right indexes on a new PostgreSQL database. ([\#6102](https://github.com/matrix-org/synapse/issues/6102), [\#6178](https://github.com/matrix-org/synapse/issues/6178), [\#6243](https://github.com/matrix-org/synapse/issues/6243))
+- Fix bug when uploading a large file: Synapse responds with `M_UNKNOWN` while it should be `M_TOO_LARGE` according to spec. Contributed by Anshul Angaria. ([\#6109](https://github.com/matrix-org/synapse/issues/6109))
+- Fix user push rules being deleted from a room when it is upgraded. ([\#6144](https://github.com/matrix-org/synapse/issues/6144))
+- Don't 500 when trying to exchange a revoked 3PID invite. ([\#6147](https://github.com/matrix-org/synapse/issues/6147))
+- Fix transferring notifications and tags when joining an upgraded room that is new to your server. ([\#6155](https://github.com/matrix-org/synapse/issues/6155))
+- Fix bug where guest account registration can wedge after restart. ([\#6161](https://github.com/matrix-org/synapse/issues/6161))
+- Fix monthly active user reaping when reserved users are specified. ([\#6168](https://github.com/matrix-org/synapse/issues/6168))
+- Fix `/federation/v1/state` endpoint not supporting newer room versions. ([\#6170](https://github.com/matrix-org/synapse/issues/6170))
+- Fix bug where we were updating censored events as bytes rather than text, occasionally causing invalid JSON to be inserted, breaking APIs that attempted to fetch such events. ([\#6186](https://github.com/matrix-org/synapse/issues/6186))
+- Fix occasional missed updates in the room and user directories. ([\#6187](https://github.com/matrix-org/synapse/issues/6187))
+- Fix tracing of non-JSON APIs, `/media`, `/key` etc. ([\#6195](https://github.com/matrix-org/synapse/issues/6195))
+- Fix bug where presence would not get timed out correctly if a synchrotron worker is used and restarted. ([\#6212](https://github.com/matrix-org/synapse/issues/6212))
+- synapse_port_db: Add 2 additional BOOLEAN_COLUMNS to be able to convert from database schema v56. ([\#6216](https://github.com/matrix-org/synapse/issues/6216))
+- Fix a bug where the Synapse demo script blacklisted `::1` (ipv6 localhost) from receiving federation traffic. ([\#6229](https://github.com/matrix-org/synapse/issues/6229))
+
+
+Updates to the Docker image
+---------------------------
+
+- Fix logging getting lost for the docker image. ([\#6197](https://github.com/matrix-org/synapse/issues/6197))
+
+
+Internal Changes
+----------------
+
+- Update `user_filters` table to have a unique index, and non-null columns. Thanks to @pik for contributing this. ([\#1172](https://github.com/matrix-org/synapse/issues/1172), [\#6175](https://github.com/matrix-org/synapse/issues/6175), [\#6184](https://github.com/matrix-org/synapse/issues/6184))
+- Allow devices to be marked as hidden, for use by features such as cross-signing.
+ This adds a new field with a default value to the devices field in the database,
+ and so the database upgrade may take a long time depending on how many devices
+ are in the database. ([\#5759](https://github.com/matrix-org/synapse/issues/5759))
+- Move lookup-related functions from RoomMemberHandler to IdentityHandler. ([\#5978](https://github.com/matrix-org/synapse/issues/5978))
+- Improve performance of the public room list directory. ([\#6019](https://github.com/matrix-org/synapse/issues/6019), [\#6152](https://github.com/matrix-org/synapse/issues/6152), [\#6153](https://github.com/matrix-org/synapse/issues/6153), [\#6154](https://github.com/matrix-org/synapse/issues/6154))
+- Edit header dicts docstrings in `SimpleHttpClient` to note that `str` or `bytes` can be passed as header keys. ([\#6077](https://github.com/matrix-org/synapse/issues/6077))
+- Add snapcraft packaging information. Contributed by @devec0. ([\#6084](https://github.com/matrix-org/synapse/issues/6084), [\#6191](https://github.com/matrix-org/synapse/issues/6191))
+- Kill off half-implemented password-reset via sms. ([\#6101](https://github.com/matrix-org/synapse/issues/6101))
+- Remove `get_user_by_req` opentracing span and add some tags. ([\#6108](https://github.com/matrix-org/synapse/issues/6108))
+- Drop some unused database tables. ([\#6115](https://github.com/matrix-org/synapse/issues/6115))
+- Add env var to turn on tracking of log context changes. ([\#6127](https://github.com/matrix-org/synapse/issues/6127))
+- Refactor configuration loading to allow better typechecking. ([\#6137](https://github.com/matrix-org/synapse/issues/6137))
+- Log responder when responding to media request. ([\#6139](https://github.com/matrix-org/synapse/issues/6139))
+- Improve performance of `find_next_generated_user_id` DB query. ([\#6148](https://github.com/matrix-org/synapse/issues/6148))
+- Expand type-checking on modules imported by `synapse.config`. ([\#6150](https://github.com/matrix-org/synapse/issues/6150))
+- Use Postgres ANY for selecting many values. ([\#6156](https://github.com/matrix-org/synapse/issues/6156))
+- Add more caching to `_get_joined_users_from_context` DB query. ([\#6159](https://github.com/matrix-org/synapse/issues/6159))
+- Add some metrics on the federation sender. ([\#6160](https://github.com/matrix-org/synapse/issues/6160))
+- Add some logging to the rooms stats updates, to try to track down a flaky test. ([\#6167](https://github.com/matrix-org/synapse/issues/6167))
+- Remove unused `timeout` parameter from `_get_public_room_list`. ([\#6179](https://github.com/matrix-org/synapse/issues/6179))
+- Reject (accidental) attempts to insert bytes into postgres tables. ([\#6186](https://github.com/matrix-org/synapse/issues/6186))
+- Make `version` optional in body of `PUT /room_keys/version/{version}`, since it's redundant. ([\#6189](https://github.com/matrix-org/synapse/issues/6189))
+- Make storage layer responsible for adding device names to key, rather than the handler. ([\#6193](https://github.com/matrix-org/synapse/issues/6193))
+- Port `synapse.rest.admin` module to use async/await. ([\#6196](https://github.com/matrix-org/synapse/issues/6196))
+- Enforce that all boolean configuration values are lowercase in CI. ([\#6203](https://github.com/matrix-org/synapse/issues/6203))
+- Remove some unused event-auth code. ([\#6214](https://github.com/matrix-org/synapse/issues/6214))
+- Remove `Auth.check` method. ([\#6217](https://github.com/matrix-org/synapse/issues/6217))
+- Remove `format_tap.py` script in favour of a perl reimplementation in Sytest's repo. ([\#6219](https://github.com/matrix-org/synapse/issues/6219))
+- Refactor storage layer in preparation to support having multiple databases. ([\#6231](https://github.com/matrix-org/synapse/issues/6231))
+- Remove some extra quotation marks across the codebase. ([\#6236](https://github.com/matrix-org/synapse/issues/6236))
+
+
Synapse 1.4.1 (2019-10-18)
==========================
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 620dc88ce2..a71a4a696b 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -114,17 +114,6 @@ directory, you will need both a regular newsfragment *and* an entry in the
debian changelog. (Though typically such changes should be submitted as two
separate pull requests.)
-Attribution
-~~~~~~~~~~~
-
-Everyone who contributes anything to Matrix is welcome to be listed in the
-AUTHORS.rst file for the project in question. Please feel free to include a
-change to AUTHORS.rst in your pull request to list yourself and a short
-description of the area(s) you've worked on. Also, we sometimes have swag to
-give away to contributors - if you feel that Matrix-branded apparel is missing
-from your life, please mail us your shipping address to matrix at matrix.org and
-we'll try to fix it :)
-
Sign off
~~~~~~~~
diff --git a/UPGRADE.rst b/UPGRADE.rst
index 9562114d59..5ebf16a73e 100644
--- a/UPGRADE.rst
+++ b/UPGRADE.rst
@@ -2,7 +2,7 @@ Upgrading Synapse
=================
Before upgrading check if any special steps are required to upgrade from the
-what you currently have installed to current version of Synapse. The extra
+version you currently have installed to the current version of Synapse. The extra
instructions that may be required are listed later in this document.
* If Synapse was installed using `prebuilt packages
@@ -29,7 +29,7 @@ instructions that may be required are listed later in this document.
running:
.. code:: bash
-
+
git pull
pip install --upgrade .
@@ -75,6 +75,16 @@ for example:
wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
+
+Upgrading to v1.5.0
+===================
+
+This release includes a database migration which may take several minutes to
+complete if there are a large number (more than a million or so) of entries in
+the ``devices`` table. This is only likely to be a problem on very large
+installations.
+
+
Upgrading to v1.4.0
===================
diff --git a/changelog.d/1172.misc b/changelog.d/1172.misc
deleted file mode 100644
index 30b3e56082..0000000000
--- a/changelog.d/1172.misc
+++ /dev/null
@@ -1 +0,0 @@
-Update `user_filters` table to have a unique index, and non-null columns. Thanks to @pik for contributing this.
\ No newline at end of file
diff --git a/changelog.d/2142.feature b/changelog.d/2142.feature
deleted file mode 100644
index e21e8325e1..0000000000
--- a/changelog.d/2142.feature
+++ /dev/null
@@ -1 +0,0 @@
-Improve quality of thumbnails for 1-bit/8-bit color palette images.
diff --git a/changelog.d/2380.bugfix b/changelog.d/2380.bugfix
deleted file mode 100644
index eae3206031..0000000000
--- a/changelog.d/2380.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Return an HTTP 404 instead of 400 when requesting a filter by ID that is unknown to the server. Thanks to @krombel for contributing this!
diff --git a/changelog.d/3436.bugfix b/changelog.d/3436.bugfix
deleted file mode 100644
index 15714a11e0..0000000000
--- a/changelog.d/3436.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a problem where users could be invited twice to the same group.
diff --git a/changelog.d/4088.bugfix b/changelog.d/4088.bugfix
deleted file mode 100644
index 61722b6224..0000000000
--- a/changelog.d/4088.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Added domain validation when including a list of invitees upon room creation.
\ No newline at end of file
diff --git a/changelog.d/5726.feature b/changelog.d/5726.feature
deleted file mode 100644
index d3c669aec0..0000000000
--- a/changelog.d/5726.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add ability to upload cross-signing signatures.
diff --git a/changelog.d/5759.misc b/changelog.d/5759.misc
deleted file mode 100644
index dc7e2c01bf..0000000000
--- a/changelog.d/5759.misc
+++ /dev/null
@@ -1,4 +0,0 @@
-Allow devices to be marked as hidden, for use by features such as cross-signing.
-This adds a new field with a default value to the devices field in the database,
-and so the database upgrade may take a long time depending on how many devices
-are in the database.
diff --git a/changelog.d/5769.feature b/changelog.d/5769.feature
deleted file mode 100644
index bf994ca327..0000000000
--- a/changelog.d/5769.feature
+++ /dev/null
@@ -1 +0,0 @@
-Allow uploading of cross-signing keys.
\ No newline at end of file
diff --git a/changelog.d/5978.misc b/changelog.d/5978.misc
deleted file mode 100644
index 6d2b69b11b..0000000000
--- a/changelog.d/5978.misc
+++ /dev/null
@@ -1 +0,0 @@
-Move lookup-related functions from RoomMemberHandler to IdentityHandler.
\ No newline at end of file
diff --git a/changelog.d/6019.misc b/changelog.d/6019.misc
deleted file mode 100644
index dfee73c28f..0000000000
--- a/changelog.d/6019.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve performance of the public room list directory.
diff --git a/changelog.d/6077.misc b/changelog.d/6077.misc
deleted file mode 100644
index 31ac5b97a4..0000000000
--- a/changelog.d/6077.misc
+++ /dev/null
@@ -1 +0,0 @@
-Edit header dicts docstrings in SimpleHttpClient to note that `str` or `bytes` can be passed as header keys.
diff --git a/changelog.d/6084.misc b/changelog.d/6084.misc
deleted file mode 100644
index 3c33701651..0000000000
--- a/changelog.d/6084.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add snapcraft packaging information. Contributed by @devec0.
diff --git a/changelog.d/6101.misc b/changelog.d/6101.misc
deleted file mode 100644
index 9743abb9e9..0000000000
--- a/changelog.d/6101.misc
+++ /dev/null
@@ -1 +0,0 @@
-Kill off half-implemented password-reset via sms.
diff --git a/changelog.d/6108.misc b/changelog.d/6108.misc
deleted file mode 100644
index 6c3f9460e9..0000000000
--- a/changelog.d/6108.misc
+++ /dev/null
@@ -1 +0,0 @@
-Remove `get_user_by_req` opentracing span and add some tags.
diff --git a/changelog.d/6109.bugfix b/changelog.d/6109.bugfix
deleted file mode 100644
index da7ac1be4e..0000000000
--- a/changelog.d/6109.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug when uploading a large file: Synapse responds with `M_UNKNOWN` while it should be `M_TOO_LARGE` according to spec. Contributed by Anshul Angaria.
diff --git a/changelog.d/6114.feature b/changelog.d/6114.feature
deleted file mode 100644
index a34ab12148..0000000000
--- a/changelog.d/6114.feature
+++ /dev/null
@@ -1 +0,0 @@
-CAS login now provides a default display name for users if a `displayname_attribute` is set in the configuration file.
diff --git a/changelog.d/6115.misc b/changelog.d/6115.misc
deleted file mode 100644
index b19e395a99..0000000000
--- a/changelog.d/6115.misc
+++ /dev/null
@@ -1 +0,0 @@
-Drop some unused database tables.
diff --git a/changelog.d/6125.feature b/changelog.d/6125.feature
deleted file mode 100644
index cbe5f8d3c8..0000000000
--- a/changelog.d/6125.feature
+++ /dev/null
@@ -1 +0,0 @@
-Reject all pending invites for a user during deactivation.
diff --git a/changelog.d/6127.misc b/changelog.d/6127.misc
deleted file mode 100644
index 7bfbcfc252..0000000000
--- a/changelog.d/6127.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add env var to turn on tracking of log context changes.
diff --git a/changelog.d/6137.misc b/changelog.d/6137.misc
deleted file mode 100644
index 92a02e71c3..0000000000
--- a/changelog.d/6137.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactor configuration loading to allow better typechecking.
diff --git a/changelog.d/6139.misc b/changelog.d/6139.misc
deleted file mode 100644
index d4b65e7af8..0000000000
--- a/changelog.d/6139.misc
+++ /dev/null
@@ -1 +0,0 @@
-Log responder when responding to media request.
diff --git a/changelog.d/6140.misc b/changelog.d/6140.misc
new file mode 100644
index 0000000000..0feb97ec61
--- /dev/null
+++ b/changelog.d/6140.misc
@@ -0,0 +1 @@
+Add a CI job to test the `synapse_port_db` script.
\ No newline at end of file
diff --git a/changelog.d/6144.bugfix b/changelog.d/6144.bugfix
deleted file mode 100644
index eee63961e4..0000000000
--- a/changelog.d/6144.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Prevent user push rules being deleted from a room when it is upgraded.
\ No newline at end of file
diff --git a/changelog.d/6147.bugfix b/changelog.d/6147.bugfix
deleted file mode 100644
index b0f936d280..0000000000
--- a/changelog.d/6147.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Don't 500 when trying to exchange a revoked 3PID invite.
diff --git a/changelog.d/6148.misc b/changelog.d/6148.misc
deleted file mode 100644
index 1d5213345c..0000000000
--- a/changelog.d/6148.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve performance of `find_next_generated_user_id` DB query.
diff --git a/changelog.d/6150.misc b/changelog.d/6150.misc
deleted file mode 100644
index a373c091ab..0000000000
--- a/changelog.d/6150.misc
+++ /dev/null
@@ -1 +0,0 @@
-Expand type-checking on modules imported by synapse.config.
diff --git a/changelog.d/6152.misc b/changelog.d/6152.misc
deleted file mode 100644
index dfee73c28f..0000000000
--- a/changelog.d/6152.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve performance of the public room list directory.
diff --git a/changelog.d/6153.misc b/changelog.d/6153.misc
deleted file mode 100644
index dfee73c28f..0000000000
--- a/changelog.d/6153.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve performance of the public room list directory.
diff --git a/changelog.d/6154.misc b/changelog.d/6154.misc
deleted file mode 100644
index dfee73c28f..0000000000
--- a/changelog.d/6154.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve performance of the public room list directory.
diff --git a/changelog.d/6155.bugfix b/changelog.d/6155.bugfix
deleted file mode 100644
index e32c0dce09..0000000000
--- a/changelog.d/6155.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix transferring notifications and tags when joining an upgraded room that is new to your server.
\ No newline at end of file
diff --git a/changelog.d/6156.misc b/changelog.d/6156.misc
deleted file mode 100644
index 49525e9416..0000000000
--- a/changelog.d/6156.misc
+++ /dev/null
@@ -1 +0,0 @@
-Use Postgres ANY for selecting many values.
diff --git a/changelog.d/6159.misc b/changelog.d/6159.misc
deleted file mode 100644
index 06cc163f8b..0000000000
--- a/changelog.d/6159.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add more caching to `_get_joined_users_from_context` DB query.
diff --git a/changelog.d/6160.misc b/changelog.d/6160.misc
deleted file mode 100644
index 3d7cce00e1..0000000000
--- a/changelog.d/6160.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add some metrics on the federation sender.
diff --git a/changelog.d/6161.bugfix b/changelog.d/6161.bugfix
deleted file mode 100644
index a0e2adb979..0000000000
--- a/changelog.d/6161.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug where guest account registration can wedge after restart.
diff --git a/changelog.d/6167.misc b/changelog.d/6167.misc
deleted file mode 100644
index 32c96b3681..0000000000
--- a/changelog.d/6167.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add some logging to the rooms stats updates, to try to track down a flaky test.
diff --git a/changelog.d/6168.bugfix b/changelog.d/6168.bugfix
deleted file mode 100644
index 39e8e9d019..0000000000
--- a/changelog.d/6168.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix monthly active user reaping where reserved users are specified.
diff --git a/changelog.d/6170.bugfix b/changelog.d/6170.bugfix
deleted file mode 100644
index 52f7ea233c..0000000000
--- a/changelog.d/6170.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix /federation/v1/state endpoint for recent room versions.
diff --git a/changelog.d/6175.misc b/changelog.d/6175.misc
deleted file mode 100644
index 30b3e56082..0000000000
--- a/changelog.d/6175.misc
+++ /dev/null
@@ -1 +0,0 @@
-Update `user_filters` table to have a unique index, and non-null columns. Thanks to @pik for contributing this.
\ No newline at end of file
diff --git a/changelog.d/6178.bugfix b/changelog.d/6178.bugfix
deleted file mode 100644
index cd288c2a44..0000000000
--- a/changelog.d/6178.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Make the `synapse_port_db` script create the right indexes on a new PostgreSQL database.
diff --git a/changelog.d/6179.misc b/changelog.d/6179.misc
deleted file mode 100644
index 01c4e71ea3..0000000000
--- a/changelog.d/6179.misc
+++ /dev/null
@@ -1 +0,0 @@
-Remove unused `timeout` parameter from `_get_public_room_list`.
\ No newline at end of file
diff --git a/changelog.d/6184.misc b/changelog.d/6184.misc
deleted file mode 100644
index 30b3e56082..0000000000
--- a/changelog.d/6184.misc
+++ /dev/null
@@ -1 +0,0 @@
-Update `user_filters` table to have a unique index, and non-null columns. Thanks to @pik for contributing this.
\ No newline at end of file
diff --git a/changelog.d/6186.bugfix b/changelog.d/6186.bugfix
deleted file mode 100644
index 199ec69032..0000000000
--- a/changelog.d/6186.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug where we were updating censored events as bytes rather than text, occaisonally causing invalid JSON being inserted breaking APIs that attempted to fetch such events.
diff --git a/changelog.d/6186.misc b/changelog.d/6186.misc
deleted file mode 100644
index 5e1314a0ac..0000000000
--- a/changelog.d/6186.misc
+++ /dev/null
@@ -1 +0,0 @@
-Reject (accidental) attempts to insert bytes into postgres tables.
diff --git a/changelog.d/6187.bugfix b/changelog.d/6187.bugfix
deleted file mode 100644
index 6142c5b98d..0000000000
--- a/changelog.d/6187.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix occasional missed updates in the room and user directories.
\ No newline at end of file
diff --git a/changelog.d/6189.misc b/changelog.d/6189.misc
deleted file mode 100644
index a66eb384e6..0000000000
--- a/changelog.d/6189.misc
+++ /dev/null
@@ -1 +0,0 @@
-Make `version` optional in body of `PUT /room_keys/version/{version}`, since it's redundant.
diff --git a/changelog.d/6191.misc b/changelog.d/6191.misc
deleted file mode 100644
index 3c33701651..0000000000
--- a/changelog.d/6191.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add snapcraft packaging information. Contributed by @devec0.
diff --git a/changelog.d/6193.misc b/changelog.d/6193.misc
deleted file mode 100644
index 8e3707f8fd..0000000000
--- a/changelog.d/6193.misc
+++ /dev/null
@@ -1 +0,0 @@
-Make storage layer responsible for adding device names to key, rather than the handler.
diff --git a/changelog.d/6195.bugfix b/changelog.d/6195.bugfix
deleted file mode 100644
index d22935dbcd..0000000000
--- a/changelog.d/6195.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix tracing of non-JSON APIs, /media, /key etc.
diff --git a/changelog.d/6196.misc b/changelog.d/6196.misc
deleted file mode 100644
index 3897b1216f..0000000000
--- a/changelog.d/6196.misc
+++ /dev/null
@@ -1 +0,0 @@
-Port synapse.rest.admin module to use async/await.
diff --git a/changelog.d/6197.docker b/changelog.d/6197.docker
deleted file mode 100644
index 71fb9cbff5..0000000000
--- a/changelog.d/6197.docker
+++ /dev/null
@@ -1 +0,0 @@
-Fix logging getting lost for the docker image.
diff --git a/changelog.d/6203.misc b/changelog.d/6203.misc
deleted file mode 100644
index c1d8276d45..0000000000
--- a/changelog.d/6203.misc
+++ /dev/null
@@ -1 +0,0 @@
-Enforce that all boolean configuration values are lowercase in CI.
diff --git a/changelog.d/6212.bugfix b/changelog.d/6212.bugfix
deleted file mode 100644
index 918755fee0..0000000000
--- a/changelog.d/6212.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug where presence would not get timed out correctly if a synchrotron worker is used and restarted.
diff --git a/changelog.d/6214.misc b/changelog.d/6214.misc
deleted file mode 100644
index c3fd04d0d8..0000000000
--- a/changelog.d/6214.misc
+++ /dev/null
@@ -1 +0,0 @@
-Remove some unused event-auth code.
diff --git a/changelog.d/6216.bugfix b/changelog.d/6216.bugfix
deleted file mode 100644
index 5784e82d18..0000000000
--- a/changelog.d/6216.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-synapse_port_db: Add 2 additional BOOLEAN_COLUMNS to be able to convert from database schema v56.
diff --git a/changelog.d/6217.misc b/changelog.d/6217.misc
deleted file mode 100644
index 503352ee0b..0000000000
--- a/changelog.d/6217.misc
+++ /dev/null
@@ -1 +0,0 @@
-Remove Auth.check method.
diff --git a/changelog.d/6218.misc b/changelog.d/6218.misc
new file mode 100644
index 0000000000..49d10c36cf
--- /dev/null
+++ b/changelog.d/6218.misc
@@ -0,0 +1 @@
+Convert EventContext to an attrs.
diff --git a/changelog.d/6219.misc b/changelog.d/6219.misc
deleted file mode 100644
index 296406246d..0000000000
--- a/changelog.d/6219.misc
+++ /dev/null
@@ -1 +0,0 @@
-Remove `format_tap.py` script in favour of a perl reimplementation in Sytest's repo.
\ No newline at end of file
diff --git a/changelog.d/6229.bugfix b/changelog.d/6229.bugfix
deleted file mode 100644
index bced3304d0..0000000000
--- a/changelog.d/6229.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Prevent the demo Synapse's from blacklisting `::1`.
\ No newline at end of file
diff --git a/changelog.d/6231.misc b/changelog.d/6231.misc
deleted file mode 100644
index 89b8297794..0000000000
--- a/changelog.d/6231.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactor storage layer in preparation to support having multiple databases.
diff --git a/changelog.d/6250.misc b/changelog.d/6250.misc
new file mode 100644
index 0000000000..12e3fe66b0
--- /dev/null
+++ b/changelog.d/6250.misc
@@ -0,0 +1 @@
+Reduce verbosity of user/room stats.
diff --git a/changelog.d/6251.misc b/changelog.d/6251.misc
new file mode 100644
index 0000000000..371c6983be
--- /dev/null
+++ b/changelog.d/6251.misc
@@ -0,0 +1 @@
+Reduce impact of debug logging.
diff --git a/changelog.d/6253.bugfix b/changelog.d/6253.bugfix
new file mode 100644
index 0000000000..266fae381c
--- /dev/null
+++ b/changelog.d/6253.bugfix
@@ -0,0 +1 @@
+Delete keys from key backup when deleting backup versions.
diff --git a/changelog.d/6257.doc b/changelog.d/6257.doc
new file mode 100644
index 0000000000..e985afde0e
--- /dev/null
+++ b/changelog.d/6257.doc
@@ -0,0 +1 @@
+Modify CAPTCHA_SETUP.md to update the terms `private key` and `public key` to `secret key` and `site key` respectively. Contributed by Yash Jipkate.
diff --git a/changelog.d/6263.misc b/changelog.d/6263.misc
new file mode 100644
index 0000000000..7b1bb4b679
--- /dev/null
+++ b/changelog.d/6263.misc
@@ -0,0 +1 @@
+Change cache descriptors to always return deferreds.
diff --git a/changelog.d/6269.misc b/changelog.d/6269.misc
new file mode 100644
index 0000000000..9fd333cc89
--- /dev/null
+++ b/changelog.d/6269.misc
@@ -0,0 +1 @@
+Fix incorrect comment regarding the functionality of an `if` statement.
\ No newline at end of file
diff --git a/changelog.d/6270.misc b/changelog.d/6270.misc
new file mode 100644
index 0000000000..d1c5811323
--- /dev/null
+++ b/changelog.d/6270.misc
@@ -0,0 +1 @@
+Update CI to run `isort` over the `scripts` and `scripts-dev` directories.
\ No newline at end of file
diff --git a/changelog.d/6273.doc b/changelog.d/6273.doc
new file mode 100644
index 0000000000..21a41d987d
--- /dev/null
+++ b/changelog.d/6273.doc
@@ -0,0 +1 @@
+Fix a small typo in `account_threepid_delegates` configuration option.
\ No newline at end of file
diff --git a/changelog.d/6274.misc b/changelog.d/6274.misc
new file mode 100644
index 0000000000..eb4966124f
--- /dev/null
+++ b/changelog.d/6274.misc
@@ -0,0 +1 @@
+Port replication http server endpoints to async/await.
diff --git a/changelog.d/6275.misc b/changelog.d/6275.misc
new file mode 100644
index 0000000000..f57e2c4adb
--- /dev/null
+++ b/changelog.d/6275.misc
@@ -0,0 +1 @@
+Port room rest handlers to async/await.
diff --git a/changelog.d/6276.misc b/changelog.d/6276.misc
new file mode 100644
index 0000000000..4a4428251e
--- /dev/null
+++ b/changelog.d/6276.misc
@@ -0,0 +1 @@
+Add a CI job to test the `synapse_port_db` script.
diff --git a/changelog.d/6277.misc b/changelog.d/6277.misc
new file mode 100644
index 0000000000..490713577f
--- /dev/null
+++ b/changelog.d/6277.misc
@@ -0,0 +1 @@
+Remove redundant CLI parameters on CI's `flake8` step.
\ No newline at end of file
diff --git a/changelog.d/6278.bugfix b/changelog.d/6278.bugfix
new file mode 100644
index 0000000000..c107270461
--- /dev/null
+++ b/changelog.d/6278.bugfix
@@ -0,0 +1 @@
+Fix exception when remote servers attempt to join a room that they're not allowed to join.
diff --git a/changelog.d/6279.misc b/changelog.d/6279.misc
new file mode 100644
index 0000000000..5f5144a9ee
--- /dev/null
+++ b/changelog.d/6279.misc
@@ -0,0 +1 @@
+Port `federation_server.py` to async/await.
diff --git a/changelog.d/6280.misc b/changelog.d/6280.misc
new file mode 100644
index 0000000000..96a0eb21b2
--- /dev/null
+++ b/changelog.d/6280.misc
@@ -0,0 +1 @@
+Port receipt and read markers to async/await.
diff --git a/changelog.d/6284.bugfix b/changelog.d/6284.bugfix
new file mode 100644
index 0000000000..cf15053d2d
--- /dev/null
+++ b/changelog.d/6284.bugfix
@@ -0,0 +1 @@
+Prevent errors from appearing on Synapse startup if `git` is not installed.
\ No newline at end of file
diff --git a/changelog.d/6291.misc b/changelog.d/6291.misc
new file mode 100644
index 0000000000..7b1bb4b679
--- /dev/null
+++ b/changelog.d/6291.misc
@@ -0,0 +1 @@
+Change cache descriptors to always return deferreds.
diff --git a/contrib/experiments/test_messaging.py b/contrib/experiments/test_messaging.py
index 5ef140ae48..6b22400a60 100644
--- a/contrib/experiments/test_messaging.py
+++ b/contrib/experiments/test_messaging.py
@@ -339,7 +339,7 @@ def main(stdscr):
    root_logger = logging.getLogger()

    formatter = logging.Formatter(
-        "%(asctime)s - %(name)s - %(lineno)d - " "%(levelname)s - %(message)s"
+        "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s"
    )
    if not os.path.exists("logs"):
        os.makedirs("logs")
diff --git a/contrib/graph/graph2.py b/contrib/graph/graph2.py
index 9db8725eee..4619f0e3c1 100644
--- a/contrib/graph/graph2.py
+++ b/contrib/graph/graph2.py
@@ -36,7 +36,7 @@ def make_graph(db_name, room_id, file_prefix, limit):
    args = [room_id]

    if limit:
-        sql += " ORDER BY topological_ordering DESC, stream_ordering DESC " "LIMIT ?"
+        sql += " ORDER BY topological_ordering DESC, stream_ordering DESC LIMIT ?"

        args.append(limit)
@@ -53,7 +53,7 @@ def make_graph(db_name, room_id, file_prefix, limit):
    for event in events:
        c = conn.execute(
-            "SELECT state_group FROM event_to_state_groups " "WHERE event_id = ?",
+            "SELECT state_group FROM event_to_state_groups WHERE event_id = ?",
            (event.event_id,),
        )
diff --git a/debian/changelog b/debian/changelog
index 02f2b508c2..acda7e5c63 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.5.0) stable; urgency=medium
+
+  * New synapse release 1.5.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 29 Oct 2019 14:28:41 +0000
+
matrix-synapse-py3 (1.4.1) stable; urgency=medium
* New synapse release 1.4.1.
diff --git a/docker/README.md b/docker/README.md
index 4b712f3f5c..24dfa77dcc 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -101,7 +101,7 @@ is suitable for local testing, but for any practical use, you will either need
to use a reverse proxy, or configure Synapse to expose an HTTPS port.
For documentation on using a reverse proxy, see
-https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.rst.
+https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.md.
For more information on enabling TLS support in synapse itself, see
https://github.com/matrix-org/synapse/blob/master/INSTALL.md#tls-certificates. Of
diff --git a/docs/CAPTCHA_SETUP.md b/docs/CAPTCHA_SETUP.md
index 5f9057530b..331e5d059a 100644
--- a/docs/CAPTCHA_SETUP.md
+++ b/docs/CAPTCHA_SETUP.md
@@ -4,7 +4,7 @@ The captcha mechanism used is Google's ReCaptcha. This requires API keys from Go
## Getting keys
-Requires a public/private key pair from:
+Requires a site/secret key pair from:
<https://developers.google.com/recaptcha/>
@@ -15,8 +15,8 @@ Must be a reCAPTCHA v2 key using the "I'm not a robot" Checkbox option
The keys are a config option on the home server config. If they are not
visible, you can generate them via `--generate-config`. Set the following value:
-    recaptcha_public_key: YOUR_PUBLIC_KEY
-    recaptcha_private_key: YOUR_PRIVATE_KEY
+    recaptcha_public_key: YOUR_SITE_KEY
+    recaptcha_private_key: YOUR_SECRET_KEY
In addition, you MUST enable captchas via:
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index b4dd146f06..d2f4aff826 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -241,7 +241,6 @@ listeners:
#
#hs_disabled: false
#hs_disabled_message: 'Human readable reason for why the HS is blocked'
-#hs_disabled_limit_type: 'error code(str), to help clients decode reason'
# Monthly Active User Blocking
#
@@ -261,9 +260,16 @@ listeners:
# sign up in a short space of time never to return after their initial
# session.
#
+# 'mau_limit_alerting' is a means of limiting client-side alerting
+# should the mau limit be reached. This is useful for small instances
+# where the admin has 5 mau seats (say) for 5 specific people and no
+# interest in increasing the mau limit further. Defaults to True, which
+# means that alerting is enabled.
+#
#limit_usage_by_mau: false
#max_mau_value: 50
#mau_trial_days: 2
+#mau_limit_alerting: false
# If enabled, the metrics for the number of monthly active users will
# be populated, however no one will be limited. If limit_usage_by_mau
@@ -949,7 +955,7 @@ uploads_path: "DATADIR/uploads"
# If a delegate is specified, the config option public_baseurl must also be filled out.
#
account_threepid_delegates:
-    #email: https://example.com     # Delegate email sending to example.org
+    #email: https://example.com     # Delegate email sending to example.com
    #msisdn: http://localhost:8090  # Delegate SMS sending to this local process
# Users who register on this homeserver will automatically be joined
diff --git a/scripts-dev/update_database b/scripts-dev/update_database
new file mode 100755
index 0000000000..27a1ad1e7e
--- /dev/null
+++ b/scripts-dev/update_database
@@ -0,0 +1,124 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import logging
+import sys
+
+import yaml
+
+from twisted.internet import defer, reactor
+
+from synapse.config.homeserver import HomeServerConfig
+from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.server import HomeServer
+from synapse.storage import DataStore
+from synapse.storage.engines import create_engine
+from synapse.storage.prepare_database import prepare_database
+
+logger = logging.getLogger("update_database")
+
+
+class MockHomeserver(HomeServer):
+    DATASTORE_CLASS = DataStore
+
+    def __init__(self, config, database_engine, db_conn, **kwargs):
+        super(MockHomeserver, self).__init__(
+            config.server_name,
+            reactor=reactor,
+            config=config,
+            database_engine=database_engine,
+            **kwargs
+        )
+
+        self.database_engine = database_engine
+        self.db_conn = db_conn
+
+    def get_db_conn(self):
+        return self.db_conn
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(
+        description=(
+            "Updates a synapse database to the latest schema and runs background updates"
+            " on it."
+        )
+    )
+    parser.add_argument("-v", action='store_true')
+    parser.add_argument(
+        "--database-config",
+        type=argparse.FileType('r'),
+        required=True,
+        help="A database config file for either a SQLite3 database or a PostgreSQL one.",
+    )
+
+    args = parser.parse_args()
+
+    logging_config = {
+        "level": logging.DEBUG if args.v else logging.INFO,
+        "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s",
+    }
+
+    logging.basicConfig(**logging_config)
+
+    # Load, process and sanity-check the config.
+    hs_config = yaml.safe_load(args.database_config)
+
+    if "database" not in hs_config:
+        sys.stderr.write("The configuration file must have a 'database' section.\n")
+        sys.exit(4)
+
+    config = HomeServerConfig()
+    config.parse_config_dict(hs_config, "", "")
+
+    # Create the database engine and a connection to it.
+    database_engine = create_engine(config.database_config)
+    db_conn = database_engine.module.connect(
+        **{
+            k: v
+            for k, v in config.database_config.get("args", {}).items()
+            if not k.startswith("cp_")
+        }
+    )
+
+    # Update the database to the latest schema.
+    prepare_database(db_conn, database_engine, config=config)
+    db_conn.commit()
+
+    # Instantiate and initialise the homeserver object.
+    hs = MockHomeserver(
+        config,
+        database_engine,
+        db_conn,
+        db_config=config.database_config,
+    )
+    # setup instantiates the store within the homeserver object.
+    hs.setup()
+    store = hs.get_datastore()
+
+    @defer.inlineCallbacks
+    def run_background_updates():
+        yield store.run_background_updates(sleep=False)
+        # Stop the reactor to exit the script once every background update is run.
+        reactor.stop()
+
+    # Apply all background updates on the database.
+    reactor.callWhenRunning(lambda: run_as_background_process(
+        "background_updates", run_background_updates
+    ))
+
+    reactor.run()
diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db
index 3f942abdb6..54faed1e83 100755
--- a/scripts/synapse_port_db
+++ b/scripts/synapse_port_db
@@ -2,6 +2,7 @@
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -29,9 +30,33 @@ import yaml
from twisted.enterprise import adbapi
from twisted.internet import defer, reactor
-from synapse.storage._base import LoggingTransaction, SQLBaseStore
+from synapse.config.homeserver import HomeServerConfig
+from synapse.logging.context import PreserveLoggingContext
+from synapse.storage._base import LoggingTransaction
+from synapse.storage.data_stores.main.client_ips import ClientIpBackgroundUpdateStore
+from synapse.storage.data_stores.main.deviceinbox import (
+    DeviceInboxBackgroundUpdateStore,
+)
+from synapse.storage.data_stores.main.devices import DeviceBackgroundUpdateStore
+from synapse.storage.data_stores.main.events_bg_updates import (
+    EventsBackgroundUpdatesStore,
+)
+from synapse.storage.data_stores.main.media_repository import (
+    MediaRepositoryBackgroundUpdateStore,
+)
+from synapse.storage.data_stores.main.registration import (
+    RegistrationBackgroundUpdateStore,
+)
+from synapse.storage.data_stores.main.roommember import RoomMemberBackgroundUpdateStore
+from synapse.storage.data_stores.main.search import SearchBackgroundUpdateStore
+from synapse.storage.data_stores.main.state import StateBackgroundUpdateStore
+from synapse.storage.data_stores.main.stats import StatsStore
+from synapse.storage.data_stores.main.user_directory import (
+    UserDirectoryBackgroundUpdateStore,
+)
from synapse.storage.engines import create_engine
from synapse.storage.prepare_database import prepare_database
+from synapse.util import Clock
logger = logging.getLogger("synapse_port_db")
@@ -43,6 +68,7 @@ BOOLEAN_COLUMNS = {
"presence_list": ["accepted"],
"presence_stream": ["currently_active"],
"public_room_list_stream": ["visibility"],
+ "devices": ["hidden"],
"device_lists_outbound_pokes": ["sent"],
"users_who_share_rooms": ["share_private"],
"groups": ["is_public"],
@@ -98,33 +124,24 @@ APPEND_ONLY_TABLES = [
end_error_exec_info = None
-class Store(object):
-    """This object is used to pull out some of the convenience API from the
-    Storage layer.
-
-    *All* database interactions should go through this object.
-    """
-
-    def __init__(self, db_pool, engine):
-        self.db_pool = db_pool
-        self.database_engine = engine
-
-    _simple_insert_txn = SQLBaseStore.__dict__["_simple_insert_txn"]
-    _simple_insert = SQLBaseStore.__dict__["_simple_insert"]
-
-    _simple_select_onecol_txn = SQLBaseStore.__dict__["_simple_select_onecol_txn"]
-    _simple_select_onecol = SQLBaseStore.__dict__["_simple_select_onecol"]
-    _simple_select_one = SQLBaseStore.__dict__["_simple_select_one"]
-    _simple_select_one_txn = SQLBaseStore.__dict__["_simple_select_one_txn"]
-    _simple_select_one_onecol = SQLBaseStore.__dict__["_simple_select_one_onecol"]
-    _simple_select_one_onecol_txn = SQLBaseStore.__dict__[
-        "_simple_select_one_onecol_txn"
-    ]
-
-    _simple_update_one = SQLBaseStore.__dict__["_simple_update_one"]
-    _simple_update_one_txn = SQLBaseStore.__dict__["_simple_update_one_txn"]
-    _simple_update_txn = SQLBaseStore.__dict__["_simple_update_txn"]
+class Store(
+    ClientIpBackgroundUpdateStore,
+    DeviceInboxBackgroundUpdateStore,
+    DeviceBackgroundUpdateStore,
+    EventsBackgroundUpdatesStore,
+    MediaRepositoryBackgroundUpdateStore,
+    RegistrationBackgroundUpdateStore,
+    RoomMemberBackgroundUpdateStore,
+    SearchBackgroundUpdateStore,
+    StateBackgroundUpdateStore,
+    UserDirectoryBackgroundUpdateStore,
+    StatsStore,
+):
+    def __init__(self, db_conn, hs):
+        super().__init__(db_conn, hs)
+        self.db_pool = hs.get_db_pool()
+
+    @defer.inlineCallbacks
    def runInteraction(self, desc, func, *args, **kwargs):
        def r(conn):
            try:
@@ -150,7 +167,8 @@ class Store(object):
logger.debug("[TXN FAIL] {%s} %s", desc, e)
raise
- return self.db_pool.runWithConnection(r)
+ with PreserveLoggingContext():
+ return (yield self.db_pool.runWithConnection(r))
def execute(self, f, *args, **kwargs):
return self.runInteraction(f.__name__, f, *args, **kwargs)
@@ -176,6 +194,25 @@ class Store(object):
            raise


+class MockHomeserver:
+    def __init__(self, config, database_engine, db_conn, db_pool):
+        self.database_engine = database_engine
+        self.db_conn = db_conn
+        self.db_pool = db_pool
+        self.clock = Clock(reactor)
+        self.config = config
+        self.hostname = config.server_name
+
+    def get_db_conn(self):
+        return self.db_conn
+
+    def get_db_pool(self):
+        return self.db_pool
+
+    def get_clock(self):
+        return self.clock
+
+
class Porter(object):
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)
@@ -447,31 +484,75 @@ class Porter(object):
        db_conn.commit()

+        return db_conn
+
    @defer.inlineCallbacks
-    def run(self):
-        try:
-            sqlite_db_pool = adbapi.ConnectionPool(
-                self.sqlite_config["name"], **self.sqlite_config["args"]
-            )
+    def build_db_store(self, config):
+        """Builds and returns a database store using the provided configuration.

-            postgres_db_pool = adbapi.ConnectionPool(
-                self.postgres_config["name"], **self.postgres_config["args"]
-            )
+        Args:
+            config: The database configuration, i.e. a dict following the structure of
+                the "database" section of Synapse's configuration file.
+
+        Returns:
+            The built Store object.
+        """
+        engine = create_engine(config)
+
+        self.progress.set_state("Preparing %s" % config["name"])
+        conn = self.setup_db(config, engine)
+
+        db_pool = adbapi.ConnectionPool(config["name"], **config["args"])
+
+        hs = MockHomeserver(self.hs_config, engine, conn, db_pool)
+
+        store = Store(conn, hs)
+
+        yield store.runInteraction(
+            "%s_engine.check_database" % config["name"],
+            engine.check_database,
+        )

-            sqlite_engine = create_engine(sqlite_config)
-            postgres_engine = create_engine(postgres_config)
+        return store

-            self.sqlite_store = Store(sqlite_db_pool, sqlite_engine)
-            self.postgres_store = Store(postgres_db_pool, postgres_engine)
+    @defer.inlineCallbacks
+    def run_background_updates_on_postgres(self):
+        # Manually apply all background updates on the PostgreSQL database.
+        postgres_ready = yield self.postgres_store.has_completed_background_updates()
+
+        if not postgres_ready:
+            # Only say that we're running background updates when there are background
+            # updates to run.
+            self.progress.set_state("Running background updates on PostgreSQL")
+
+        while not postgres_ready:
+            yield self.postgres_store.do_next_background_update(100)
+            postgres_ready = yield (
+                self.postgres_store.has_completed_background_updates()
+            )

-            yield self.postgres_store.execute(postgres_engine.check_database)
+    @defer.inlineCallbacks
+    def run(self):
+        try:
+            self.sqlite_store = yield self.build_db_store(self.sqlite_config)
+
+            # Check if all background updates are done, abort if not.
+            updates_complete = yield self.sqlite_store.has_completed_background_updates()
+            if not updates_complete:
+                sys.stderr.write(
+                    "Pending background updates exist in the SQLite3 database."
+                    " Please start Synapse again and wait until every update has finished"
+                    " before running this script.\n"
+                )
+                defer.returnValue(None)

-            # Step 1. Set up databases.
-            self.progress.set_state("Preparing SQLite3")
-            self.setup_db(sqlite_config, sqlite_engine)
+            self.postgres_store = yield self.build_db_store(
+                self.hs_config.database_config
+            )

-            self.progress.set_state("Preparing PostgreSQL")
-            self.setup_db(postgres_config, postgres_engine)
+            yield self.run_background_updates_on_postgres()

            self.progress.set_state("Creating port tables")
@@ -563,6 +644,8 @@ class Porter(object):
        def conv(j, col):
            if j in bool_cols:
                return bool(col)
+            if isinstance(col, bytes):
+                return bytearray(col)
            elif isinstance(col, string_types) and "\0" in col:
                logger.warn(
                    "DROPPING ROW: NUL value in table %s col %s: %r",
@@ -926,18 +1009,24 @@ if __name__ == "__main__":
        },
    }

-    postgres_config = yaml.safe_load(args.postgres_config)
+    hs_config = yaml.safe_load(args.postgres_config)

-    if "database" in postgres_config:
-        postgres_config = postgres_config["database"]
+    if "database" not in hs_config:
+        sys.stderr.write("The configuration file must have a 'database' section.\n")
+        sys.exit(4)
+
+    postgres_config = hs_config["database"]

    if "name" not in postgres_config:
-        sys.stderr.write("Malformed database config: no 'name'")
+        sys.stderr.write("Malformed database config: no 'name'\n")
        sys.exit(2)

    if postgres_config["name"] != "psycopg2":
-        sys.stderr.write("Database must use 'psycopg2' connector.")
+        sys.stderr.write("Database must use the 'psycopg2' connector.\n")
        sys.exit(3)

+    config = HomeServerConfig()
+    config.parse_config_dict(hs_config, "", "")
+
    def start(stdscr=None):
        if stdscr:
            progress = CursesProgress(stdscr)
@@ -946,9 +1035,9 @@ if __name__ == "__main__":

        porter = Porter(
            sqlite_config=sqlite_config,
-            postgres_config=postgres_config,
            progress=progress,
            batch_size=args.batch_size,
+            hs_config=config,
        )

        reactor.callWhenRunning(porter.run)
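One detail worth calling out from the `conv()` change above: the psycopg2 engine now rejects attempts to insert plain `bytes` (see #6186 in the changelog), while psycopg2 still adapts `bytearray` to a Postgres `BYTEA` literal, so wrapping genuine binary columns in `bytearray` lets them port cleanly. An illustration with a hypothetical table and value, assuming psycopg2's standard adaptation rules:

    value = b"\x00\x01\x02"

    # Plain bytes would trip the new guard; bytearray is still adapted to BYTEA.
    cur.execute(
        "INSERT INTO some_table (blob_col) VALUES (%s)", (bytearray(value),)
    )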
diff --git a/synapse/__init__.py b/synapse/__init__.py
index ee3313a41c..8587ffa76f 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -36,7 +36,7 @@ try:
except ImportError:
pass
-__version__ = "1.4.1"
+__version__ = "1.5.0"
if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
# We import here so that we don't have to install a bunch of deps when
diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index cd347fbe1b..53f3bb0fa8 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -25,7 +25,13 @@ from twisted.internet import defer
import synapse.logging.opentracing as opentracing
import synapse.types
from synapse import event_auth
-from synapse.api.constants import EventTypes, JoinRules, Membership, UserTypes
+from synapse.api.constants import (
+    EventTypes,
+    JoinRules,
+    LimitBlockingTypes,
+    Membership,
+    UserTypes,
+)
from synapse.api.errors import (
AuthError,
Codes,
@@ -726,7 +732,7 @@ class Auth(object):
                self.hs.config.hs_disabled_message,
                errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
                admin_contact=self.hs.config.admin_contact,
-                limit_type=self.hs.config.hs_disabled_limit_type,
+                limit_type=LimitBlockingTypes.HS_DISABLED,
            )
        if self.hs.config.limit_usage_by_mau is True:
            assert not (user_id and threepid)
@@ -759,5 +765,5 @@ class Auth(object):
"Monthly Active User Limit Exceeded",
admin_contact=self.hs.config.admin_contact,
errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
- limit_type="monthly_active_user",
+ limit_type=LimitBlockingTypes.MONTHLY_ACTIVE_USER,
)
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index 60e99e4663..312196675e 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -131,3 +131,10 @@ class RelationTypes(object):
ANNOTATION = "m.annotation"
REPLACE = "m.replace"
REFERENCE = "m.reference"
+
+
+class LimitBlockingTypes(object):
+ """Reasons that a server may be blocked"""
+
+ MONTHLY_ACTIVE_USER = "monthly_active_user"
+ HS_DISABLED = "hs_disabled"
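For context, these constants surface in the `limit_type` field of the resource-limit error returned to blocked clients. Roughly the body a client would see, with the shape inferred from the `ResourceLimitError` arguments in the `auth.py` hunk above and a hypothetical admin contact:

    error_body = {
        "errcode": "M_RESOURCE_LIMIT_EXCEEDED",
        "error": "Monthly Active User Limit Exceeded",
        "admin_contact": "mailto:admin@example.com",  # hypothetical
        "limit_type": "monthly_active_user",  # LimitBlockingTypes.MONTHLY_ACTIVE_USER
    }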
diff --git a/synapse/config/registration.py b/synapse/config/registration.py
index ab41623b2b..1f6dac69da 100644
--- a/synapse/config/registration.py
+++ b/synapse/config/registration.py
@@ -300,7 +300,7 @@ class RegistrationConfig(Config):
# If a delegate is specified, the config option public_baseurl must also be filled out.
#
        account_threepid_delegates:
-            #email: https://example.com     # Delegate email sending to example.org
+            #email: https://example.com     # Delegate email sending to example.com
            #msisdn: http://localhost:8090  # Delegate SMS sending to this local process

        # Users who register on this homeserver will automatically be joined
diff --git a/synapse/config/server.py b/synapse/config/server.py
index c942841578..d556df308d 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -171,6 +171,7 @@ class ServerConfig(Config):
)
self.mau_trial_days = config.get("mau_trial_days", 0)
+ self.mau_limit_alerting = config.get("mau_limit_alerting", True)
# How long to keep redacted events in the database in unredacted form
# before redacting them.
@@ -192,7 +193,6 @@ class ServerConfig(Config):
# Options to disable HS
self.hs_disabled = config.get("hs_disabled", False)
self.hs_disabled_message = config.get("hs_disabled_message", "")
- self.hs_disabled_limit_type = config.get("hs_disabled_limit_type", "")
# Admin uri to direct users at should their instance become blocked
# due to resource constraints
@@ -675,7 +675,6 @@ class ServerConfig(Config):
#
#hs_disabled: false
#hs_disabled_message: 'Human readable reason for why the HS is blocked'
- #hs_disabled_limit_type: 'error code(str), to help clients decode reason'
# Monthly Active User Blocking
#
@@ -695,9 +694,16 @@ class ServerConfig(Config):
# sign up in a short space of time never to return after their initial
# session.
#
+ # 'mau_limit_alerting' is a means of limiting client-side alerting
+ # should the mau limit be reached. This is useful for small instances
+ # where the admin has 5 mau seats (say) for 5 specific people and no
+ # interest in increasing the mau limit further. Defaults to True,
+ # which means that alerting is enabled.
+ #
#limit_usage_by_mau: false
#max_mau_value: 50
#mau_trial_days: 2
+ #mau_limit_alerting: false
# If enabled, the metrics for the number of monthly active users will
# be populated, however no one will be limited. If limit_usage_by_mau
diff --git a/synapse/crypto/event_signing.py b/synapse/crypto/event_signing.py
index 694fb2c816..ccaa8a9920 100644
--- a/synapse/crypto/event_signing.py
+++ b/synapse/crypto/event_signing.py
@@ -125,9 +125,11 @@ def compute_event_signature(event_dict, signature_name, signing_key):
redact_json = prune_event_dict(event_dict)
redact_json.pop("age_ts", None)
redact_json.pop("unsigned", None)
- logger.debug("Signing event: %s", encode_canonical_json(redact_json))
+ if logger.isEnabledFor(logging.DEBUG):
+ logger.debug("Signing event: %s", encode_canonical_json(redact_json))
redact_json = sign_json(redact_json, signature_name, signing_key)
- logger.debug("Signed event: %s", encode_canonical_json(redact_json))
+ if logger.isEnabledFor(logging.DEBUG):
+ logger.debug("Signed event: %s", encode_canonical_json(redact_json))
return redact_json["signatures"]
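The point of the `isEnabledFor` guard: %-style logging args only defer string *formatting*, not argument construction, and `encode_canonical_json` on a large event is expensive even when the resulting bytes are thrown away. A minimal sketch of the pattern:

```python
import logging

from canonicaljson import encode_canonical_json

logger = logging.getLogger(__name__)


def log_signing(event_dict):
    # Guard the whole call so the canonical JSON is never built
    # unless DEBUG records will actually be emitted.
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug("Signing event: %s", encode_canonical_json(event_dict))
```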
diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index 4e91df60e6..e7b722547b 100644
--- a/synapse/event_auth.py
+++ b/synapse/event_auth.py
@@ -493,8 +493,7 @@ def _check_power_levels(event, auth_events):
new_level_too_big = new_level is not None and new_level > user_level
if old_level_too_big or new_level_too_big:
raise AuthError(
- 403,
- "You don't have permission to add ops level greater " "than your own",
+ 403, "You don't have permission to add ops level greater than your own"
)
diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py
index acbcbeeced..27cd8a63ff 100644
--- a/synapse/events/snapshot.py
+++ b/synapse/events/snapshot.py
@@ -12,9 +12,9 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
from six import iteritems
+import attr
from frozendict import frozendict
from twisted.internet import defer
@@ -22,7 +22,8 @@ from twisted.internet import defer
from synapse.logging.context import make_deferred_yieldable, run_in_background
-class EventContext(object):
+@attr.s(slots=True)
+class EventContext:
"""
Attributes:
state_group (int|None): state group id, if the state has been stored
@@ -31,9 +32,6 @@ class EventContext(object):
rejected (bool|str): A rejection reason if the event was rejected, else
False
- push_actions (list[(str, list[object])]): list of (user_id, actions)
- tuples
-
prev_group (int): Previously persisted state group. ``None`` for an
outlier.
delta_ids (dict[(str, str), str]): Delta from ``prev_group``.
@@ -42,6 +40,8 @@ class EventContext(object):
prev_state_events (?): XXX: is this ever set to anything other than
the empty list?
+ app_service: FIXME
+
_current_state_ids (dict[(str, str), str]|None):
The current state map including the current event. None if outlier
or we haven't fetched the state from DB yet.
@@ -67,49 +67,33 @@ class EventContext(object):
Only set when state has not been fetched yet.
"""
- __slots__ = [
- "state_group",
- "rejected",
- "prev_group",
- "delta_ids",
- "prev_state_events",
- "app_service",
- "_current_state_ids",
- "_prev_state_ids",
- "_prev_state_id",
- "_event_type",
- "_event_state_key",
- "_fetching_state_deferred",
- ]
-
- def __init__(self):
- self.prev_state_events = []
- self.rejected = False
- self.app_service = None
+ state_group = attr.ib(default=None)
+ rejected = attr.ib(default=False)
+ prev_group = attr.ib(default=None)
+ delta_ids = attr.ib(default=None)
+ prev_state_events = attr.ib(default=attr.Factory(list))
+ app_service = attr.ib(default=None)
+
+ _current_state_ids = attr.ib(default=None)
+ _prev_state_ids = attr.ib(default=None)
+ _prev_state_id = attr.ib(default=None)
+
+ _event_type = attr.ib(default=None)
+ _event_state_key = attr.ib(default=None)
+ _fetching_state_deferred = attr.ib(default=None)
@staticmethod
def with_state(
state_group, current_state_ids, prev_state_ids, prev_group=None, delta_ids=None
):
- context = EventContext()
-
- # The current state including the current event
- context._current_state_ids = current_state_ids
- # The current state excluding the current event
- context._prev_state_ids = prev_state_ids
- context.state_group = state_group
-
- context._prev_state_id = None
- context._event_type = None
- context._event_state_key = None
- context._fetching_state_deferred = defer.succeed(None)
-
- # A previously persisted state group and a delta between that
- # and this state.
- context.prev_group = prev_group
- context.delta_ids = delta_ids
-
- return context
+ return EventContext(
+ current_state_ids=current_state_ids,
+ prev_state_ids=prev_state_ids,
+ state_group=state_group,
+ fetching_state_deferred=defer.succeed(None),
+ prev_group=prev_group,
+ delta_ids=delta_ids,
+ )
@defer.inlineCallbacks
def serialize(self, event, store):
@@ -157,24 +141,18 @@ class EventContext(object):
Returns:
EventContext
"""
- context = EventContext()
-
- # We use the state_group and prev_state_id stuff to pull the
- # current_state_ids out of the DB and construct prev_state_ids.
- context._prev_state_id = input["prev_state_id"]
- context._event_type = input["event_type"]
- context._event_state_key = input["event_state_key"]
-
- context._current_state_ids = None
- context._prev_state_ids = None
- context._fetching_state_deferred = None
-
- context.state_group = input["state_group"]
- context.prev_group = input["prev_group"]
- context.delta_ids = _decode_state_dict(input["delta_ids"])
-
- context.rejected = input["rejected"]
- context.prev_state_events = input["prev_state_events"]
+ context = EventContext(
+ # We use the state_group and prev_state_id stuff to pull the
+ # current_state_ids out of the DB and construct prev_state_ids.
+ prev_state_id=input["prev_state_id"],
+ event_type=input["event_type"],
+ event_state_key=input["event_state_key"],
+ state_group=input["state_group"],
+ prev_group=input["prev_group"],
+ delta_ids=_decode_state_dict(input["delta_ids"]),
+ rejected=input["rejected"],
+ prev_state_events=input["prev_state_events"],
+ )
app_service_id = input["app_service_id"]
if app_service_id:
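One attrs detail makes this conversion work: attrs strips a single leading underscore when generating `__init__` parameter names, which is why `with_state` and `deserialize` can pass `current_state_ids=...` for the private `_current_state_ids` attribute. A self-contained sketch:

```python
import attr


@attr.s(slots=True)
class Example:
    state_group = attr.ib(default=None)
    # Private attribute; the generated init argument is 'current_state_ids'.
    _current_state_ids = attr.ib(default=None)


e = Example(state_group=1, current_state_ids={("m.room.create", ""): "$ev"})
assert e._current_state_ids == {("m.room.create", ""): "$ev"}
```

`slots=True` also preserves the memory behaviour of the hand-written `__slots__` list that this hunk deletes.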
diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py
index 5a1e23a145..223aace0d9 100644
--- a/synapse/federation/federation_base.py
+++ b/synapse/federation/federation_base.py
@@ -278,9 +278,7 @@ def _check_sigs_on_pdus(keyring, room_version, pdus):
pdu_to_check.sender_domain,
e.getErrorMessage(),
)
- # XX not really sure if these are the right codes, but they are what
- # we've done for ages
- raise SynapseError(400, errmsg, Codes.UNAUTHORIZED)
+ raise SynapseError(403, errmsg, Codes.FORBIDDEN)
for p, d in zip(pdus_to_check_sender, more_deferreds):
d.addErrback(sender_err, p)
@@ -314,8 +312,7 @@ def _check_sigs_on_pdus(keyring, room_version, pdus):
"event id %s: unable to verify signature for event id domain: %s"
% (pdu_to_check.pdu.event_id, e.getErrorMessage())
)
- # XX as above: not really sure if these are the right codes
- raise SynapseError(400, errmsg, Codes.UNAUTHORIZED)
+ raise SynapseError(403, errmsg, Codes.FORBIDDEN)
for p, d in zip(pdus_to_check_event_id, more_deferreds):
d.addErrback(event_err, p)
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 5b22a39b7f..f5c1632916 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -196,7 +196,7 @@ class FederationClient(FederationBase):
dest, room_id, extremities, limit
)
- logger.debug("backfill transaction_data=%s", repr(transaction_data))
+ logger.debug("backfill transaction_data=%r", transaction_data)
room_version = yield self.store.get_room_version(room_id)
format_ver = room_version_to_event_format(room_version)
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 21e52c9695..d5a19764d2 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -21,7 +21,6 @@ from six import iteritems
from canonicaljson import json
from prometheus_client import Counter
-from twisted.internet import defer
from twisted.internet.abstract import isIPAddress
from twisted.python import failure
@@ -86,14 +85,12 @@ class FederationServer(FederationBase):
# come in waves.
self._state_resp_cache = ResponseCache(hs, "state_resp", timeout_ms=30000)
- @defer.inlineCallbacks
- @log_function
- def on_backfill_request(self, origin, room_id, versions, limit):
- with (yield self._server_linearizer.queue((origin, room_id))):
+ async def on_backfill_request(self, origin, room_id, versions, limit):
+ with (await self._server_linearizer.queue((origin, room_id))):
origin_host, _ = parse_server_name(origin)
- yield self.check_server_matches_acl(origin_host, room_id)
+ await self.check_server_matches_acl(origin_host, room_id)
- pdus = yield self.handler.on_backfill_request(
+ pdus = await self.handler.on_backfill_request(
origin, room_id, versions, limit
)
@@ -101,9 +98,7 @@ class FederationServer(FederationBase):
return 200, res
- @defer.inlineCallbacks
- @log_function
- def on_incoming_transaction(self, origin, transaction_data):
+ async def on_incoming_transaction(self, origin, transaction_data):
# keep this as early as possible to make the calculated origin ts as
# accurate as possible.
request_time = self._clock.time_msec()
@@ -118,18 +113,17 @@ class FederationServer(FederationBase):
# use a linearizer to ensure that we don't process the same transaction
# multiple times in parallel.
with (
- yield self._transaction_linearizer.queue(
+ await self._transaction_linearizer.queue(
(origin, transaction.transaction_id)
)
):
- result = yield self._handle_incoming_transaction(
+ result = await self._handle_incoming_transaction(
origin, transaction, request_time
)
return result
- @defer.inlineCallbacks
- def _handle_incoming_transaction(self, origin, transaction, request_time):
+ async def _handle_incoming_transaction(self, origin, transaction, request_time):
""" Process an incoming transaction and return the HTTP response
Args:
@@ -140,7 +134,7 @@ class FederationServer(FederationBase):
Returns:
Deferred[(int, object)]: http response code and body
"""
- response = yield self.transaction_actions.have_responded(origin, transaction)
+ response = await self.transaction_actions.have_responded(origin, transaction)
if response:
logger.debug(
@@ -151,7 +145,7 @@ class FederationServer(FederationBase):
logger.debug("[%s] Transaction is new", transaction.transaction_id)
- # Reject if PDU count > 50 and EDU count > 100
+ # Reject if PDU count > 50 or EDU count > 100
if len(transaction.pdus) > 50 or (
hasattr(transaction, "edus") and len(transaction.edus) > 100
):
@@ -159,7 +153,7 @@ class FederationServer(FederationBase):
logger.info("Transaction PDU or EDU count too large. Returning 400")
response = {}
- yield self.transaction_actions.set_response(
+ await self.transaction_actions.set_response(
origin, transaction, 400, response
)
return 400, response
@@ -195,7 +189,7 @@ class FederationServer(FederationBase):
continue
try:
- room_version = yield self.store.get_room_version(room_id)
+ room_version = await self.store.get_room_version(room_id)
except NotFoundError:
logger.info("Ignoring PDU for unknown room_id: %s", room_id)
continue
@@ -221,11 +215,10 @@ class FederationServer(FederationBase):
# require callouts to other servers to fetch missing events), but
# impose a limit to avoid going too crazy with ram/cpu.
- @defer.inlineCallbacks
- def process_pdus_for_room(room_id):
+ async def process_pdus_for_room(room_id):
logger.debug("Processing PDUs for %s", room_id)
try:
- yield self.check_server_matches_acl(origin_host, room_id)
+ await self.check_server_matches_acl(origin_host, room_id)
except AuthError as e:
logger.warn("Ignoring PDUs for room %s from banned server", room_id)
for pdu in pdus_by_room[room_id]:
@@ -237,7 +230,7 @@ class FederationServer(FederationBase):
event_id = pdu.event_id
with nested_logging_context(event_id):
try:
- yield self._handle_received_pdu(origin, pdu)
+ await self._handle_received_pdu(origin, pdu)
pdu_results[event_id] = {}
except FederationError as e:
logger.warn("Error handling PDU %s: %s", event_id, e)
@@ -251,36 +244,33 @@ class FederationServer(FederationBase):
exc_info=(f.type, f.value, f.getTracebackObject()),
)
- yield concurrently_execute(
+ await concurrently_execute(
process_pdus_for_room, pdus_by_room.keys(), TRANSACTION_CONCURRENCY_LIMIT
)
if hasattr(transaction, "edus"):
for edu in (Edu(**x) for x in transaction.edus):
- yield self.received_edu(origin, edu.edu_type, edu.content)
+ await self.received_edu(origin, edu.edu_type, edu.content)
response = {"pdus": pdu_results}
logger.debug("Returning: %s", str(response))
- yield self.transaction_actions.set_response(origin, transaction, 200, response)
+ await self.transaction_actions.set_response(origin, transaction, 200, response)
return 200, response
- @defer.inlineCallbacks
- def received_edu(self, origin, edu_type, content):
+ async def received_edu(self, origin, edu_type, content):
received_edus_counter.inc()
- yield self.registry.on_edu(edu_type, origin, content)
+ await self.registry.on_edu(edu_type, origin, content)
- @defer.inlineCallbacks
- @log_function
- def on_context_state_request(self, origin, room_id, event_id):
+ async def on_context_state_request(self, origin, room_id, event_id):
if not event_id:
raise NotImplementedError("Specify an event")
origin_host, _ = parse_server_name(origin)
- yield self.check_server_matches_acl(origin_host, room_id)
+ await self.check_server_matches_acl(origin_host, room_id)
- in_room = yield self.auth.check_host_in_room(room_id, origin)
+ in_room = await self.auth.check_host_in_room(room_id, origin)
if not in_room:
raise AuthError(403, "Host not in room.")
@@ -289,8 +279,8 @@ class FederationServer(FederationBase):
# in the cache so we could return it without waiting for the linearizer
# - but that's non-trivial to get right, and anyway somewhat defeats
# the point of the linearizer.
- with (yield self._server_linearizer.queue((origin, room_id))):
- resp = yield self._state_resp_cache.wrap(
+ with (await self._server_linearizer.queue((origin, room_id))):
+ resp = await self._state_resp_cache.wrap(
(room_id, event_id),
self._on_context_state_request_compute,
room_id,
@@ -299,65 +289,58 @@ class FederationServer(FederationBase):
return 200, resp
- @defer.inlineCallbacks
- def on_state_ids_request(self, origin, room_id, event_id):
+ async def on_state_ids_request(self, origin, room_id, event_id):
if not event_id:
raise NotImplementedError("Specify an event")
origin_host, _ = parse_server_name(origin)
- yield self.check_server_matches_acl(origin_host, room_id)
+ await self.check_server_matches_acl(origin_host, room_id)
- in_room = yield self.auth.check_host_in_room(room_id, origin)
+ in_room = await self.auth.check_host_in_room(room_id, origin)
if not in_room:
raise AuthError(403, "Host not in room.")
- state_ids = yield self.handler.get_state_ids_for_pdu(room_id, event_id)
- auth_chain_ids = yield self.store.get_auth_chain_ids(state_ids)
+ state_ids = await self.handler.get_state_ids_for_pdu(room_id, event_id)
+ auth_chain_ids = await self.store.get_auth_chain_ids(state_ids)
return 200, {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids}
- @defer.inlineCallbacks
- def _on_context_state_request_compute(self, room_id, event_id):
- pdus = yield self.handler.get_state_for_pdu(room_id, event_id)
- auth_chain = yield self.store.get_auth_chain([pdu.event_id for pdu in pdus])
+ async def _on_context_state_request_compute(self, room_id, event_id):
+ pdus = await self.handler.get_state_for_pdu(room_id, event_id)
+ auth_chain = await self.store.get_auth_chain([pdu.event_id for pdu in pdus])
return {
"pdus": [pdu.get_pdu_json() for pdu in pdus],
"auth_chain": [pdu.get_pdu_json() for pdu in auth_chain],
}
- @defer.inlineCallbacks
- @log_function
- def on_pdu_request(self, origin, event_id):
- pdu = yield self.handler.get_persisted_pdu(origin, event_id)
+ async def on_pdu_request(self, origin, event_id):
+ pdu = await self.handler.get_persisted_pdu(origin, event_id)
if pdu:
return 200, self._transaction_from_pdus([pdu]).get_dict()
else:
return 404, ""
- @defer.inlineCallbacks
- def on_query_request(self, query_type, args):
+ async def on_query_request(self, query_type, args):
received_queries_counter.labels(query_type).inc()
- resp = yield self.registry.on_query(query_type, args)
+ resp = await self.registry.on_query(query_type, args)
return 200, resp
- @defer.inlineCallbacks
- def on_make_join_request(self, origin, room_id, user_id, supported_versions):
+ async def on_make_join_request(self, origin, room_id, user_id, supported_versions):
origin_host, _ = parse_server_name(origin)
- yield self.check_server_matches_acl(origin_host, room_id)
+ await self.check_server_matches_acl(origin_host, room_id)
- room_version = yield self.store.get_room_version(room_id)
+ room_version = await self.store.get_room_version(room_id)
if room_version not in supported_versions:
logger.warn("Room version %s not in %s", room_version, supported_versions)
raise IncompatibleRoomVersionError(room_version=room_version)
- pdu = yield self.handler.on_make_join_request(origin, room_id, user_id)
+ pdu = await self.handler.on_make_join_request(origin, room_id, user_id)
time_now = self._clock.time_msec()
return {"event": pdu.get_pdu_json(time_now), "room_version": room_version}
- @defer.inlineCallbacks
- def on_invite_request(self, origin, content, room_version):
+ async def on_invite_request(self, origin, content, room_version):
if room_version not in KNOWN_ROOM_VERSIONS:
raise SynapseError(
400,
@@ -369,24 +352,27 @@ class FederationServer(FederationBase):
pdu = event_from_pdu_json(content, format_ver)
origin_host, _ = parse_server_name(origin)
- yield self.check_server_matches_acl(origin_host, pdu.room_id)
- ret_pdu = yield self.handler.on_invite_request(origin, pdu)
+ await self.check_server_matches_acl(origin_host, pdu.room_id)
+ pdu = await self._check_sigs_and_hash(room_version, pdu)
+ ret_pdu = await self.handler.on_invite_request(origin, pdu)
time_now = self._clock.time_msec()
return {"event": ret_pdu.get_pdu_json(time_now)}
- @defer.inlineCallbacks
- def on_send_join_request(self, origin, content, room_id):
+ async def on_send_join_request(self, origin, content, room_id):
logger.debug("on_send_join_request: content: %s", content)
- room_version = yield self.store.get_room_version(room_id)
+ room_version = await self.store.get_room_version(room_id)
format_ver = room_version_to_event_format(room_version)
pdu = event_from_pdu_json(content, format_ver)
origin_host, _ = parse_server_name(origin)
- yield self.check_server_matches_acl(origin_host, pdu.room_id)
+ await self.check_server_matches_acl(origin_host, pdu.room_id)
logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures)
- res_pdus = yield self.handler.on_send_join_request(origin, pdu)
+
+ pdu = await self._check_sigs_and_hash(room_version, pdu)
+
+ res_pdus = await self.handler.on_send_join_request(origin, pdu)
time_now = self._clock.time_msec()
return (
200,
@@ -398,45 +384,44 @@ class FederationServer(FederationBase):
},
)
- @defer.inlineCallbacks
- def on_make_leave_request(self, origin, room_id, user_id):
+ async def on_make_leave_request(self, origin, room_id, user_id):
origin_host, _ = parse_server_name(origin)
- yield self.check_server_matches_acl(origin_host, room_id)
- pdu = yield self.handler.on_make_leave_request(origin, room_id, user_id)
+ await self.check_server_matches_acl(origin_host, room_id)
+ pdu = await self.handler.on_make_leave_request(origin, room_id, user_id)
- room_version = yield self.store.get_room_version(room_id)
+ room_version = await self.store.get_room_version(room_id)
time_now = self._clock.time_msec()
return {"event": pdu.get_pdu_json(time_now), "room_version": room_version}
- @defer.inlineCallbacks
- def on_send_leave_request(self, origin, content, room_id):
+ async def on_send_leave_request(self, origin, content, room_id):
logger.debug("on_send_leave_request: content: %s", content)
- room_version = yield self.store.get_room_version(room_id)
+ room_version = await self.store.get_room_version(room_id)
format_ver = room_version_to_event_format(room_version)
pdu = event_from_pdu_json(content, format_ver)
origin_host, _ = parse_server_name(origin)
- yield self.check_server_matches_acl(origin_host, pdu.room_id)
+ await self.check_server_matches_acl(origin_host, pdu.room_id)
logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures)
- yield self.handler.on_send_leave_request(origin, pdu)
+
+ pdu = await self._check_sigs_and_hash(room_version, pdu)
+
+ await self.handler.on_send_leave_request(origin, pdu)
return 200, {}
- @defer.inlineCallbacks
- def on_event_auth(self, origin, room_id, event_id):
- with (yield self._server_linearizer.queue((origin, room_id))):
+ async def on_event_auth(self, origin, room_id, event_id):
+ with (await self._server_linearizer.queue((origin, room_id))):
origin_host, _ = parse_server_name(origin)
- yield self.check_server_matches_acl(origin_host, room_id)
+ await self.check_server_matches_acl(origin_host, room_id)
time_now = self._clock.time_msec()
- auth_pdus = yield self.handler.on_event_auth(event_id)
+ auth_pdus = await self.handler.on_event_auth(event_id)
res = {"auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus]}
return 200, res
- @defer.inlineCallbacks
- def on_query_auth_request(self, origin, content, room_id, event_id):
+ async def on_query_auth_request(self, origin, content, room_id, event_id):
"""
Content is a dict with keys::
auth_chain (list): A list of events that give the auth chain.
@@ -455,22 +440,22 @@ class FederationServer(FederationBase):
Returns:
Deferred: Results in `dict` with the same format as `content`
"""
- with (yield self._server_linearizer.queue((origin, room_id))):
+ with (await self._server_linearizer.queue((origin, room_id))):
origin_host, _ = parse_server_name(origin)
- yield self.check_server_matches_acl(origin_host, room_id)
+ await self.check_server_matches_acl(origin_host, room_id)
- room_version = yield self.store.get_room_version(room_id)
+ room_version = await self.store.get_room_version(room_id)
format_ver = room_version_to_event_format(room_version)
auth_chain = [
event_from_pdu_json(e, format_ver) for e in content["auth_chain"]
]
- signed_auth = yield self._check_sigs_and_hash_and_fetch(
+ signed_auth = await self._check_sigs_and_hash_and_fetch(
origin, auth_chain, outlier=True, room_version=room_version
)
- ret = yield self.handler.on_query_auth(
+ ret = await self.handler.on_query_auth(
origin,
event_id,
room_id,
@@ -496,16 +481,14 @@ class FederationServer(FederationBase):
return self.on_query_request("user_devices", user_id)
@trace
- @defer.inlineCallbacks
- @log_function
- def on_claim_client_keys(self, origin, content):
+ async def on_claim_client_keys(self, origin, content):
query = []
for user_id, device_keys in content.get("one_time_keys", {}).items():
for device_id, algorithm in device_keys.items():
query.append((user_id, device_id, algorithm))
log_kv({"message": "Claiming one time keys.", "user, device pairs": query})
- results = yield self.store.claim_e2e_one_time_keys(query)
+ results = await self.store.claim_e2e_one_time_keys(query)
json_result = {}
for user_id, device_keys in results.items():
@@ -529,14 +512,12 @@ class FederationServer(FederationBase):
return {"one_time_keys": json_result}
- @defer.inlineCallbacks
- @log_function
- def on_get_missing_events(
+ async def on_get_missing_events(
self, origin, room_id, earliest_events, latest_events, limit
):
- with (yield self._server_linearizer.queue((origin, room_id))):
+ with (await self._server_linearizer.queue((origin, room_id))):
origin_host, _ = parse_server_name(origin)
- yield self.check_server_matches_acl(origin_host, room_id)
+ await self.check_server_matches_acl(origin_host, room_id)
logger.info(
"on_get_missing_events: earliest_events: %r, latest_events: %r,"
@@ -546,7 +527,7 @@ class FederationServer(FederationBase):
limit,
)
- missing_events = yield self.handler.on_get_missing_events(
+ missing_events = await self.handler.on_get_missing_events(
origin, room_id, earliest_events, latest_events, limit
)
@@ -579,8 +560,7 @@ class FederationServer(FederationBase):
destination=None,
)
- @defer.inlineCallbacks
- def _handle_received_pdu(self, origin, pdu):
+ async def _handle_received_pdu(self, origin, pdu):
""" Process a PDU received in a federation /send/ transaction.
If the event is invalid, then this method throws a FederationError.
@@ -633,37 +613,34 @@ class FederationServer(FederationBase):
logger.info("Accepting join PDU %s from %s", pdu.event_id, origin)
# We've already checked that we know the room version by this point
- room_version = yield self.store.get_room_version(pdu.room_id)
+ room_version = await self.store.get_room_version(pdu.room_id)
# Check signature.
try:
- pdu = yield self._check_sigs_and_hash(room_version, pdu)
+ pdu = await self._check_sigs_and_hash(room_version, pdu)
except SynapseError as e:
raise FederationError("ERROR", e.code, e.msg, affected=pdu.event_id)
- yield self.handler.on_receive_pdu(origin, pdu, sent_to_us_directly=True)
+ await self.handler.on_receive_pdu(origin, pdu, sent_to_us_directly=True)
def __str__(self):
return "<ReplicationLayer(%s)>" % self.server_name
- @defer.inlineCallbacks
- def exchange_third_party_invite(
+ async def exchange_third_party_invite(
self, sender_user_id, target_user_id, room_id, signed
):
- ret = yield self.handler.exchange_third_party_invite(
+ ret = await self.handler.exchange_third_party_invite(
sender_user_id, target_user_id, room_id, signed
)
return ret
- @defer.inlineCallbacks
- def on_exchange_third_party_invite_request(self, room_id, event_dict):
- ret = yield self.handler.on_exchange_third_party_invite_request(
+ async def on_exchange_third_party_invite_request(self, room_id, event_dict):
+ ret = await self.handler.on_exchange_third_party_invite_request(
room_id, event_dict
)
return ret
- @defer.inlineCallbacks
- def check_server_matches_acl(self, server_name, room_id):
+ async def check_server_matches_acl(self, server_name, room_id):
"""Check if the given server is allowed by the server ACLs in the room
Args:
@@ -673,13 +650,13 @@ class FederationServer(FederationBase):
Raises:
AuthError if the server does not match the ACL
"""
- state_ids = yield self.store.get_current_state_ids(room_id)
+ state_ids = await self.store.get_current_state_ids(room_id)
acl_event_id = state_ids.get((EventTypes.ServerACL, ""))
if not acl_event_id:
return
- acl_event = yield self.store.get_event(acl_event_id)
+ acl_event = await self.store.get_event(acl_event_id)
if server_matches_acl_event(server_name, acl_event):
return
@@ -792,15 +769,14 @@ class FederationHandlerRegistry(object):
self.query_handlers[query_type] = handler
- @defer.inlineCallbacks
- def on_edu(self, edu_type, origin, content):
+ async def on_edu(self, edu_type, origin, content):
handler = self.edu_handlers.get(edu_type)
if not handler:
logger.warn("No handler registered for EDU type %s", edu_type)
with start_active_span_from_edu(content, "handle_edu"):
try:
- yield handler(origin, content)
+ await handler(origin, content)
except SynapseError as e:
logger.info("Failed to handle edu %r: %r", edu_type, e)
except Exception:
@@ -833,7 +809,7 @@ class ReplicationFederationHandlerRegistry(FederationHandlerRegistry):
super(ReplicationFederationHandlerRegistry, self).__init__()
- def on_edu(self, edu_type, origin, content):
+ async def on_edu(self, edu_type, origin, content):
"""Overrides FederationHandlerRegistry
"""
if not self.config.use_presence and edu_type == "m.presence":
@@ -841,17 +817,17 @@ class ReplicationFederationHandlerRegistry(FederationHandlerRegistry):
handler = self.edu_handlers.get(edu_type)
if handler:
- return super(ReplicationFederationHandlerRegistry, self).on_edu(
+ return await super(ReplicationFederationHandlerRegistry, self).on_edu(
edu_type, origin, content
)
- return self._send_edu(edu_type=edu_type, origin=origin, content=content)
+ return await self._send_edu(edu_type=edu_type, origin=origin, content=content)
- def on_query(self, query_type, args):
+ async def on_query(self, query_type, args):
"""Overrides FederationHandlerRegistry
"""
handler = self.query_handlers.get(query_type)
if handler:
- return handler(args)
+ return await handler(args)
- return self._get_query_client(query_type=query_type, args=args)
+ return await self._get_query_client(query_type=query_type, args=args)
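The whole of `federation_server.py` follows one mechanical pattern: `@defer.inlineCallbacks` plus `yield` becomes `async def` plus `await`. This works because Twisted `Deferred`s implement `__await__`, so storage and handler calls that still return Deferreds can be awaited directly, and an `async def` can be driven as a Deferred via `ensureDeferred`. A runnable sketch with a stand-in store:

```python
from twisted.internet import defer


class FakeStore:
    def get_thing(self):
        # Stands in for a storage call returning a fired Deferred.
        return defer.succeed(42)


# Before: generator-based coroutine.
@defer.inlineCallbacks
def old_style(store):
    result = yield store.get_thing()
    return result


# After: native coroutine; the body is a yield -> await rewrite.
async def new_style(store):
    result = await store.get_thing()
    return result


d = defer.ensureDeferred(new_style(FakeStore()))
d.addCallback(print)  # prints 42
```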
diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py
index 454456a52d..ced4925a98 100644
--- a/synapse/federation/send_queue.py
+++ b/synapse/federation/send_queue.py
@@ -36,6 +36,8 @@ from six import iteritems
from sortedcontainers import SortedDict
+from twisted.internet import defer
+
from synapse.metrics import LaterGauge
from synapse.storage.presence import UserPresenceState
from synapse.util.metrics import Measure
@@ -212,7 +214,7 @@ class FederationRemoteSendQueue(object):
receipt (synapse.types.ReadReceipt):
"""
# nothing to do here: the replication listener will handle it.
- pass
+ return defer.succeed(None)
def send_presence(self, states):
"""As per FederationSender
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index 7b18408144..920fa86853 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -122,10 +122,10 @@ class TransportLayerClient(object):
Deferred: Results in a dict received from the remote homeserver.
"""
logger.debug(
- "backfill dest=%s, room_id=%s, event_tuples=%s, limit=%s",
+ "backfill dest=%s, room_id=%s, event_tuples=%r, limit=%s",
destination,
room_id,
- repr(event_tuples),
+ event_tuples,
str(limit),
)
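Swapping `repr(event_tuples)` for `%r` defers the `repr()` call to the logging framework, which only performs it if the record is actually emitted. A quick demonstration:

```python
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("demo")


class Expensive:
    def __repr__(self):
        print("repr() called!")
        return "<Expensive>"


# At INFO level this DEBUG record is dropped before formatting,
# so __repr__ is never invoked:
logger.debug("value=%r", Expensive())

# Calling repr() in the argument list always pays the cost,
# even when the record is dropped:
logger.debug("value=%s", repr(Expensive()))
```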
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 7e9c9aee13..08276fdebf 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -1223,7 +1223,6 @@ class FederationHandler(BaseHandler):
Returns:
Deferred[FrozenEvent]
"""
-
if get_domain_from_id(user_id) != origin:
logger.info(
"Got /make_join request for user %r from different origin %s, ignoring",
@@ -1252,7 +1251,7 @@ class FederationHandler(BaseHandler):
builder=builder
)
except AuthError as e:
- logger.warn("Failed to create join %r because %s", event, e)
+ logger.warn("Failed to create join to %s because %s", room_id, e)
raise e
event_allowed = yield self.third_party_event_rules.check_event_allowed(
@@ -1281,11 +1280,20 @@ class FederationHandler(BaseHandler):
event = pdu
logger.debug(
- "on_send_join_request: Got event: %s, signatures: %s",
+ "on_send_join_request from %s: Got event: %s, signatures: %s",
+ origin,
event.event_id,
event.signatures,
)
+ if get_domain_from_id(event.sender) != origin:
+ logger.info(
+ "Got /send_join request for user %r from different origin %s",
+ event.sender,
+ origin,
+ )
+ raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)
+
event.internal_metadata.outlier = False
# Send this event on behalf of the origin server.
#
@@ -1504,6 +1512,14 @@ class FederationHandler(BaseHandler):
event.signatures,
)
+ if get_domain_from_id(event.sender) != origin:
+ logger.info(
+ "Got /send_leave request for user %r from different origin %s",
+ event.sender,
+ origin,
+ )
+ raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)
+
event.internal_metadata.outlier = False
context = yield self._handle_new_event(origin, event)
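The new `/send_join` and `/send_leave` checks compare the domain of the event's sender against the origin server making the request. A simplified stand-in for `synapse.types.get_domain_from_id` (the real implementation may differ in error handling):

```python
def get_domain_from_id(user_id):
    # '@alice:example.com' -> 'example.com'
    idx = user_id.find(":")
    if idx == -1:
        raise ValueError("Invalid ID: %r" % (user_id,))
    return user_id[idx + 1:]


assert get_domain_from_id("@alice:example.com") == "example.com"

# The federation check above then amounts to:
#   if get_domain_from_id(event.sender) != origin:
#       raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)
```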
diff --git a/synapse/handlers/read_marker.py b/synapse/handlers/read_marker.py
index 3e4d8c93a4..e3b528d271 100644
--- a/synapse/handlers/read_marker.py
+++ b/synapse/handlers/read_marker.py
@@ -15,8 +15,6 @@
import logging
-from twisted.internet import defer
-
from synapse.util.async_helpers import Linearizer
from ._base import BaseHandler
@@ -32,8 +30,7 @@ class ReadMarkerHandler(BaseHandler):
self.read_marker_linearizer = Linearizer(name="read_marker")
self.notifier = hs.get_notifier()
- @defer.inlineCallbacks
- def received_client_read_marker(self, room_id, user_id, event_id):
+ async def received_client_read_marker(self, room_id, user_id, event_id):
"""Updates the read marker for a given user in a given room if the event ID given
is ahead in the stream relative to the current read marker.
@@ -41,8 +38,8 @@ class ReadMarkerHandler(BaseHandler):
the read marker has changed.
"""
- with (yield self.read_marker_linearizer.queue((room_id, user_id))):
- existing_read_marker = yield self.store.get_account_data_for_room_and_type(
+ with await self.read_marker_linearizer.queue((room_id, user_id)):
+ existing_read_marker = await self.store.get_account_data_for_room_and_type(
user_id, room_id, "m.fully_read"
)
@@ -50,13 +47,13 @@ class ReadMarkerHandler(BaseHandler):
if existing_read_marker:
# Only update if the new marker is ahead in the stream
- should_update = yield self.store.is_event_after(
+ should_update = await self.store.is_event_after(
event_id, existing_read_marker["event_id"]
)
if should_update:
content = {"event_id": event_id}
- max_id = yield self.store.add_account_data_to_room(
+ max_id = await self.store.add_account_data_to_room(
user_id, room_id, "m.fully_read", content
)
self.notifier.on_new_event("account_data_key", max_id, users=[user_id])
diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py
index 6854c751a6..9283c039e3 100644
--- a/synapse/handlers/receipts.py
+++ b/synapse/handlers/receipts.py
@@ -18,6 +18,7 @@ from twisted.internet import defer
from synapse.handlers._base import BaseHandler
from synapse.types import ReadReceipt, get_domain_from_id
+from synapse.util.async_helpers import maybe_awaitable
logger = logging.getLogger(__name__)
@@ -36,8 +37,7 @@ class ReceiptsHandler(BaseHandler):
self.clock = self.hs.get_clock()
self.state = hs.get_state_handler()
- @defer.inlineCallbacks
- def _received_remote_receipt(self, origin, content):
+ async def _received_remote_receipt(self, origin, content):
"""Called when we receive an EDU of type m.receipt from a remote HS.
"""
receipts = []
@@ -62,17 +62,16 @@ class ReceiptsHandler(BaseHandler):
)
)
- yield self._handle_new_receipts(receipts)
+ await self._handle_new_receipts(receipts)
- @defer.inlineCallbacks
- def _handle_new_receipts(self, receipts):
+ async def _handle_new_receipts(self, receipts):
"""Takes a list of receipts, stores them and informs the notifier.
"""
min_batch_id = None
max_batch_id = None
for receipt in receipts:
- res = yield self.store.insert_receipt(
+ res = await self.store.insert_receipt(
receipt.room_id,
receipt.receipt_type,
receipt.user_id,
@@ -99,14 +98,15 @@ class ReceiptsHandler(BaseHandler):
self.notifier.on_new_event("receipt_key", max_batch_id, rooms=affected_room_ids)
# Note that the min here shouldn't be relied upon to be accurate.
- yield self.hs.get_pusherpool().on_new_receipts(
- min_batch_id, max_batch_id, affected_room_ids
+ await maybe_awaitable(
+ self.hs.get_pusherpool().on_new_receipts(
+ min_batch_id, max_batch_id, affected_room_ids
+ )
)
return True
- @defer.inlineCallbacks
- def received_client_receipt(self, room_id, receipt_type, user_id, event_id):
+ async def received_client_receipt(self, room_id, receipt_type, user_id, event_id):
"""Called when a client tells us a local user has read up to the given
event_id in the room.
"""
@@ -118,24 +118,11 @@ class ReceiptsHandler(BaseHandler):
data={"ts": int(self.clock.time_msec())},
)
- is_new = yield self._handle_new_receipts([receipt])
+ is_new = await self._handle_new_receipts([receipt])
if not is_new:
return
- yield self.federation.send_read_receipt(receipt)
-
- @defer.inlineCallbacks
- def get_receipts_for_room(self, room_id, to_key):
- """Gets all receipts for a room, upto the given key.
- """
- result = yield self.store.get_linearized_receipts_for_room(
- room_id, to_key=to_key
- )
-
- if not result:
- return []
-
- return result
+ await self.federation.send_read_receipt(receipt)
class ReceiptEventSource(object):
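`maybe_awaitable` bridges the transition period: `on_new_receipts` on the pusher pool may still return a plain Deferred (or nothing at all) rather than a coroutine, and the wrapper makes the result safe to `await` either way. A sketch of the helper's contract; the real implementation in `synapse.util.async_helpers` may differ:

```python
from twisted.internet import defer


def maybe_awaitable(value):
    # Pass through anything already awaitable (Deferreds, coroutines);
    # wrap plain values -- including None -- into a fired Deferred.
    if hasattr(value, "__await__"):
        return value
    return defer.succeed(value)


async def demo():
    await maybe_awaitable(defer.succeed("deferred result"))
    await maybe_awaitable(None)  # would raise TypeError without the wrapper


defer.ensureDeferred(demo())
```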
diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py
index 466daf9202..26bc276692 100644
--- a/synapse/handlers/stats.py
+++ b/synapse/handlers/stats.py
@@ -45,6 +45,8 @@ class StatsHandler(StateDeltasHandler):
self.is_mine_id = hs.is_mine_id
self.stats_bucket_size = hs.config.stats_bucket_size
+ self.stats_enabled = hs.config.stats_enabled
+
# The current position in the current_state_delta stream
self.pos = None
@@ -61,7 +63,7 @@ class StatsHandler(StateDeltasHandler):
def notify_new_event(self):
"""Called when there may be more deltas to process
"""
- if not self.hs.config.stats_enabled or self._is_processing:
+ if not self.stats_enabled or self._is_processing:
return
self._is_processing = True
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index 22491f3700..2bbdd11941 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -79,7 +79,7 @@ class BulkPushRuleEvaluator(object):
dict of user_id -> push_rules
"""
room_id = event.room_id
- rules_for_room = self._get_rules_for_room(room_id)
+ rules_for_room = yield self._get_rules_for_room(room_id)
rules_by_user = yield rules_for_room.get_rules(event, context)
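This one-word change is a genuine bug fix rather than style: calling an `inlineCallbacks` function without `yield` hands back the Deferred itself, so the subsequent `.get_rules(...)` was being invoked on a Deferred instead of the rules object. A runnable illustration of the failure mode:

```python
from twisted.internet import defer


@defer.inlineCallbacks
def get_rules_for_room(room_id):
    # Stands in for the cached lookup; fires synchronously here.
    rules = yield defer.succeed({"room": room_id})
    return rules


d = get_rules_for_room("!r:hs")
print(type(d))  # <class 'twisted.internet.defer.Deferred'> -- the missing-yield bug
d.addCallback(lambda rules: print(rules["room"]))  # '!r:hs' once properly yielded
```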
diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py
index 03560c1f0e..9be37cd998 100644
--- a/synapse/replication/http/_base.py
+++ b/synapse/replication/http/_base.py
@@ -110,14 +110,14 @@ class ReplicationEndpoint(object):
return {}
@abc.abstractmethod
- def _handle_request(self, request, **kwargs):
+ async def _handle_request(self, request, **kwargs):
"""Handle incoming request.
This is called with the request object and PATH_ARGS.
Returns:
- Deferred[dict]: A JSON serialisable dict to be used as response
- body of request.
+ tuple[int, dict]: HTTP status code and a JSON serialisable dict
+ to be used as response body of request.
"""
pass
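Under the revised contract, every subclass implements `_handle_request` as a coroutine returning an `(status, body)` tuple. A skeletal subclass modeled on the servlets later in this patch; the endpoint name and path arg are hypothetical:

```python
from synapse.http.servlet import parse_json_object_from_request
from synapse.replication.http._base import ReplicationEndpoint


class ExampleReplicationServlet(ReplicationEndpoint):
    """Hypothetical endpoint illustrating the async _handle_request contract."""

    NAME = "example"
    PATH_ARGS = ("thing_id",)

    @staticmethod
    def _serialize_payload(thing_id, content):
        # Path args are consumed from the call kwargs; only the rest
        # goes into the request body (mirroring the servlets below).
        return {"content": content}

    async def _handle_request(self, request, thing_id):
        content = parse_json_object_from_request(request)
        # ... do the work on the master process ...
        return 200, {"thing_id": thing_id, "echo": content}
```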
diff --git a/synapse/replication/http/federation.py b/synapse/replication/http/federation.py
index 2f16955954..9af4e7e173 100644
--- a/synapse/replication/http/federation.py
+++ b/synapse/replication/http/federation.py
@@ -82,8 +82,7 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint):
return payload
- @defer.inlineCallbacks
- def _handle_request(self, request):
+ async def _handle_request(self, request):
with Measure(self.clock, "repl_fed_send_events_parse"):
content = parse_json_object_from_request(request)
@@ -101,15 +100,13 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint):
EventType = event_type_from_format_version(format_ver)
event = EventType(event_dict, internal_metadata, rejected_reason)
- context = yield EventContext.deserialize(
- self.store, event_payload["context"]
- )
+ context = EventContext.deserialize(self.store, event_payload["context"])
event_and_contexts.append((event, context))
logger.info("Got %d events from federation", len(event_and_contexts))
- yield self.federation_handler.persist_events_and_notify(
+ await self.federation_handler.persist_events_and_notify(
event_and_contexts, backfilled
)
@@ -144,8 +141,7 @@ class ReplicationFederationSendEduRestServlet(ReplicationEndpoint):
def _serialize_payload(edu_type, origin, content):
return {"origin": origin, "content": content}
- @defer.inlineCallbacks
- def _handle_request(self, request, edu_type):
+ async def _handle_request(self, request, edu_type):
with Measure(self.clock, "repl_fed_send_edu_parse"):
content = parse_json_object_from_request(request)
@@ -154,7 +150,7 @@ class ReplicationFederationSendEduRestServlet(ReplicationEndpoint):
logger.info("Got %r edu from %s", edu_type, origin)
- result = yield self.registry.on_edu(edu_type, origin, edu_content)
+ result = await self.registry.on_edu(edu_type, origin, edu_content)
return 200, result
@@ -193,8 +189,7 @@ class ReplicationGetQueryRestServlet(ReplicationEndpoint):
"""
return {"args": args}
- @defer.inlineCallbacks
- def _handle_request(self, request, query_type):
+ async def _handle_request(self, request, query_type):
with Measure(self.clock, "repl_fed_query_parse"):
content = parse_json_object_from_request(request)
@@ -202,7 +197,7 @@ class ReplicationGetQueryRestServlet(ReplicationEndpoint):
logger.info("Got %r query", query_type)
- result = yield self.registry.on_query(query_type, args)
+ result = await self.registry.on_query(query_type, args)
return 200, result
@@ -234,9 +229,8 @@ class ReplicationCleanRoomRestServlet(ReplicationEndpoint):
"""
return {}
- @defer.inlineCallbacks
- def _handle_request(self, request, room_id):
- yield self.store.clean_room_for_join(room_id)
+ async def _handle_request(self, request, room_id):
+ await self.store.clean_room_for_join(room_id)
return 200, {}
diff --git a/synapse/replication/http/login.py b/synapse/replication/http/login.py
index 786f5232b2..798b9d3af5 100644
--- a/synapse/replication/http/login.py
+++ b/synapse/replication/http/login.py
@@ -15,8 +15,6 @@
import logging
-from twisted.internet import defer
-
from synapse.http.servlet import parse_json_object_from_request
from synapse.replication.http._base import ReplicationEndpoint
@@ -52,15 +50,14 @@ class RegisterDeviceReplicationServlet(ReplicationEndpoint):
"is_guest": is_guest,
}
- @defer.inlineCallbacks
- def _handle_request(self, request, user_id):
+ async def _handle_request(self, request, user_id):
content = parse_json_object_from_request(request)
device_id = content["device_id"]
initial_display_name = content["initial_display_name"]
is_guest = content["is_guest"]
- device_id, access_token = yield self.registration_handler.register_device(
+ device_id, access_token = await self.registration_handler.register_device(
user_id, device_id, initial_display_name, is_guest
)
diff --git a/synapse/replication/http/membership.py b/synapse/replication/http/membership.py
index b9ce3477ad..b5f5f13a62 100644
--- a/synapse/replication/http/membership.py
+++ b/synapse/replication/http/membership.py
@@ -15,8 +15,6 @@
import logging
-from twisted.internet import defer
-
from synapse.http.servlet import parse_json_object_from_request
from synapse.replication.http._base import ReplicationEndpoint
from synapse.types import Requester, UserID
@@ -65,8 +63,7 @@ class ReplicationRemoteJoinRestServlet(ReplicationEndpoint):
"content": content,
}
- @defer.inlineCallbacks
- def _handle_request(self, request, room_id, user_id):
+ async def _handle_request(self, request, room_id, user_id):
content = parse_json_object_from_request(request)
remote_room_hosts = content["remote_room_hosts"]
@@ -79,7 +76,7 @@ class ReplicationRemoteJoinRestServlet(ReplicationEndpoint):
logger.info("remote_join: %s into room: %s", user_id, room_id)
- yield self.federation_handler.do_invite_join(
+ await self.federation_handler.do_invite_join(
remote_room_hosts, room_id, user_id, event_content
)
@@ -123,8 +120,7 @@ class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint):
"remote_room_hosts": remote_room_hosts,
}
- @defer.inlineCallbacks
- def _handle_request(self, request, room_id, user_id):
+ async def _handle_request(self, request, room_id, user_id):
content = parse_json_object_from_request(request)
remote_room_hosts = content["remote_room_hosts"]
@@ -137,7 +133,7 @@ class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint):
logger.info("remote_reject_invite: %s out of room: %s", user_id, room_id)
try:
- event = yield self.federation_handler.do_remotely_reject_invite(
+ event = await self.federation_handler.do_remotely_reject_invite(
remote_room_hosts, room_id, user_id
)
ret = event.get_pdu_json()
@@ -150,7 +146,7 @@ class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint):
#
logger.warn("Failed to reject invite: %s", e)
- yield self.store.locally_reject_invite(user_id, room_id)
+ await self.store.locally_reject_invite(user_id, room_id)
ret = {}
return 200, ret
diff --git a/synapse/replication/http/register.py b/synapse/replication/http/register.py
index 38260256cf..915cfb9430 100644
--- a/synapse/replication/http/register.py
+++ b/synapse/replication/http/register.py
@@ -15,8 +15,6 @@
import logging
-from twisted.internet import defer
-
from synapse.http.servlet import parse_json_object_from_request
from synapse.replication.http._base import ReplicationEndpoint
@@ -74,11 +72,10 @@ class ReplicationRegisterServlet(ReplicationEndpoint):
"address": address,
}
- @defer.inlineCallbacks
- def _handle_request(self, request, user_id):
+ async def _handle_request(self, request, user_id):
content = parse_json_object_from_request(request)
- yield self.registration_handler.register_with_store(
+ await self.registration_handler.register_with_store(
user_id=user_id,
password_hash=content["password_hash"],
was_guest=content["was_guest"],
@@ -117,14 +114,13 @@ class ReplicationPostRegisterActionsServlet(ReplicationEndpoint):
"""
return {"auth_result": auth_result, "access_token": access_token}
- @defer.inlineCallbacks
- def _handle_request(self, request, user_id):
+ async def _handle_request(self, request, user_id):
content = parse_json_object_from_request(request)
auth_result = content["auth_result"]
access_token = content["access_token"]
- yield self.registration_handler.post_registration_actions(
+ await self.registration_handler.post_registration_actions(
user_id=user_id, auth_result=auth_result, access_token=access_token
)
diff --git a/synapse/replication/http/send_event.py b/synapse/replication/http/send_event.py
index adb9b2f7f4..9bafd60b14 100644
--- a/synapse/replication/http/send_event.py
+++ b/synapse/replication/http/send_event.py
@@ -87,8 +87,7 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
return payload
- @defer.inlineCallbacks
- def _handle_request(self, request, event_id):
+ async def _handle_request(self, request, event_id):
with Measure(self.clock, "repl_send_event_parse"):
content = parse_json_object_from_request(request)
@@ -101,7 +100,7 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
event = EventType(event_dict, internal_metadata, rejected_reason)
requester = Requester.deserialize(self.store, content["requester"])
- context = yield EventContext.deserialize(self.store, content["context"])
+ context = EventContext.deserialize(self.store, content["context"])
ratelimit = content["ratelimit"]
extra_users = [UserID.from_string(u) for u in content["extra_users"]]
@@ -113,7 +112,7 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
"Got event to send with ID: %s into room: %s", event.event_id, event.room_id
)
- yield self.event_creation_handler.persist_and_notify_client_event(
+ await self.event_creation_handler.persist_and_notify_client_event(
requester, event, context, ratelimit=ratelimit, extra_users=extra_users
)
diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
index 9c1d41421c..86bbcc0eea 100644
--- a/synapse/rest/client/v1/room.py
+++ b/synapse/rest/client/v1/room.py
@@ -21,8 +21,6 @@ from six.moves.urllib import parse as urlparse
from canonicaljson import json
-from twisted.internet import defer
-
from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import (
AuthError,
@@ -85,11 +83,10 @@ class RoomCreateRestServlet(TransactionRestServlet):
set_tag("txn_id", txn_id)
return self.txns.fetch_or_execute_request(request, self.on_POST, request)
- @defer.inlineCallbacks
- def on_POST(self, request):
- requester = yield self.auth.get_user_by_req(request)
+ async def on_POST(self, request):
+ requester = await self.auth.get_user_by_req(request)
- info = yield self._room_creation_handler.create_room(
+ info = await self._room_creation_handler.create_room(
requester, self.get_room_config(request)
)
@@ -154,15 +151,14 @@ class RoomStateEventRestServlet(TransactionRestServlet):
def on_PUT_no_state_key(self, request, room_id, event_type):
return self.on_PUT(request, room_id, event_type, "")
- @defer.inlineCallbacks
- def on_GET(self, request, room_id, event_type, state_key):
- requester = yield self.auth.get_user_by_req(request, allow_guest=True)
+ async def on_GET(self, request, room_id, event_type, state_key):
+ requester = await self.auth.get_user_by_req(request, allow_guest=True)
format = parse_string(
request, "format", default="content", allowed_values=["content", "event"]
)
msg_handler = self.message_handler
- data = yield msg_handler.get_room_data(
+ data = await msg_handler.get_room_data(
user_id=requester.user.to_string(),
room_id=room_id,
event_type=event_type,
@@ -179,9 +175,8 @@ class RoomStateEventRestServlet(TransactionRestServlet):
elif format == "content":
return 200, data.get_dict()["content"]
- @defer.inlineCallbacks
- def on_PUT(self, request, room_id, event_type, state_key, txn_id=None):
- requester = yield self.auth.get_user_by_req(request)
+ async def on_PUT(self, request, room_id, event_type, state_key, txn_id=None):
+ requester = await self.auth.get_user_by_req(request)
if txn_id:
set_tag("txn_id", txn_id)
@@ -200,7 +195,7 @@ class RoomStateEventRestServlet(TransactionRestServlet):
if event_type == EventTypes.Member:
membership = content.get("membership", None)
- event = yield self.room_member_handler.update_membership(
+ event = await self.room_member_handler.update_membership(
requester,
target=UserID.from_string(state_key),
room_id=room_id,
@@ -208,7 +203,7 @@ class RoomStateEventRestServlet(TransactionRestServlet):
content=content,
)
else:
- event = yield self.event_creation_handler.create_and_send_nonmember_event(
+ event = await self.event_creation_handler.create_and_send_nonmember_event(
requester, event_dict, txn_id=txn_id
)
@@ -231,9 +226,8 @@ class RoomSendEventRestServlet(TransactionRestServlet):
PATTERNS = "/rooms/(?P<room_id>[^/]*)/send/(?P<event_type>[^/]*)"
register_txn_path(self, PATTERNS, http_server, with_get=True)
- @defer.inlineCallbacks
- def on_POST(self, request, room_id, event_type, txn_id=None):
- requester = yield self.auth.get_user_by_req(request, allow_guest=True)
+ async def on_POST(self, request, room_id, event_type, txn_id=None):
+ requester = await self.auth.get_user_by_req(request, allow_guest=True)
content = parse_json_object_from_request(request)
event_dict = {
@@ -246,7 +240,7 @@ class RoomSendEventRestServlet(TransactionRestServlet):
if b"ts" in request.args and requester.app_service:
event_dict["origin_server_ts"] = parse_integer(request, "ts", 0)
- event = yield self.event_creation_handler.create_and_send_nonmember_event(
+ event = await self.event_creation_handler.create_and_send_nonmember_event(
requester, event_dict, txn_id=txn_id
)
@@ -276,9 +270,8 @@ class JoinRoomAliasServlet(TransactionRestServlet):
PATTERNS = "/join/(?P<room_identifier>[^/]*)"
register_txn_path(self, PATTERNS, http_server)
- @defer.inlineCallbacks
- def on_POST(self, request, room_identifier, txn_id=None):
- requester = yield self.auth.get_user_by_req(request, allow_guest=True)
+ async def on_POST(self, request, room_identifier, txn_id=None):
+ requester = await self.auth.get_user_by_req(request, allow_guest=True)
try:
content = parse_json_object_from_request(request)
@@ -298,14 +291,14 @@ class JoinRoomAliasServlet(TransactionRestServlet):
elif RoomAlias.is_valid(room_identifier):
handler = self.room_member_handler
room_alias = RoomAlias.from_string(room_identifier)
- room_id, remote_room_hosts = yield handler.lookup_room_alias(room_alias)
+ room_id, remote_room_hosts = await handler.lookup_room_alias(room_alias)
room_id = room_id.to_string()
else:
raise SynapseError(
400, "%s was not legal room ID or room alias" % (room_identifier,)
)
- yield self.room_member_handler.update_membership(
+ await self.room_member_handler.update_membership(
requester=requester,
target=requester.user,
room_id=room_id,
@@ -335,12 +328,11 @@ class PublicRoomListRestServlet(TransactionRestServlet):
self.hs = hs
self.auth = hs.get_auth()
- @defer.inlineCallbacks
- def on_GET(self, request):
+ async def on_GET(self, request):
server = parse_string(request, "server", default=None)
try:
- yield self.auth.get_user_by_req(request, allow_guest=True)
+ await self.auth.get_user_by_req(request, allow_guest=True)
except InvalidClientCredentialsError as e:
# Option to allow servers to require auth when accessing
# /publicRooms via CS API. This is especially helpful in private
@@ -367,19 +359,18 @@ class PublicRoomListRestServlet(TransactionRestServlet):
handler = self.hs.get_room_list_handler()
if server:
- data = yield handler.get_remote_public_room_list(
+ data = await handler.get_remote_public_room_list(
server, limit=limit, since_token=since_token
)
else:
- data = yield handler.get_local_public_room_list(
+ data = await handler.get_local_public_room_list(
limit=limit, since_token=since_token
)
return 200, data
- @defer.inlineCallbacks
- def on_POST(self, request):
- yield self.auth.get_user_by_req(request, allow_guest=True)
+ async def on_POST(self, request):
+ await self.auth.get_user_by_req(request, allow_guest=True)
server = parse_string(request, "server", default=None)
content = parse_json_object_from_request(request)
@@ -408,7 +399,7 @@ class PublicRoomListRestServlet(TransactionRestServlet):
handler = self.hs.get_room_list_handler()
if server:
- data = yield handler.get_remote_public_room_list(
+ data = await handler.get_remote_public_room_list(
server,
limit=limit,
since_token=since_token,
@@ -417,7 +408,7 @@ class PublicRoomListRestServlet(TransactionRestServlet):
third_party_instance_id=third_party_instance_id,
)
else:
- data = yield handler.get_local_public_room_list(
+ data = await handler.get_local_public_room_list(
limit=limit,
since_token=since_token,
search_filter=search_filter,
@@ -436,10 +427,9 @@ class RoomMemberListRestServlet(RestServlet):
self.message_handler = hs.get_message_handler()
self.auth = hs.get_auth()
- @defer.inlineCallbacks
- def on_GET(self, request, room_id):
+ async def on_GET(self, request, room_id):
# TODO support Pagination stream API (limit/tokens)
- requester = yield self.auth.get_user_by_req(request)
+ requester = await self.auth.get_user_by_req(request)
handler = self.message_handler
# request the state as of a given event, as identified by a stream token,
@@ -459,7 +449,7 @@ class RoomMemberListRestServlet(RestServlet):
membership = parse_string(request, "membership")
not_membership = parse_string(request, "not_membership")
- events = yield handler.get_state_events(
+ events = await handler.get_state_events(
room_id=room_id,
user_id=requester.user.to_string(),
at_token=at_token,
@@ -488,11 +478,10 @@ class JoinedRoomMemberListRestServlet(RestServlet):
self.message_handler = hs.get_message_handler()
self.auth = hs.get_auth()
- @defer.inlineCallbacks
- def on_GET(self, request, room_id):
- requester = yield self.auth.get_user_by_req(request)
+ async def on_GET(self, request, room_id):
+ requester = await self.auth.get_user_by_req(request)
- users_with_profile = yield self.message_handler.get_joined_members(
+ users_with_profile = await self.message_handler.get_joined_members(
requester, room_id
)
@@ -508,9 +497,8 @@ class RoomMessageListRestServlet(RestServlet):
self.pagination_handler = hs.get_pagination_handler()
self.auth = hs.get_auth()
- @defer.inlineCallbacks
- def on_GET(self, request, room_id):
- requester = yield self.auth.get_user_by_req(request, allow_guest=True)
+ async def on_GET(self, request, room_id):
+ requester = await self.auth.get_user_by_req(request, allow_guest=True)
pagination_config = PaginationConfig.from_request(request, default_limit=10)
as_client_event = b"raw" not in request.args
filter_bytes = parse_string(request, b"filter", encoding=None)
@@ -521,7 +509,7 @@ class RoomMessageListRestServlet(RestServlet):
as_client_event = False
else:
event_filter = None
- msgs = yield self.pagination_handler.get_messages(
+ msgs = await self.pagination_handler.get_messages(
room_id=room_id,
requester=requester,
pagin_config=pagination_config,
@@ -541,11 +529,10 @@ class RoomStateRestServlet(RestServlet):
self.message_handler = hs.get_message_handler()
self.auth = hs.get_auth()
- @defer.inlineCallbacks
- def on_GET(self, request, room_id):
- requester = yield self.auth.get_user_by_req(request, allow_guest=True)
+ async def on_GET(self, request, room_id):
+ requester = await self.auth.get_user_by_req(request, allow_guest=True)
# Get all the current state for this room
- events = yield self.message_handler.get_state_events(
+ events = await self.message_handler.get_state_events(
room_id=room_id,
user_id=requester.user.to_string(),
is_guest=requester.is_guest,
@@ -562,11 +549,10 @@ class RoomInitialSyncRestServlet(RestServlet):
self.initial_sync_handler = hs.get_initial_sync_handler()
self.auth = hs.get_auth()
- @defer.inlineCallbacks
- def on_GET(self, request, room_id):
- requester = yield self.auth.get_user_by_req(request, allow_guest=True)
+ async def on_GET(self, request, room_id):
+ requester = await self.auth.get_user_by_req(request, allow_guest=True)
pagination_config = PaginationConfig.from_request(request)
- content = yield self.initial_sync_handler.room_initial_sync(
+ content = await self.initial_sync_handler.room_initial_sync(
room_id=room_id, requester=requester, pagin_config=pagination_config
)
return 200, content
@@ -584,11 +570,10 @@ class RoomEventServlet(RestServlet):
self._event_serializer = hs.get_event_client_serializer()
self.auth = hs.get_auth()
- @defer.inlineCallbacks
- def on_GET(self, request, room_id, event_id):
- requester = yield self.auth.get_user_by_req(request, allow_guest=True)
+ async def on_GET(self, request, room_id, event_id):
+ requester = await self.auth.get_user_by_req(request, allow_guest=True)
try:
- event = yield self.event_handler.get_event(
+ event = await self.event_handler.get_event(
requester.user, room_id, event_id
)
except AuthError:
@@ -599,7 +584,7 @@ class RoomEventServlet(RestServlet):
time_now = self.clock.time_msec()
if event:
- event = yield self._event_serializer.serialize_event(event, time_now)
+ event = await self._event_serializer.serialize_event(event, time_now)
return 200, event
        raise SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND)
@@ -617,9 +602,8 @@ class RoomEventContextServlet(RestServlet):
self._event_serializer = hs.get_event_client_serializer()
self.auth = hs.get_auth()
- @defer.inlineCallbacks
- def on_GET(self, request, room_id, event_id):
- requester = yield self.auth.get_user_by_req(request, allow_guest=True)
+ async def on_GET(self, request, room_id, event_id):
+ requester = await self.auth.get_user_by_req(request, allow_guest=True)
limit = parse_integer(request, "limit", default=10)
@@ -631,7 +615,7 @@ class RoomEventContextServlet(RestServlet):
else:
event_filter = None
- results = yield self.room_context_handler.get_event_context(
+ results = await self.room_context_handler.get_event_context(
requester.user, room_id, event_id, limit, event_filter
)
@@ -639,16 +623,16 @@ class RoomEventContextServlet(RestServlet):
raise SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND)
time_now = self.clock.time_msec()
- results["events_before"] = yield self._event_serializer.serialize_events(
+ results["events_before"] = await self._event_serializer.serialize_events(
results["events_before"], time_now
)
- results["event"] = yield self._event_serializer.serialize_event(
+ results["event"] = await self._event_serializer.serialize_event(
results["event"], time_now
)
- results["events_after"] = yield self._event_serializer.serialize_events(
+ results["events_after"] = await self._event_serializer.serialize_events(
results["events_after"], time_now
)
- results["state"] = yield self._event_serializer.serialize_events(
+ results["state"] = await self._event_serializer.serialize_events(
results["state"], time_now
)
@@ -665,11 +649,10 @@ class RoomForgetRestServlet(TransactionRestServlet):
PATTERNS = "/rooms/(?P<room_id>[^/]*)/forget"
register_txn_path(self, PATTERNS, http_server)
- @defer.inlineCallbacks
- def on_POST(self, request, room_id, txn_id=None):
- requester = yield self.auth.get_user_by_req(request, allow_guest=False)
+ async def on_POST(self, request, room_id, txn_id=None):
+ requester = await self.auth.get_user_by_req(request, allow_guest=False)
- yield self.room_member_handler.forget(user=requester.user, room_id=room_id)
+ await self.room_member_handler.forget(user=requester.user, room_id=room_id)
return 200, {}
@@ -696,9 +679,8 @@ class RoomMembershipRestServlet(TransactionRestServlet):
)
register_txn_path(self, PATTERNS, http_server)
- @defer.inlineCallbacks
- def on_POST(self, request, room_id, membership_action, txn_id=None):
- requester = yield self.auth.get_user_by_req(request, allow_guest=True)
+ async def on_POST(self, request, room_id, membership_action, txn_id=None):
+ requester = await self.auth.get_user_by_req(request, allow_guest=True)
if requester.is_guest and membership_action not in {
Membership.JOIN,
@@ -714,7 +696,7 @@ class RoomMembershipRestServlet(TransactionRestServlet):
content = {}
if membership_action == "invite" and self._has_3pid_invite_keys(content):
- yield self.room_member_handler.do_3pid_invite(
+ await self.room_member_handler.do_3pid_invite(
room_id,
requester.user,
content["medium"],
@@ -735,7 +717,7 @@ class RoomMembershipRestServlet(TransactionRestServlet):
if "reason" in content and membership_action in ["kick", "ban"]:
event_content = {"reason": content["reason"]}
- yield self.room_member_handler.update_membership(
+ await self.room_member_handler.update_membership(
requester=requester,
target=target,
room_id=room_id,
@@ -777,12 +759,11 @@ class RoomRedactEventRestServlet(TransactionRestServlet):
PATTERNS = "/rooms/(?P<room_id>[^/]*)/redact/(?P<event_id>[^/]*)"
register_txn_path(self, PATTERNS, http_server)
- @defer.inlineCallbacks
- def on_POST(self, request, room_id, event_id, txn_id=None):
- requester = yield self.auth.get_user_by_req(request)
+ async def on_POST(self, request, room_id, event_id, txn_id=None):
+ requester = await self.auth.get_user_by_req(request)
content = parse_json_object_from_request(request)
- event = yield self.event_creation_handler.create_and_send_nonmember_event(
+ event = await self.event_creation_handler.create_and_send_nonmember_event(
requester,
{
"type": EventTypes.Redaction,
@@ -816,29 +797,28 @@ class RoomTypingRestServlet(RestServlet):
self.typing_handler = hs.get_typing_handler()
self.auth = hs.get_auth()
- @defer.inlineCallbacks
- def on_PUT(self, request, room_id, user_id):
- requester = yield self.auth.get_user_by_req(request)
+ async def on_PUT(self, request, room_id, user_id):
+ requester = await self.auth.get_user_by_req(request)
room_id = urlparse.unquote(room_id)
target_user = UserID.from_string(urlparse.unquote(user_id))
content = parse_json_object_from_request(request)
- yield self.presence_handler.bump_presence_active_time(requester.user)
+ await self.presence_handler.bump_presence_active_time(requester.user)
# Limit timeout to stop people from setting silly typing timeouts.
timeout = min(content.get("timeout", 30000), 120000)
if content["typing"]:
- yield self.typing_handler.started_typing(
+ await self.typing_handler.started_typing(
target_user=target_user,
auth_user=requester.user,
room_id=room_id,
timeout=timeout,
)
else:
- yield self.typing_handler.stopped_typing(
+ await self.typing_handler.stopped_typing(
target_user=target_user, auth_user=requester.user, room_id=room_id
)
@@ -853,14 +833,13 @@ class SearchRestServlet(RestServlet):
self.handlers = hs.get_handlers()
self.auth = hs.get_auth()
- @defer.inlineCallbacks
- def on_POST(self, request):
- requester = yield self.auth.get_user_by_req(request)
+ async def on_POST(self, request):
+ requester = await self.auth.get_user_by_req(request)
content = parse_json_object_from_request(request)
batch = parse_string(request, "next_batch")
- results = yield self.handlers.search_handler.search(
+ results = await self.handlers.search_handler.search(
requester.user, content, batch
)
@@ -875,11 +854,10 @@ class JoinedRoomsRestServlet(RestServlet):
self.store = hs.get_datastore()
self.auth = hs.get_auth()
- @defer.inlineCallbacks
- def on_GET(self, request):
- requester = yield self.auth.get_user_by_req(request, allow_guest=True)
+ async def on_GET(self, request):
+ requester = await self.auth.get_user_by_req(request, allow_guest=True)
- room_ids = yield self.store.get_rooms_for_user(requester.user.to_string())
+ room_ids = await self.store.get_rooms_for_user(requester.user.to_string())
return 200, {"joined_rooms": list(room_ids)}
diff --git a/synapse/rest/client/v2_alpha/read_marker.py b/synapse/rest/client/v2_alpha/read_marker.py
index b3bf8567e1..67cbc37312 100644
--- a/synapse/rest/client/v2_alpha/read_marker.py
+++ b/synapse/rest/client/v2_alpha/read_marker.py
@@ -15,8 +15,6 @@
import logging
-from twisted.internet import defer
-
from synapse.http.servlet import RestServlet, parse_json_object_from_request
from ._base import client_patterns
@@ -34,17 +32,16 @@ class ReadMarkerRestServlet(RestServlet):
self.read_marker_handler = hs.get_read_marker_handler()
self.presence_handler = hs.get_presence_handler()
- @defer.inlineCallbacks
- def on_POST(self, request, room_id):
- requester = yield self.auth.get_user_by_req(request)
+ async def on_POST(self, request, room_id):
+ requester = await self.auth.get_user_by_req(request)
- yield self.presence_handler.bump_presence_active_time(requester.user)
+ await self.presence_handler.bump_presence_active_time(requester.user)
body = parse_json_object_from_request(request)
read_event_id = body.get("m.read", None)
if read_event_id:
- yield self.receipts_handler.received_client_receipt(
+ await self.receipts_handler.received_client_receipt(
room_id,
"m.read",
user_id=requester.user.to_string(),
@@ -53,7 +50,7 @@ class ReadMarkerRestServlet(RestServlet):
read_marker_event_id = body.get("m.fully_read", None)
if read_marker_event_id:
- yield self.read_marker_handler.received_client_read_marker(
+ await self.read_marker_handler.received_client_read_marker(
room_id,
user_id=requester.user.to_string(),
event_id=read_marker_event_id,
diff --git a/synapse/rest/client/v2_alpha/receipts.py b/synapse/rest/client/v2_alpha/receipts.py
index 0dab03d227..92555bd4a9 100644
--- a/synapse/rest/client/v2_alpha/receipts.py
+++ b/synapse/rest/client/v2_alpha/receipts.py
@@ -15,8 +15,6 @@
import logging
-from twisted.internet import defer
-
from synapse.api.errors import SynapseError
from synapse.http.servlet import RestServlet
@@ -39,16 +37,15 @@ class ReceiptRestServlet(RestServlet):
self.receipts_handler = hs.get_receipts_handler()
self.presence_handler = hs.get_presence_handler()
- @defer.inlineCallbacks
- def on_POST(self, request, room_id, receipt_type, event_id):
- requester = yield self.auth.get_user_by_req(request)
+ async def on_POST(self, request, room_id, receipt_type, event_id):
+ requester = await self.auth.get_user_by_req(request)
if receipt_type != "m.read":
raise SynapseError(400, "Receipt type must be 'm.read'")
- yield self.presence_handler.bump_presence_active_time(requester.user)
+ await self.presence_handler.bump_presence_active_time(requester.user)
- yield self.receipts_handler.received_client_receipt(
+ await self.receipts_handler.received_client_receipt(
room_id, receipt_type, user_id=requester.user.to_string(), event_id=event_id
)
diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py
index a883c8adda..541a6b0e10 100644
--- a/synapse/rest/client/v2_alpha/sync.py
+++ b/synapse/rest/client/v2_alpha/sync.py
@@ -112,9 +112,14 @@ class SyncRestServlet(RestServlet):
full_state = parse_boolean(request, "full_state", default=False)
logger.debug(
- "/sync: user=%r, timeout=%r, since=%r,"
- " set_presence=%r, filter_id=%r, device_id=%r"
- % (user, timeout, since, set_presence, filter_id, device_id)
+ "/sync: user=%r, timeout=%r, since=%r, "
+ "set_presence=%r, filter_id=%r, device_id=%r",
+ user,
+ timeout,
+ since,
+ set_presence,
+ filter_id,
+ device_id,
)
request_key = (user, timeout, since, filter_id, full_state, device_id)
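
This hunk (and the preview_url_resource.py ones below) replaces eager %-interpolation with the logger's own argument handling. The difference is worth a short, runnable illustration: the lazy form only builds the string if the record is actually emitted, and keeps the format string constant for log aggregation:

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("example")

user, timeout = "@alice:example.com", 30000

# Eager: the message is formatted even though DEBUG is disabled here.
logger.debug("/sync: user=%r, timeout=%r" % (user, timeout))

# Lazy: formatting is skipped entirely when the level is filtered out.
logger.debug("/sync: user=%r, timeout=%r", user, timeout)
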
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index 0c68c3aad5..094ebad770 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -117,8 +117,10 @@ class PreviewUrlResource(DirectServeResource):
pattern = entry[attrib]
value = getattr(url_tuple, attrib)
logger.debug(
- ("Matching attrib '%s' with value '%s' against" " pattern '%s'")
- % (attrib, value, pattern)
+ "Matching attrib '%s' with value '%s' against" " pattern '%s'",
+ attrib,
+ value,
+ pattern,
)
if value is None:
@@ -186,7 +188,7 @@ class PreviewUrlResource(DirectServeResource):
media_info = yield self._download_url(url, user)
- logger.debug("got media_info of '%s'" % media_info)
+ logger.debug("got media_info of '%s'", media_info)
if _is_media(media_info["media_type"]):
file_id = media_info["filesystem_id"]
@@ -254,7 +256,7 @@ class PreviewUrlResource(DirectServeResource):
og["og:image:width"] = dims["width"]
og["og:image:height"] = dims["height"]
else:
- logger.warn("Couldn't get dims for %s" % og["og:image"])
+ logger.warn("Couldn't get dims for %s", og["og:image"])
og["og:image"] = "mxc://%s/%s" % (
self.server_name,
@@ -268,7 +270,7 @@ class PreviewUrlResource(DirectServeResource):
logger.warn("Failed to find any OG data in %s", url)
og = {}
- logger.debug("Calculated OG for %s as %s" % (url, og))
+ logger.debug("Calculated OG for %s as %s", url, og)
jsonog = json.dumps(og)
@@ -297,7 +299,7 @@ class PreviewUrlResource(DirectServeResource):
with self.media_storage.store_into_file(file_info) as (f, fname, finish):
try:
- logger.debug("Trying to get url '%s'" % url)
+ logger.debug("Trying to get url '%s'", url)
length, headers, uri, code = yield self.client.get_file(
url, output_stream=f, max_size=self.max_spider_size
)
diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py
index 81c4aff496..c0e7f475c9 100644
--- a/synapse/server_notices/resource_limits_server_notices.py
+++ b/synapse/server_notices/resource_limits_server_notices.py
@@ -20,6 +20,7 @@ from twisted.internet import defer
from synapse.api.constants import (
EventTypes,
+ LimitBlockingTypes,
ServerNoticeLimitReached,
ServerNoticeMsgType,
)
@@ -70,7 +71,7 @@ class ResourceLimitsServerNotices(object):
return
if not self._server_notices_manager.is_enabled():
- # Don't try and send server notices unles they've been enabled
+ # Don't try and send server notices unless they've been enabled
return
timestamp = yield self._store.user_last_seen_monthly_active(user_id)
@@ -79,8 +80,6 @@ class ResourceLimitsServerNotices(object):
# In practice, not sure we can ever get here
return
- # Determine current state of room
-
room_id = yield self._server_notices_manager.get_notice_room_for_user(user_id)
if not room_id:
@@ -88,51 +87,86 @@ class ResourceLimitsServerNotices(object):
return
yield self._check_and_set_tags(user_id, room_id)
+
+ # Determine current state of room
currently_blocked, ref_events = yield self._is_room_currently_blocked(room_id)
+ limit_msg = None
+ limit_type = None
try:
- # Normally should always pass in user_id if you have it, but in
- # this case are checking what would happen to other users if they
- # were to arrive.
- try:
- yield self._auth.check_auth_blocking()
- is_auth_blocking = False
- except ResourceLimitError as e:
- is_auth_blocking = True
- event_content = e.msg
- event_limit_type = e.limit_type
-
- if currently_blocked and not is_auth_blocking:
- # Room is notifying of a block, when it ought not to be.
- # Remove block notification
- content = {"pinned": ref_events}
- yield self._server_notices_manager.send_notice(
- user_id, content, EventTypes.Pinned, ""
- )
+ # Normally should always pass in user_id to check_auth_blocking
+            # if you have it, but in this case we are checking what would happen
+ # to other users if they were to arrive.
+ yield self._auth.check_auth_blocking()
+ except ResourceLimitError as e:
+ limit_msg = e.msg
+ limit_type = e.limit_type
- elif not currently_blocked and is_auth_blocking:
+ try:
+ if (
+ limit_type == LimitBlockingTypes.MONTHLY_ACTIVE_USER
+ and not self._config.mau_limit_alerting
+ ):
+ # We have hit the MAU limit, but MAU alerting is disabled:
+ # reset room if necessary and return
+ if currently_blocked:
+                    yield self._remove_limit_block_notification(user_id, ref_events)
+ return
+
+ if currently_blocked and not limit_msg:
+ # Room is notifying of a block, when it ought not to be.
+ yield self._remove_limit_block_notification(user_id, ref_events)
+ elif not currently_blocked and limit_msg:
# Room is not notifying of a block, when it ought to be.
- # Add block notification
- content = {
- "body": event_content,
- "msgtype": ServerNoticeMsgType,
- "server_notice_type": ServerNoticeLimitReached,
- "admin_contact": self._config.admin_contact,
- "limit_type": event_limit_type,
- }
- event = yield self._server_notices_manager.send_notice(
- user_id, content, EventTypes.Message
+ yield self._apply_limit_block_notification(
+ user_id, limit_msg, limit_type
)
-
- content = {"pinned": [event.event_id]}
- yield self._server_notices_manager.send_notice(
- user_id, content, EventTypes.Pinned, ""
- )
-
except SynapseError as e:
logger.error("Error sending resource limits server notice: %s", e)
@defer.inlineCallbacks
+ def _remove_limit_block_notification(self, user_id, ref_events):
+ """Utility method to remove limit block notifications from the server
+ notices room.
+
+ Args:
+ user_id (str): user to notify
+ ref_events (list[str]): The event_ids of pinned events that are unrelated to
+ limit blocking and need to be preserved.
+ """
+ content = {"pinned": ref_events}
+ yield self._server_notices_manager.send_notice(
+ user_id, content, EventTypes.Pinned, ""
+ )
+
+ @defer.inlineCallbacks
+ def _apply_limit_block_notification(self, user_id, event_body, event_limit_type):
+ """Utility method to apply limit block notifications in the server
+ notices room.
+
+ Args:
+ user_id (str): user to notify
+            event_body (str): The human-readable text that describes the block.
+            event_limit_type (str): Specifies the type of block, e.g. the monthly
+            active user limit has been exceeded.
+ """
+ content = {
+ "body": event_body,
+ "msgtype": ServerNoticeMsgType,
+ "server_notice_type": ServerNoticeLimitReached,
+ "admin_contact": self._config.admin_contact,
+ "limit_type": event_limit_type,
+ }
+ event = yield self._server_notices_manager.send_notice(
+ user_id, content, EventTypes.Message
+ )
+
+ content = {"pinned": [event.event_id]}
+ yield self._server_notices_manager.send_notice(
+ user_id, content, EventTypes.Pinned, ""
+ )
+
+ @defer.inlineCallbacks
def _check_and_set_tags(self, user_id, room_id):
"""
Since server notices rooms were originally not with tags,
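
The refactored maybe_send_server_notice_to_user reduces to a small decision table over the room's current state and the result of check_auth_blocking. A synchronous sketch of that table (the string constants are assumed to match LimitBlockingTypes; the real code is asynchronous and sends or retracts pinned notices):

def decide(currently_blocked, limit_msg, limit_type, mau_limit_alerting):
    # "monthly_active_user" is assumed to be the value of
    # LimitBlockingTypes.MONTHLY_ACTIVE_USER.
    if limit_type == "monthly_active_user" and not mau_limit_alerting:
        # MAU alerting suppressed: only clean up a stale block notice.
        return "remove" if currently_blocked else "noop"
    if currently_blocked and not limit_msg:
        return "remove"  # limit no longer exceeded: retract the notice
    if not currently_blocked and limit_msg:
        return "apply"   # newly blocked: send and pin a notice
    return "noop"

# Mirrors the tests added further down in this diff:
assert decide(True, "limit hit", "monthly_active_user", False) == "remove"
assert decide(False, "limit hit", "monthly_active_user", False) == "noop"
assert decide(False, "limit hit", "hs_disabled", False) == "apply"
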
diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py
index 80b57a948c..37d469ffd7 100644
--- a/synapse/storage/background_updates.py
+++ b/synapse/storage/background_updates.py
@@ -94,13 +94,16 @@ class BackgroundUpdateStore(SQLBaseStore):
self._all_done = False
def start_doing_background_updates(self):
- run_as_background_process("background_updates", self._run_background_updates)
+ run_as_background_process("background_updates", self.run_background_updates)
@defer.inlineCallbacks
- def _run_background_updates(self):
+ def run_background_updates(self, sleep=True):
logger.info("Starting background schema updates")
while True:
- yield self.hs.get_clock().sleep(self.BACKGROUND_UPDATE_INTERVAL_MS / 1000.0)
+ if sleep:
+ yield self.hs.get_clock().sleep(
+ self.BACKGROUND_UPDATE_INTERVAL_MS / 1000.0
+ )
try:
result = yield self.do_next_background_update(
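
Renaming _run_background_updates to run_background_updates and making the per-batch sleep optional lets one-off tooling drive every pending update to completion without waiting BACKGROUND_UPDATE_INTERVAL_MS between batches. A hedged sketch of such a caller (the store object here is hypothetical):

from twisted.internet import defer

@defer.inlineCallbacks
def run_all_pending_updates(store):
    # sleep=False skips the interval between batches; the loop still
    # exits once do_next_background_update reports that nothing is left.
    yield store.run_background_updates(sleep=False)
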
diff --git a/synapse/storage/data_stores/main/e2e_room_keys.py b/synapse/storage/data_stores/main/e2e_room_keys.py
index ef88e79293..1cbbae5b63 100644
--- a/synapse/storage/data_stores/main/e2e_room_keys.py
+++ b/synapse/storage/data_stores/main/e2e_room_keys.py
@@ -321,9 +321,17 @@ class EndToEndRoomKeyStore(SQLBaseStore):
def _delete_e2e_room_keys_version_txn(txn):
if version is None:
this_version = self._get_current_version(txn, user_id)
+ if this_version is None:
+ raise StoreError(404, "No current backup version")
else:
this_version = version
+ self._simple_delete_txn(
+ txn,
+ table="e2e_room_keys",
+ keyvalues={"user_id": user_id, "version": this_version},
+ )
+
return self._simple_update_one_txn(
txn,
table="e2e_room_keys_versions",
diff --git a/synapse/storage/data_stores/main/end_to_end_keys.py b/synapse/storage/data_stores/main/end_to_end_keys.py
index f5c3ed9dc2..a0bc6f2d18 100644
--- a/synapse/storage/data_stores/main/end_to_end_keys.py
+++ b/synapse/storage/data_stores/main/end_to_end_keys.py
@@ -248,6 +248,73 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
return self.runInteraction("count_e2e_one_time_keys", _count_e2e_one_time_keys)
+ def _get_e2e_cross_signing_key_txn(self, txn, user_id, key_type, from_user_id=None):
+ """Returns a user's cross-signing key.
+
+ Args:
+ txn (twisted.enterprise.adbapi.Connection): db connection
+ user_id (str): the user whose key is being requested
+            key_type (str): the type of key that is being requested: either 'master'
+ for a master key, 'self_signing' for a self-signing key, or
+ 'user_signing' for a user-signing key
+ from_user_id (str): if specified, signatures made by this user on
+ the key will be included in the result
+
+ Returns:
+ dict of the key data or None if not found
+ """
+ sql = (
+ "SELECT keydata "
+ " FROM e2e_cross_signing_keys "
+ " WHERE user_id = ? AND keytype = ? ORDER BY stream_id DESC LIMIT 1"
+ )
+ txn.execute(sql, (user_id, key_type))
+ row = txn.fetchone()
+ if not row:
+ return None
+ key = json.loads(row[0])
+
+ device_id = None
+ for k in key["keys"].values():
+ device_id = k
+
+ if from_user_id is not None:
+ sql = (
+ "SELECT key_id, signature "
+ " FROM e2e_cross_signing_signatures "
+ " WHERE user_id = ? "
+ " AND target_user_id = ? "
+ " AND target_device_id = ? "
+ )
+ txn.execute(sql, (from_user_id, user_id, device_id))
+ row = txn.fetchone()
+ if row:
+ key.setdefault("signatures", {}).setdefault(from_user_id, {})[
+ row[0]
+ ] = row[1]
+
+ return key
+
+ def get_e2e_cross_signing_key(self, user_id, key_type, from_user_id=None):
+ """Returns a user's cross-signing key.
+
+ Args:
+            user_id (str): the user whose cross-signing key is being requested
+            key_type (str): the type of cross-signing key to get
+            from_user_id (str): if specified, signatures made by this user on
+            the key will be included in the result
+
+ Returns:
+ dict of the key data or None if not found
+ """
+ return self.runInteraction(
+ "get_e2e_cross_signing_key",
+ self._get_e2e_cross_signing_key_txn,
+ user_id,
+ key_type,
+ from_user_id,
+ )
+
class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
def set_e2e_device_keys(self, user_id, device_id, time_now, device_keys):
@@ -426,73 +493,6 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
key,
)
- def _get_e2e_cross_signing_key_txn(self, txn, user_id, key_type, from_user_id=None):
- """Returns a user's cross-signing key.
-
- Args:
- txn (twisted.enterprise.adbapi.Connection): db connection
- user_id (str): the user whose key is being requested
- key_type (str): the type of key that is being set: either 'master'
- for a master key, 'self_signing' for a self-signing key, or
- 'user_signing' for a user-signing key
- from_user_id (str): if specified, signatures made by this user on
- the key will be included in the result
-
- Returns:
- dict of the key data or None if not found
- """
- sql = (
- "SELECT keydata "
- " FROM e2e_cross_signing_keys "
- " WHERE user_id = ? AND keytype = ? ORDER BY stream_id DESC LIMIT 1"
- )
- txn.execute(sql, (user_id, key_type))
- row = txn.fetchone()
- if not row:
- return None
- key = json.loads(row[0])
-
- device_id = None
- for k in key["keys"].values():
- device_id = k
-
- if from_user_id is not None:
- sql = (
- "SELECT key_id, signature "
- " FROM e2e_cross_signing_signatures "
- " WHERE user_id = ? "
- " AND target_user_id = ? "
- " AND target_device_id = ? "
- )
- txn.execute(sql, (from_user_id, user_id, device_id))
- row = txn.fetchone()
- if row:
- key.setdefault("signatures", {}).setdefault(from_user_id, {})[
- row[0]
- ] = row[1]
-
- return key
-
- def get_e2e_cross_signing_key(self, user_id, key_type, from_user_id=None):
- """Returns a user's cross-signing key.
-
- Args:
- user_id (str): the user whose self-signing key is being requested
- key_type (str): the type of cross-signing key to get
- from_user_id (str): if specified, signatures made by this user on
- the self-signing key will be included in the result
-
- Returns:
- dict of the key data or None if not found
- """
- return self.runInteraction(
- "get_e2e_cross_signing_key",
- self._get_e2e_cross_signing_key_txn,
- user_id,
- key_type,
- from_user_id,
- )
-
def store_e2e_cross_signing_signatures(self, user_id, signatures):
"""Stores cross-signing signatures.
diff --git a/synapse/storage/data_stores/main/event_federation.py b/synapse/storage/data_stores/main/event_federation.py
index a470a48e0f..90bef0cd2c 100644
--- a/synapse/storage/data_stores/main/event_federation.py
+++ b/synapse/storage/data_stores/main/event_federation.py
@@ -364,9 +364,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
)
def _get_backfill_events(self, txn, room_id, event_list, limit):
- logger.debug(
- "_get_backfill_events: %s, %s, %s", room_id, repr(event_list), limit
- )
+ logger.debug("_get_backfill_events: %s, %r, %s", room_id, event_list, limit)
event_results = set()
diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py
index 813f34528c..7c3607f308 100644
--- a/synapse/storage/data_stores/main/events.py
+++ b/synapse/storage/data_stores/main/events.py
@@ -1881,12 +1881,11 @@ class EventsStore(
logger.info("[purge] done")
- @defer.inlineCallbacks
- def is_event_after(self, event_id1, event_id2):
+ async def is_event_after(self, event_id1, event_id2):
"""Returns True if event_id1 is after event_id2 in the stream
"""
- to_1, so_1 = yield self._get_event_ordering(event_id1)
- to_2, so_2 = yield self._get_event_ordering(event_id2)
+ to_1, so_1 = await self._get_event_ordering(event_id1)
+ to_2, so_2 = await self._get_event_ordering(event_id2)
return (to_1, so_1) > (to_2, so_2)
@cachedInlineCallbacks(max_entries=5000)
diff --git a/synapse/storage/data_stores/main/room.py b/synapse/storage/data_stores/main/room.py
index 4428e5c55d..67bb1b6f60 100644
--- a/synapse/storage/data_stores/main/room.py
+++ b/synapse/storage/data_stores/main/room.py
@@ -201,13 +201,17 @@ class RoomWorkerStore(SQLBaseStore):
where_clauses.append(
"""
(
- name LIKE ?
- OR topic LIKE ?
- OR canonical_alias LIKE ?
+ LOWER(name) LIKE ?
+ OR LOWER(topic) LIKE ?
+ OR LOWER(canonical_alias) LIKE ?
)
"""
)
- query_args += [search_term, search_term, search_term]
+ query_args += [
+ search_term.lower(),
+ search_term.lower(),
+ search_term.lower(),
+ ]
where_clause = ""
if where_clauses:
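
Lower-casing both the columns and the search term makes room directory search behave consistently across engines: SQLite's LIKE is case-insensitive only for ASCII, while PostgreSQL's LIKE is fully case-sensitive. A runnable illustration against an in-memory SQLite database:

import sqlite3

db = sqlite3.connect(":memory:")
db.execute("CREATE TABLE rooms (name TEXT)")
db.execute("INSERT INTO rooms VALUES ('Synapse Admins')")

term = "%SYNAPSE%"
rows = db.execute(
    "SELECT name FROM rooms WHERE LOWER(name) LIKE ?", (term.lower(),)
).fetchall()
print(rows)  # [('Synapse Admins',)]
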
diff --git a/synapse/storage/data_stores/main/roommember.py b/synapse/storage/data_stores/main/roommember.py
index e47ab604dd..bc04bfd7d4 100644
--- a/synapse/storage/data_stores/main/roommember.py
+++ b/synapse/storage/data_stores/main/roommember.py
@@ -720,7 +720,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
# See bulk_get_push_rules_for_room for how we work around this.
assert state_group is not None
- cache = self._get_joined_hosts_cache(room_id)
+ cache = yield self._get_joined_hosts_cache(room_id)
joined_hosts = yield cache.get_destinations(state_entry)
return joined_hosts
diff --git a/synapse/storage/data_stores/main/schema/delta/56/delete_keys_from_deleted_backups.sql b/synapse/storage/data_stores/main/schema/delta/56/delete_keys_from_deleted_backups.sql
new file mode 100644
index 0000000000..1d2ddb1b1a
--- /dev/null
+++ b/synapse/storage/data_stores/main/schema/delta/56/delete_keys_from_deleted_backups.sql
@@ -0,0 +1,25 @@
+/* Copyright 2019 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Delete room keys that belong to a deleted room key version, or to room key
+ * versions that no longer exist.
+ */
+DELETE FROM e2e_room_keys
+WHERE version NOT IN (
+ SELECT version
+ FROM e2e_room_keys_versions
+ WHERE e2e_room_keys.user_id = e2e_room_keys_versions.user_id
+ AND e2e_room_keys_versions.deleted = 0
+);
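
The correlated NOT IN subquery keeps only keys whose backup version still exists and is not flagged deleted, matching the transaction-level cleanup added to _delete_e2e_room_keys_version_txn above. A self-contained demonstration with deliberately simplified table layouts:

import sqlite3

db = sqlite3.connect(":memory:")
db.executescript(
    """
    CREATE TABLE e2e_room_keys_versions (user_id TEXT, version INT, deleted INT);
    CREATE TABLE e2e_room_keys (user_id TEXT, version INT, session_id TEXT);
    INSERT INTO e2e_room_keys_versions VALUES ('@u:hs', 1, 1), ('@u:hs', 2, 0);
    INSERT INTO e2e_room_keys VALUES ('@u:hs', 1, 's1'), ('@u:hs', 2, 's2');
    """
)
db.execute(
    """
    DELETE FROM e2e_room_keys
    WHERE version NOT IN (
        SELECT version FROM e2e_room_keys_versions
        WHERE e2e_room_keys.user_id = e2e_room_keys_versions.user_id
          AND e2e_room_keys_versions.deleted = 0
    )
    """
)
print(db.execute("SELECT version, session_id FROM e2e_room_keys").fetchall())
# [(2, 's2')] -- only the key in the still-live version survives
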
diff --git a/synapse/storage/schema/delta/56/hidden_devices.sql b/synapse/storage/data_stores/main/schema/delta/56/hidden_devices.sql
index 67f8b20297..67f8b20297 100644
--- a/synapse/storage/schema/delta/56/hidden_devices.sql
+++ b/synapse/storage/data_stores/main/schema/delta/56/hidden_devices.sql
diff --git a/synapse/storage/schema/delta/56/signing_keys.sql b/synapse/storage/data_stores/main/schema/delta/56/signing_keys.sql
index 27a96123e3..27a96123e3 100644
--- a/synapse/storage/schema/delta/56/signing_keys.sql
+++ b/synapse/storage/data_stores/main/schema/delta/56/signing_keys.sql
diff --git a/synapse/storage/data_stores/main/state.py b/synapse/storage/data_stores/main/state.py
index d54442e5fa..9b2207075b 100644
--- a/synapse/storage/data_stores/main/state.py
+++ b/synapse/storage/data_stores/main/state.py
@@ -15,6 +15,7 @@
import logging
from collections import namedtuple
+from typing import Iterable, Tuple
from six import iteritems, itervalues
from six.moves import range
@@ -23,6 +24,8 @@ from twisted.internet import defer
from synapse.api.constants import EventTypes
from synapse.api.errors import NotFoundError
+from synapse.events import EventBase
+from synapse.events.snapshot import EventContext
from synapse.storage._base import SQLBaseStore
from synapse.storage.background_updates import BackgroundUpdateStore
from synapse.storage.data_stores.main.events_worker import EventsWorkerStore
@@ -1215,7 +1218,9 @@ class StateStore(StateGroupWorkerStore, StateBackgroundUpdateStore):
def __init__(self, db_conn, hs):
super(StateStore, self).__init__(db_conn, hs)
- def _store_event_state_mappings_txn(self, txn, events_and_contexts):
+ def _store_event_state_mappings_txn(
+ self, txn, events_and_contexts: Iterable[Tuple[EventBase, EventContext]]
+ ):
state_groups = {}
for event, context in events_and_contexts:
if event.internal_metadata.is_outlier():
diff --git a/synapse/storage/data_stores/main/stats.py b/synapse/storage/data_stores/main/stats.py
index 5ab639b2ad..4d59b7833f 100644
--- a/synapse/storage/data_stores/main/stats.py
+++ b/synapse/storage/data_stores/main/stats.py
@@ -332,7 +332,7 @@ class StatsStore(StateDeltasStore):
def _bulk_update_stats_delta_txn(txn):
for stats_type, stats_updates in updates.items():
for stats_id, fields in stats_updates.items():
- logger.info(
+ logger.debug(
"Updating %s stats for %s: %s", stats_type, stats_id, fields
)
self._update_stats_delta_txn(
diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py
index 804dbca443..b60a604474 100644
--- a/synapse/util/async_helpers.py
+++ b/synapse/util/async_helpers.py
@@ -86,11 +86,12 @@ class ObservableDeferred(object):
deferred.addCallbacks(callback, errback)
- def observe(self):
+ def observe(self) -> defer.Deferred:
"""Observe the underlying deferred.
- Can return either a deferred if the underlying deferred is still pending
- (or has failed), or the actual value. Callers may need to use maybeDeferred.
+ This returns a brand new deferred that is resolved when the underlying
+ deferred is resolved. Interacting with the returned deferred does not
+        affect the underlying deferred.
"""
if not self._result:
d = defer.Deferred()
@@ -105,7 +106,7 @@ class ObservableDeferred(object):
return d
else:
success, res = self._result
- return res if success else defer.fail(res)
+ return defer.succeed(res) if success else defer.fail(res)
def observers(self):
return self._observers
@@ -138,7 +139,7 @@ def concurrently_execute(func, args, limit):
the number of concurrent executions.
Args:
- func (func): Function to execute, should return a deferred.
+ func (func): Function to execute, should return a deferred or coroutine.
args (list): List of arguments to pass to func, each invocation of func
        gets a single argument.
        limit (int): Maximum number of concurrent executions.
@@ -148,11 +149,10 @@ def concurrently_execute(func, args, limit):
"""
it = iter(args)
- @defer.inlineCallbacks
- def _concurrently_execute_inner():
+ async def _concurrently_execute_inner():
try:
while True:
- yield func(next(it))
+ await maybe_awaitable(func(next(it)))
except StopIteration:
pass
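
observe() used to return the plain result once the underlying deferred had resolved, forcing callers through maybeDeferred; it now always returns a Deferred. Similarly, concurrently_execute wraps each call in maybe_awaitable so func may return a Deferred, a coroutine, or a plain value. A small sketch of the new observe() contract (assumes a Synapse checkout on the import path):

from twisted.internet import defer
from synapse.util.async_helpers import ObservableDeferred

d = defer.Deferred()
od = ObservableDeferred(d)
d.callback("result")

o = od.observe()       # always a Deferred now, even after resolution
assert isinstance(o, defer.Deferred)
o.addCallback(print)   # prints "result"
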
diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py
index 5ac2530a6a..0e8da27f53 100644
--- a/synapse/util/caches/descriptors.py
+++ b/synapse/util/caches/descriptors.py
@@ -438,7 +438,7 @@ class CacheDescriptor(_CacheDescriptorBase):
if isinstance(cached_result_d, ObservableDeferred):
observer = cached_result_d.observe()
else:
- observer = cached_result_d
+ observer = defer.succeed(cached_result_d)
except KeyError:
ret = defer.maybeDeferred(
@@ -482,9 +482,8 @@ class CacheListDescriptor(_CacheDescriptorBase):
Given a list of keys it looks in the cache to find any hits, then passes
the list of missing keys to the wrapped function.
- Once wrapped, the function returns either a Deferred which resolves to
- the list of results, or (if all results were cached), just the list of
- results.
+ Once wrapped, the function returns a Deferred which resolves to the list
+ of results.
"""
def __init__(
@@ -618,7 +617,7 @@ class CacheListDescriptor(_CacheDescriptorBase):
)
return make_deferred_yieldable(d)
else:
- return results
+ return defer.succeed(results)
obj.__dict__[self.orig.__name__] = wrapped
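
With this change a @cached function hands back a Deferred on the cache-hit path too, so callers handle it uniformly -- which is also why the roommember.py call site earlier in this diff gains a yield. A minimal sketch (names illustrative; assumes a Synapse checkout on the import path):

from twisted.internet import defer
from synapse.util.caches.descriptors import cached

class Store:
    def __init__(self):
        self.calls = 0

    @cached()
    def get_thing(self, key):
        self.calls += 1
        return defer.succeed(key.upper())

store = Store()
d1 = store.get_thing("a")  # miss: Deferred from the wrapped function
d2 = store.get_thing("a")  # hit: still a Deferred, no longer a plain value
assert isinstance(d2, defer.Deferred)
assert store.calls == 1
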
diff --git a/synapse/util/versionstring.py b/synapse/util/versionstring.py
index fa404b9d75..ab7d03af3a 100644
--- a/synapse/util/versionstring.py
+++ b/synapse/util/versionstring.py
@@ -42,6 +42,7 @@ def get_version_string(module):
try:
null = open(os.devnull, "w")
cwd = os.path.dirname(os.path.abspath(module.__file__))
+
try:
git_branch = (
subprocess.check_output(
@@ -51,7 +52,8 @@ def get_version_string(module):
.decode("ascii")
)
git_branch = "b=" + git_branch
- except subprocess.CalledProcessError:
+ except (subprocess.CalledProcessError, FileNotFoundError):
+ # FileNotFoundError can arise when git is not installed
git_branch = ""
try:
@@ -63,7 +65,7 @@ def get_version_string(module):
.decode("ascii")
)
git_tag = "t=" + git_tag
- except subprocess.CalledProcessError:
+ except (subprocess.CalledProcessError, FileNotFoundError):
git_tag = ""
try:
@@ -74,7 +76,7 @@ def get_version_string(module):
.strip()
.decode("ascii")
)
- except subprocess.CalledProcessError:
+ except (subprocess.CalledProcessError, FileNotFoundError):
git_commit = ""
try:
@@ -89,7 +91,7 @@ def get_version_string(module):
)
git_dirty = "dirty" if is_dirty else ""
- except subprocess.CalledProcessError:
+ except (subprocess.CalledProcessError, FileNotFoundError):
git_dirty = ""
if git_branch or git_tag or git_commit or git_dirty:
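
subprocess raises FileNotFoundError, not CalledProcessError, when the executable itself is missing, so catching only the latter let a missing git binary crash version reporting. A quick runnable check:

import subprocess

try:
    subprocess.check_output(["definitely-not-git", "describe"])
except (subprocess.CalledProcessError, FileNotFoundError) as e:
    print(type(e).__name__)  # FileNotFoundError when the binary is absent
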
diff --git a/tests/handlers/test_stats.py b/tests/handlers/test_stats.py
index d5c8bd7612..e0075ccd32 100644
--- a/tests/handlers/test_stats.py
+++ b/tests/handlers/test_stats.py
@@ -607,6 +607,7 @@ class StatsRoomTests(unittest.HomeserverTestCase):
"""
self.hs.config.stats_enabled = False
+ self.handler.stats_enabled = False
u1 = self.register_user("u1", "pass")
u1token = self.login("u1", "pass")
@@ -618,6 +619,7 @@ class StatsRoomTests(unittest.HomeserverTestCase):
self.assertIsNone(self._get_current_stats("user", u1))
self.hs.config.stats_enabled = True
+ self.handler.stats_enabled = True
self._perform_background_initial_update()
diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py
index 67f1013051..f360c8e965 100644
--- a/tests/handlers/test_typing.py
+++ b/tests/handlers/test_typing.py
@@ -144,6 +144,9 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
self.datastore.get_to_device_stream_token = lambda: 0
self.datastore.get_new_device_msgs_for_remote = lambda *args, **kargs: ([], 0)
self.datastore.delete_device_msgs_for_remote = lambda *args, **kargs: None
+ self.datastore.set_received_txn_response = lambda *args, **kwargs: defer.succeed(
+ None
+ )
def test_started_typing_local(self):
self.room_members = [U_APPLE, U_BANANA]
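
The stub above must now return defer.succeed(None) rather than a bare None, presumably because the code under test awaits the result as part of the async/await migration: whatever a coroutine awaits has to be awaitable. A minimal sketch of the failure mode (the function names are illustrative):

from twisted.internet import defer

async def record_txn_response(store_fn):
    await store_fn()  # fine only if store_fn() returns an awaitable

defer.ensureDeferred(record_txn_response(lambda: defer.succeed(None)))  # ok
# With a stub returning bare None, the await raises:
#   TypeError: object NoneType can't be used in 'await' expression
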
diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py
index cdf89e3383..eb540e34f6 100644
--- a/tests/server_notices/test_resource_limits_server_notices.py
+++ b/tests/server_notices/test_resource_limits_server_notices.py
@@ -17,7 +17,7 @@ from mock import Mock
from twisted.internet import defer
-from synapse.api.constants import EventTypes, ServerNoticeMsgType
+from synapse.api.constants import EventTypes, LimitBlockingTypes, ServerNoticeMsgType
from synapse.api.errors import ResourceLimitError
from synapse.server_notices.resource_limits_server_notices import (
ResourceLimitsServerNotices,
@@ -133,7 +133,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase):
self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id))
# Would be better to check contents, but 2 calls == set blocking event
- self.assertTrue(self._send_notice.call_count == 2)
+ self.assertEqual(self._send_notice.call_count, 2)
def test_maybe_send_server_notice_to_user_add_blocked_notice_noop(self):
"""
@@ -158,6 +158,61 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase):
self._send_notice.assert_not_called()
+ def test_maybe_send_server_notice_when_alerting_suppressed_room_unblocked(self):
+ """
+        Test that when the server is over the MAU limit and alerting is
+        suppressed, an alert message is not sent into the room.
+ """
+ self.hs.config.mau_limit_alerting = False
+ self._rlsn._auth.check_auth_blocking = Mock(
+ side_effect=ResourceLimitError(
+ 403, "foo", limit_type=LimitBlockingTypes.MONTHLY_ACTIVE_USER
+ )
+ )
+ self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id))
+
+        self.assertEqual(self._send_notice.call_count, 0)
+
+ def test_check_hs_disabled_unaffected_by_mau_alert_suppression(self):
+ """
+        Test that when a server is disabled, MAU limit alerting is ignored.
+ """
+ self.hs.config.mau_limit_alerting = False
+ self._rlsn._auth.check_auth_blocking = Mock(
+ side_effect=ResourceLimitError(
+ 403, "foo", limit_type=LimitBlockingTypes.HS_DISABLED
+ )
+ )
+ self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id))
+
+ # Would be better to check contents, but 2 calls == set blocking event
+ self.assertEqual(self._send_notice.call_count, 2)
+
+ def test_maybe_send_server_notice_when_alerting_suppressed_room_blocked(self):
+ """
+ When the room is already in a blocked state, test that when alerting
+        is suppressed the room is returned to an unblocked state.
+ """
+ self.hs.config.mau_limit_alerting = False
+ self._rlsn._auth.check_auth_blocking = Mock(
+ side_effect=ResourceLimitError(
+ 403, "foo", limit_type=LimitBlockingTypes.MONTHLY_ACTIVE_USER
+ )
+ )
+        self._rlsn._is_room_currently_blocked = Mock(
+ return_value=defer.succeed((True, []))
+ )
+
+ mock_event = Mock(
+ type=EventTypes.Message, content={"msgtype": ServerNoticeMsgType}
+ )
+ self._rlsn._store.get_events = Mock(
+ return_value=defer.succeed({"123": mock_event})
+ )
+ self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id))
+
+ self._send_notice.assert_called_once()
+
class TestResourceLimitsServerNoticesWithRealRooms(unittest.HomeserverTestCase):
def prepare(self, reactor, clock, hs):
diff --git a/tests/storage/test__base.py b/tests/storage/test__base.py
index dd49a14524..9b81b536f5 100644
--- a/tests/storage/test__base.py
+++ b/tests/storage/test__base.py
@@ -197,7 +197,7 @@ class CacheDecoratorTestCase(unittest.TestCase):
a.func.prefill(("foo",), ObservableDeferred(d))
- self.assertEquals(a.func("foo"), d.result)
+ self.assertEquals(a.func("foo").result, d.result)
self.assertEquals(callcount[0], 0)
@defer.inlineCallbacks
diff --git a/tests/storage/test_e2e_room_keys.py b/tests/storage/test_e2e_room_keys.py
new file mode 100644
index 0000000000..d128fde441
--- /dev/null
+++ b/tests/storage/test_e2e_room_keys.py
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tests import unittest
+
+# sample room_key data for use in the tests
+room_key = {
+ "first_message_index": 1,
+ "forwarded_count": 1,
+ "is_verified": False,
+ "session_data": "SSBBTSBBIEZJU0gK",
+}
+
+
+class E2eRoomKeysHandlerTestCase(unittest.HomeserverTestCase):
+ def make_homeserver(self, reactor, clock):
+ hs = self.setup_test_homeserver("server", http_client=None)
+ self.store = hs.get_datastore()
+ return hs
+
+ def test_room_keys_version_delete(self):
+ # test that deleting a room key backup deletes the keys
+ version1 = self.get_success(
+ self.store.create_e2e_room_keys_version(
+ "user_id", {"algorithm": "rot13", "auth_data": {}}
+ )
+ )
+
+ self.get_success(
+ self.store.set_e2e_room_key(
+ "user_id", version1, "room", "session", room_key
+ )
+ )
+
+ version2 = self.get_success(
+ self.store.create_e2e_room_keys_version(
+ "user_id", {"algorithm": "rot13", "auth_data": {}}
+ )
+ )
+
+ self.get_success(
+ self.store.set_e2e_room_key(
+ "user_id", version2, "room", "session", room_key
+ )
+ )
+
+ # make sure the keys were stored properly
+ keys = self.get_success(self.store.get_e2e_room_keys("user_id", version1))
+ self.assertEqual(len(keys["rooms"]), 1)
+
+ keys = self.get_success(self.store.get_e2e_room_keys("user_id", version2))
+ self.assertEqual(len(keys["rooms"]), 1)
+
+ # delete version1
+ self.get_success(self.store.delete_e2e_room_keys_version("user_id", version1))
+
+ # make sure the key from version1 is gone, and the key from version2 is
+ # still there
+ keys = self.get_success(self.store.get_e2e_room_keys("user_id", version1))
+ self.assertEqual(len(keys["rooms"]), 0)
+
+ keys = self.get_success(self.store.get_e2e_room_keys("user_id", version2))
+ self.assertEqual(len(keys["rooms"]), 1)
diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py
index 5713870f48..39e360fe24 100644
--- a/tests/util/caches/test_descriptors.py
+++ b/tests/util/caches/test_descriptors.py
@@ -310,14 +310,14 @@ class DescriptorTestCase(unittest.TestCase):
obj.mock.return_value = ["spam", "eggs"]
r = obj.fn(1, 2)
- self.assertEqual(r, ["spam", "eggs"])
+ self.assertEqual(r.result, ["spam", "eggs"])
obj.mock.assert_called_once_with(1, 2)
obj.mock.reset_mock()
# a call with different params should call the mock again
obj.mock.return_value = ["chips"]
r = obj.fn(1, 3)
- self.assertEqual(r, ["chips"])
+ self.assertEqual(r.result, ["chips"])
obj.mock.assert_called_once_with(1, 3)
obj.mock.reset_mock()
@@ -325,9 +325,9 @@ class DescriptorTestCase(unittest.TestCase):
self.assertEqual(len(obj.fn.cache.cache), 3)
r = obj.fn(1, 2)
- self.assertEqual(r, ["spam", "eggs"])
+ self.assertEqual(r.result, ["spam", "eggs"])
r = obj.fn(1, 3)
- self.assertEqual(r, ["chips"])
+ self.assertEqual(r.result, ["chips"])
obj.mock.assert_not_called()
def test_cache_iterable_with_sync_exception(self):
diff --git a/tests/utils.py b/tests/utils.py
index 2808eea933..7dc9bdc505 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -137,7 +137,6 @@ def default_config(name, parse=False):
"limit_usage_by_mau": False,
"hs_disabled": False,
"hs_disabled_message": "",
- "hs_disabled_limit_type": "",
"max_mau_value": 50,
"mau_trial_days": 0,
"mau_stats_only": False,
diff --git a/tox.ini b/tox.ini
index 3cd2c5e633..50b6afe611 100644
--- a/tox.ini
+++ b/tox.ini
@@ -114,16 +114,16 @@ skip_install = True
basepython = python3.6
deps =
flake8
- black
+ black==19.3b0 # We pin so that our tests don't start failing on new releases of black.
commands =
python -m black --check --diff .
- /bin/sh -c "flake8 synapse tests scripts scripts-dev scripts/hash_password scripts/register_new_matrix_user scripts/synapse_port_db synctl {env:PEP8SUFFIX:}"
+ /bin/sh -c "flake8 synapse tests scripts scripts-dev synctl {env:PEP8SUFFIX:}"
{toxinidir}/scripts-dev/config-lint.sh
[testenv:check_isort]
skip_install = True
deps = isort
-commands = /bin/sh -c "isort -c -df -sp setup.cfg -rc synapse tests"
+commands = /bin/sh -c "isort -c -df -sp setup.cfg -rc synapse tests scripts-dev scripts"
[testenv:check-newsfragment]
skip_install = True
|